aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net
diff options
context:
space:
mode:
authorGrant Likely <grant.likely@secretlab.ca>2010-05-22 02:36:56 -0400
committerGrant Likely <grant.likely@secretlab.ca>2010-05-22 02:36:56 -0400
commitcf9b59e9d3e008591d1f54830f570982bb307a0d (patch)
tree113478ce8fd8c832ba726ffdf59b82cb46356476 /drivers/net
parent44504b2bebf8b5823c59484e73096a7d6574471d (diff)
parentf4b87dee923342505e1ddba8d34ce9de33e75050 (diff)
Merge remote branch 'origin' into secretlab/next-devicetree
Merging in current state of Linus' tree to deal with merge conflicts and build failures in vio.c after merge. Conflicts: drivers/i2c/busses/i2c-cpm.c drivers/i2c/busses/i2c-mpc.c drivers/net/gianfar.c Also fixed up one line in arch/powerpc/kernel/vio.c to use the correct node pointer. Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c501.c2
-rw-r--r--drivers/net/3c503.c44
-rw-r--r--drivers/net/3c505.c14
-rw-r--r--drivers/net/3c507.c5
-rw-r--r--drivers/net/3c509.c4
-rw-r--r--drivers/net/3c515.c6
-rw-r--r--drivers/net/3c523.c11
-rw-r--r--drivers/net/3c527.c6
-rw-r--r--drivers/net/3c59x.c11
-rw-r--r--drivers/net/7990.c12
-rw-r--r--drivers/net/8139cp.c9
-rw-r--r--drivers/net/8139too.c10
-rw-r--r--drivers/net/82596.c9
-rw-r--r--drivers/net/Kconfig46
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/a2065.c11
-rw-r--r--drivers/net/ac3200.c2
-rw-r--r--drivers/net/acenic.c44
-rw-r--r--drivers/net/acenic.h6
-rw-r--r--drivers/net/amd8111e.c8
-rw-r--r--drivers/net/apne.c1
-rw-r--r--drivers/net/appletalk/cops.c9
-rw-r--r--drivers/net/appletalk/ltpc.c1
-rw-r--r--drivers/net/arcnet/arcnet.c1
-rw-r--r--drivers/net/arcnet/com20020-pci.c4
-rw-r--r--drivers/net/ariadne.c3
-rw-r--r--drivers/net/arm/am79c961a.c7
-rw-r--r--drivers/net/arm/at91_ether.c7
-rw-r--r--drivers/net/arm/ep93xx_eth.c12
-rw-r--r--drivers/net/arm/ether1.c1
-rw-r--r--drivers/net/arm/ether3.c1
-rw-r--r--drivers/net/arm/ixp4xx_eth.c9
-rw-r--r--drivers/net/arm/ks8695net.c13
-rw-r--r--drivers/net/arm/w90p910_ether.c7
-rw-r--r--drivers/net/at1700.c11
-rw-r--r--drivers/net/atarilance.c5
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/atl1c/atl1c_main.c9
-rw-r--r--drivers/net/atl1e/atl1e_ethtool.c2
-rw-r--r--drivers/net/atl1e/atl1e_main.c16
-rw-r--r--drivers/net/atlx/atl1.c7
-rw-r--r--drivers/net/atlx/atl2.c8
-rw-r--r--drivers/net/atlx/atlx.c6
-rw-r--r--drivers/net/atp.c10
-rw-r--r--drivers/net/au1000_eth.c262
-rw-r--r--drivers/net/au1000_eth.h4
-rw-r--r--drivers/net/ax88796.c1
-rw-r--r--drivers/net/b44.c8
-rw-r--r--drivers/net/bcm63xx_enet.c14
-rw-r--r--drivers/net/benet/be.h11
-rw-r--r--drivers/net/benet/be_cmds.c14
-rw-r--r--drivers/net/benet/be_cmds.h2
-rw-r--r--drivers/net/benet/be_ethtool.c5
-rw-r--r--drivers/net/benet/be_hw.h3
-rw-r--r--drivers/net/benet/be_main.c315
-rw-r--r--drivers/net/bfin_mac.c559
-rw-r--r--drivers/net/bfin_mac.h18
-rw-r--r--drivers/net/bmac.c15
-rw-r--r--drivers/net/bnx2.c146
-rw-r--r--drivers/net/bnx2.h9
-rw-r--r--drivers/net/bnx2x.h66
-rw-r--r--drivers/net/bnx2x_hsi.h2
-rw-r--r--drivers/net/bnx2x_link.c12
-rw-r--r--drivers/net/bnx2x_main.c1878
-rw-r--r--drivers/net/bnx2x_reg.h27
-rw-r--r--drivers/net/bonding/bond_ipv6.c9
-rw-r--r--drivers/net/bonding/bond_main.c275
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/caif/Kconfig17
-rw-r--r--drivers/net/caif/Makefile12
-rw-r--r--drivers/net/caif/caif_serial.c449
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c3
-rw-r--r--drivers/net/can/mcp251x.c16
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c1
-rw-r--r--drivers/net/can/mscan/mscan.c1
-rw-r--r--drivers/net/can/sja1000/Kconfig4
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c154
-rw-r--r--drivers/net/can/sja1000/sja1000.c23
-rw-r--r--drivers/net/can/sja1000/sja1000.h1
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c48
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/can/usb/ems_usb.c30
-rw-r--r--drivers/net/cassini.c15
-rw-r--r--drivers/net/chelsio/pm3393.c7
-rw-r--r--drivers/net/chelsio/sge.c58
-rw-r--r--drivers/net/cnic.c80
-rw-r--r--drivers/net/cnic.h10
-rw-r--r--drivers/net/cpmac.c17
-rw-r--r--drivers/net/cris/eth_v10.c8
-rw-r--r--drivers/net/cs89x0.c3
-rw-r--r--drivers/net/cxgb3/ael1002.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/cxgb3/l2t.c1
-rw-r--r--drivers/net/cxgb3/sge.c20
-rw-r--r--drivers/net/cxgb3/xgmac.c8
-rw-r--r--drivers/net/cxgb4/cxgb4.h9
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c102
-rw-r--r--drivers/net/cxgb4/sge.c11
-rw-r--r--drivers/net/cxgb4/t4_hw.c117
-rw-r--r--drivers/net/cxgb4/t4_msg.h1
-rw-r--r--drivers/net/cxgb4/t4fw_api.h4
-rw-r--r--drivers/net/davinci_emac.c8
-rw-r--r--drivers/net/de600.c4
-rw-r--r--drivers/net/de620.c1
-rw-r--r--drivers/net/declance.c10
-rw-r--r--drivers/net/defxx.c6
-rw-r--r--drivers/net/depca.c15
-rw-r--r--drivers/net/dl2k.c8
-rw-r--r--drivers/net/dm9000.c50
-rw-r--r--drivers/net/dnet.c4
-rw-r--r--drivers/net/e100.c200
-rw-r--r--drivers/net/e1000/e1000.h37
-rw-r--r--drivers/net/e1000/e1000_ethtool.c89
-rw-r--r--drivers/net/e1000/e1000_hw.c356
-rw-r--r--drivers/net/e1000/e1000_hw.h1
-rw-r--r--drivers/net/e1000/e1000_main.c429
-rw-r--r--drivers/net/e1000/e1000_osdep.h14
-rw-r--r--drivers/net/e1000/e1000_param.c112
-rw-r--r--drivers/net/e1000e/82571.c49
-rw-r--r--drivers/net/e1000e/defines.h9
-rw-r--r--drivers/net/e1000e/e1000.h31
-rw-r--r--drivers/net/e1000e/es2lan.c11
-rw-r--r--drivers/net/e1000e/ethtool.c48
-rw-r--r--drivers/net/e1000e/hw.h5
-rw-r--r--drivers/net/e1000e/ich8lan.c391
-rw-r--r--drivers/net/e1000e/lib.c60
-rw-r--r--drivers/net/e1000e/netdev.c937
-rw-r--r--drivers/net/e1000e/param.c25
-rw-r--r--drivers/net/e1000e/phy.c21
-rw-r--r--drivers/net/e2100.c1
-rw-r--r--drivers/net/eepro.c13
-rw-r--r--drivers/net/eexpress.c11
-rw-r--r--drivers/net/ehea/ehea.h2
-rw-r--r--drivers/net/ehea/ehea_main.c78
-rw-r--r--drivers/net/ehea/ehea_qmr.c43
-rw-r--r--drivers/net/ehea/ehea_qmr.h14
-rw-r--r--drivers/net/enc28j60.c2
-rw-r--r--drivers/net/enic/Makefile2
-rw-r--r--drivers/net/enic/cq_enet_desc.h12
-rw-r--r--drivers/net/enic/enic.h12
-rw-r--r--drivers/net/enic/enic_main.c341
-rw-r--r--drivers/net/enic/enic_res.c5
-rw-r--r--drivers/net/enic/enic_res.h1
-rw-r--r--drivers/net/enic/vnic_dev.c110
-rw-r--r--drivers/net/enic/vnic_dev.h10
-rw-r--r--drivers/net/enic/vnic_rq.c4
-rw-r--r--drivers/net/enic/vnic_vic.c73
-rw-r--r--drivers/net/enic/vnic_vic.h59
-rw-r--r--drivers/net/enic/vnic_wq.c4
-rw-r--r--drivers/net/epic100.c13
-rw-r--r--drivers/net/eql.c2
-rw-r--r--drivers/net/es3210.c2
-rw-r--r--drivers/net/eth16i.c5
-rw-r--r--drivers/net/ethoc.c8
-rw-r--r--drivers/net/ewrk3.c14
-rw-r--r--drivers/net/fealnx.c9
-rw-r--r--drivers/net/fec.c1142
-rw-r--r--drivers/net/fec_mpc52xx.c8
-rw-r--r--drivers/net/forcedeth.c251
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/fs_enet/mac-fcc.c6
-rw-r--r--drivers/net/fs_enet/mac-fec.c6
-rw-r--r--drivers/net/fs_enet/mac-scc.c6
-rw-r--r--drivers/net/fsl_pq_mdio.c26
-rw-r--r--drivers/net/gianfar.c227
-rw-r--r--drivers/net/gianfar.h8
-rw-r--r--drivers/net/greth.c7
-rw-r--r--drivers/net/hamachi.c11
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hamradio/scc.c1
-rw-r--r--drivers/net/hp-plus.c4
-rw-r--r--drivers/net/hp.c3
-rw-r--r--drivers/net/hp100.c16
-rw-r--r--drivers/net/hydra.c1
-rw-r--r--drivers/net/ibm_newemac/core.c12
-rw-r--r--drivers/net/ibmlana.c8
-rw-r--r--drivers/net/ibmveth.c24
-rw-r--r--drivers/net/ifb.c1
-rw-r--r--drivers/net/igb/e1000_82575.c35
-rw-r--r--drivers/net/igb/e1000_82575.h9
-rw-r--r--drivers/net/igb/e1000_defines.h5
-rw-r--r--drivers/net/igb/e1000_hw.h17
-rw-r--r--drivers/net/igb/e1000_mac.c27
-rw-r--r--drivers/net/igb/igb.h9
-rw-r--r--drivers/net/igb/igb_ethtool.c58
-rw-r--r--drivers/net/igb/igb_main.c613
-rw-r--r--drivers/net/igbvf/ethtool.c2
-rw-r--r--drivers/net/igbvf/netdev.c88
-rw-r--r--drivers/net/ioc3-eth.c7
-rw-r--r--drivers/net/ipg.c11
-rw-r--r--drivers/net/ipg.h109
-rw-r--r--drivers/net/irda/Kconfig6
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ali-ircc.c32
-rw-r--r--drivers/net/irda/au1k_ir.c1
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/irda/mcs7780.c4
-rw-r--r--drivers/net/irda/pxaficp_ir.c1
-rw-r--r--drivers/net/irda/sa1100_ir.c2
-rw-r--r--drivers/net/irda/sh_irda.c865
-rw-r--r--drivers/net/irda/sh_sir.c12
-rw-r--r--drivers/net/irda/sir_dev.c1
-rw-r--r--drivers/net/irda/smsc-ircc2.c3
-rw-r--r--drivers/net/irda/via-ircc.h2
-rw-r--r--drivers/net/irda/vlsi_ir.c5
-rw-r--r--drivers/net/irda/w83977af_ir.c2
-rw-r--r--drivers/net/iseries_veth.c6
-rw-r--r--drivers/net/ixgb/ixgb.h8
-rw-r--r--drivers/net/ixgb/ixgb_ee.c24
-rw-r--r--drivers/net/ixgb/ixgb_hw.c164
-rw-r--r--drivers/net/ixgb/ixgb_hw.h12
-rw-r--r--drivers/net/ixgb/ixgb_main.c159
-rw-r--r--drivers/net/ixgb/ixgb_osdep.h16
-rw-r--r--drivers/net/ixgb/ixgb_param.c31
-rw-r--r--drivers/net/ixgbe/ixgbe.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c8
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c480
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c539
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h22
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c1
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c146
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c17
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c678
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c42
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c137
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h8
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h52
-rw-r--r--drivers/net/ixgbevf/defines.h12
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c125
-rw-r--r--drivers/net/ixgbevf/vf.c27
-rw-r--r--drivers/net/ixgbevf/vf.h4
-rw-r--r--drivers/net/ixp2000/ixpdev.c2
-rw-r--r--drivers/net/jme.c12
-rw-r--r--drivers/net/korina.c12
-rw-r--r--drivers/net/ks8842.c61
-rw-r--r--drivers/net/ks8851.c460
-rw-r--r--drivers/net/ks8851.h14
-rw-r--r--drivers/net/ks8851_mll.c63
-rw-r--r--drivers/net/ksz884x.c86
-rw-r--r--drivers/net/lance.c4
-rw-r--r--drivers/net/lib82596.c11
-rw-r--r--drivers/net/lib8390.c22
-rw-r--r--drivers/net/ll_temac.h14
-rw-r--r--drivers/net/ll_temac_main.c159
-rw-r--r--drivers/net/lne390.c2
-rw-r--r--drivers/net/lp486e.c8
-rw-r--r--drivers/net/mac8390.c2
-rw-r--r--drivers/net/mac89x0.c1
-rw-r--r--drivers/net/macb.c9
-rw-r--r--drivers/net/mace.c6
-rw-r--r--drivers/net/macmace.c7
-rw-r--r--drivers/net/macvlan.c23
-rw-r--r--drivers/net/macvtap.c46
-rw-r--r--drivers/net/meth.c4
-rw-r--r--drivers/net/mlx4/en_ethtool.c2
-rw-r--r--drivers/net/mlx4/en_netdev.c53
-rw-r--r--drivers/net/mlx4/eq.c2
-rw-r--r--drivers/net/mlx4/mlx4.h1
-rw-r--r--drivers/net/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/mv643xx_eth.c10
-rw-r--r--drivers/net/myri10ge/myri10ge.c54
-rw-r--r--drivers/net/myri_sbus.c2
-rw-r--r--drivers/net/natsemi.c10
-rw-r--r--drivers/net/ne-h8300.c1
-rw-r--r--drivers/net/ne.c1
-rw-r--r--drivers/net/ne2.c1
-rw-r--r--drivers/net/ne2k-pci.c1
-rw-r--r--drivers/net/ne3210.c2
-rw-r--r--drivers/net/netconsole.c15
-rw-r--r--drivers/net/netx-eth.c1
-rw-r--r--drivers/net/netxen/netxen_nic.h6
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c9
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h8
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c131
-rw-r--r--drivers/net/netxen/netxen_nic_init.c169
-rw-r--r--drivers/net/netxen/netxen_nic_main.c85
-rw-r--r--drivers/net/ni5010.c5
-rw-r--r--drivers/net/ni52.c13
-rw-r--r--drivers/net/ni65.c5
-rw-r--r--drivers/net/niu.c57
-rw-r--r--drivers/net/niu.h7
-rw-r--r--drivers/net/octeon/octeon_mgmt.c66
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pci-skeleton.c7
-rw-r--r--drivers/net/pcmcia/3c574_cs.c28
-rw-r--r--drivers/net/pcmcia/3c589_cs.c303
-rw-r--r--drivers/net/pcmcia/axnet_cs.c31
-rw-r--r--drivers/net/pcmcia/com20020_cs.c29
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c27
-rw-r--r--drivers/net/pcmcia/ibmtr_cs.c18
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c26
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c16
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c62
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c45
-rw-r--r--drivers/net/pcnet32.c15
-rw-r--r--drivers/net/phy/Kconfig5
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/bcm63xx.c8
-rw-r--r--drivers/net/phy/broadcom.c16
-rw-r--r--drivers/net/phy/cicada.c8
-rw-r--r--drivers/net/phy/davicom.c9
-rw-r--r--drivers/net/phy/et1011c.c7
-rw-r--r--drivers/net/phy/icplus.c7
-rw-r--r--drivers/net/phy/lxt.c8
-rw-r--r--drivers/net/phy/marvell.c13
-rw-r--r--drivers/net/phy/mdio-bitbang.c60
-rw-r--r--drivers/net/phy/mdio-octeon.c10
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/micrel.c114
-rw-r--r--drivers/net/phy/national.c10
-rw-r--r--drivers/net/phy/phy_device.c12
-rw-r--r--drivers/net/phy/qsemi.c7
-rw-r--r--drivers/net/phy/realtek.c7
-rw-r--r--drivers/net/phy/smsc.c11
-rw-r--r--drivers/net/phy/ste10Xp.c8
-rw-r--r--drivers/net/phy/vitesse.c8
-rw-r--r--drivers/net/plip.c4
-rw-r--r--drivers/net/ppp_generic.c53
-rw-r--r--drivers/net/pppoe.c11
-rw-r--r--drivers/net/pppol2tp.c2680
-rw-r--r--drivers/net/ps3_gelic_net.c13
-rw-r--r--drivers/net/ps3_gelic_wireless.c76
-rw-r--r--drivers/net/qla3xxx.c72
-rw-r--r--drivers/net/qla3xxx.h8
-rw-r--r--drivers/net/qlcnic/qlcnic.h40
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c3
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c34
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h60
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c136
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c101
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c447
-rw-r--r--drivers/net/qlge/qlge.h8
-rw-r--r--drivers/net/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/qlge/qlge_ethtool.c2
-rw-r--r--drivers/net/qlge/qlge_main.c64
-rw-r--r--drivers/net/r6040.c46
-rw-r--r--drivers/net/r8169.c212
-rw-r--r--drivers/net/rrunner.c1
-rw-r--r--drivers/net/s2io.c15
-rw-r--r--drivers/net/s6gmac.c3
-rw-r--r--drivers/net/sb1000.c5
-rw-r--r--drivers/net/sb1250-mac.c342
-rw-r--r--drivers/net/sc92031.c8
-rw-r--r--drivers/net/seeq8005.c4
-rw-r--r--drivers/net/sfc/efx.c141
-rw-r--r--drivers/net/sfc/efx.h4
-rw-r--r--drivers/net/sfc/ethtool.c6
-rw-r--r--drivers/net/sfc/falcon.c20
-rw-r--r--drivers/net/sfc/falcon_boards.c13
-rw-r--r--drivers/net/sfc/falcon_xmac.c22
-rw-r--r--drivers/net/sfc/mcdi.c32
-rw-r--r--drivers/net/sfc/mcdi_mac.c25
-rw-r--r--drivers/net/sfc/mcdi_pcol.h71
-rw-r--r--drivers/net/sfc/mcdi_phy.c152
-rw-r--r--drivers/net/sfc/net_driver.h76
-rw-r--r--drivers/net/sfc/nic.c114
-rw-r--r--drivers/net/sfc/nic.h5
-rw-r--r--drivers/net/sfc/selftest.c8
-rw-r--r--drivers/net/sfc/selftest.h4
-rw-r--r--drivers/net/sfc/siena.c32
-rw-r--r--drivers/net/sfc/tx.c61
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sgiseeq.c6
-rw-r--r--drivers/net/sh_eth.c2
-rw-r--r--drivers/net/sis190.c6
-rw-r--r--drivers/net/sis900.c25
-rw-r--r--drivers/net/skfp/fplustm.c2
-rw-r--r--drivers/net/skfp/pcmplc.c4
-rw-r--r--drivers/net/skfp/skfddi.c15
-rw-r--r--drivers/net/skfp/smt.c2
-rw-r--r--drivers/net/skfp/srf.c2
-rw-r--r--drivers/net/skge.c46
-rw-r--r--drivers/net/skge.h4
-rw-r--r--drivers/net/sky2.c215
-rw-r--r--drivers/net/sky2.h41
-rw-r--r--drivers/net/slhc.c1
-rw-r--r--drivers/net/slip.c4
-rw-r--r--drivers/net/smc-mca.c1
-rw-r--r--drivers/net/smc-ultra.c1
-rw-r--r--drivers/net/smc-ultra32.c1
-rw-r--r--drivers/net/smc911x.c21
-rw-r--r--drivers/net/smc9194.c61
-rw-r--r--drivers/net/smc91x.c12
-rw-r--r--drivers/net/smsc911x.c11
-rw-r--r--drivers/net/smsc9420.c8
-rw-r--r--drivers/net/sonic.c10
-rw-r--r--drivers/net/spider_net.c8
-rw-r--r--drivers/net/starfire.c16
-rw-r--r--drivers/net/stmmac/Makefile2
-rw-r--r--drivers/net/stmmac/common.h21
-rw-r--r--drivers/net/stmmac/dwmac100.c538
-rw-r--r--drivers/net/stmmac/dwmac100.h5
-rw-r--r--drivers/net/stmmac/dwmac1000.h12
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c41
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c338
-rw-r--r--drivers/net/stmmac/dwmac100_core.c196
-rw-r--r--drivers/net/stmmac/dwmac100_dma.c134
-rw-r--r--drivers/net/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/stmmac/dwmac_lib.c19
-rw-r--r--drivers/net/stmmac/enh_desc.c337
-rw-r--r--drivers/net/stmmac/norm_desc.c236
-rw-r--r--drivers/net/stmmac/stmmac.h10
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c7
-rw-r--r--drivers/net/stmmac/stmmac_main.c32
-rw-r--r--drivers/net/stmmac/stmmac_timer.c6
-rw-r--r--drivers/net/stnic.c1
-rw-r--r--drivers/net/sun3_82586.c13
-rw-r--r--drivers/net/sun3lance.c8
-rw-r--r--drivers/net/sunbmac.c12
-rw-r--r--drivers/net/sundance.c14
-rw-r--r--drivers/net/sungem.c9
-rw-r--r--drivers/net/sunhme.c22
-rw-r--r--drivers/net/sunlance.c11
-rw-r--r--drivers/net/sunqe.c7
-rw-r--r--drivers/net/sunvnet.c9
-rw-r--r--drivers/net/tc35815.c8
-rw-r--r--drivers/net/tehuti.c10
-rw-r--r--drivers/net/tg3.c840
-rw-r--r--drivers/net/tg3.h17
-rw-r--r--drivers/net/tlan.c13
-rw-r--r--drivers/net/tokenring/3c359.c112
-rw-r--r--drivers/net/tokenring/ibmtr.c13
-rw-r--r--drivers/net/tokenring/lanstreamer.c58
-rw-r--r--drivers/net/tokenring/madgemc.c12
-rw-r--r--drivers/net/tokenring/olympic.c74
-rw-r--r--drivers/net/tokenring/smctr.c4
-rw-r--r--drivers/net/tokenring/tms380tr.c65
-rw-r--r--drivers/net/tsi108_eth.c16
-rw-r--r--drivers/net/tulip/de2104x.c13
-rw-r--r--drivers/net/tulip/de4x5.c87
-rw-r--r--drivers/net/tulip/dmfe.c17
-rw-r--r--drivers/net/tulip/media.c2
-rw-r--r--drivers/net/tulip/pnic.c2
-rw-r--r--drivers/net/tulip/tulip_core.c31
-rw-r--r--drivers/net/tulip/uli526x.c10
-rw-r--r--drivers/net/tulip/winbond-840.c18
-rw-r--r--drivers/net/tulip/xircom_cb.c6
-rw-r--r--drivers/net/tun.c55
-rw-r--r--drivers/net/typhoon.c8
-rw-r--r--drivers/net/ucc_geth.c12
-rw-r--r--drivers/net/usb/Kconfig21
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/asix.c53
-rw-r--r--drivers/net/usb/catc.c6
-rw-r--r--drivers/net/usb/cdc_ether.c114
-rw-r--r--drivers/net/usb/dm9601.c11
-rw-r--r--drivers/net/usb/hso.c4
-rw-r--r--drivers/net/usb/ipheth.c565
-rw-r--r--drivers/net/usb/kaweth.c14
-rw-r--r--drivers/net/usb/mcs7830.c10
-rw-r--r--drivers/net/usb/pegasus.c9
-rw-r--r--drivers/net/usb/pegasus.h2
-rw-r--r--drivers/net/usb/rndis_host.c18
-rw-r--r--drivers/net/usb/sierra_net.c1004
-rw-r--r--drivers/net/usb/smsc75xx.c6
-rw-r--r--drivers/net/usb/smsc95xx.c6
-rw-r--r--drivers/net/usb/usbnet.c15
-rw-r--r--drivers/net/veth.c1
-rw-r--r--drivers/net/via-rhine.c10
-rw-r--r--drivers/net/via-velocity.c121
-rw-r--r--drivers/net/via-velocity.h77
-rw-r--r--drivers/net/virtio_net.c95
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c11
-rw-r--r--drivers/net/vxge/vxge-config.c41
-rw-r--r--drivers/net/vxge/vxge-config.h34
-rw-r--r--drivers/net/vxge/vxge-ethtool.c5
-rw-r--r--drivers/net/vxge/vxge-main.c245
-rw-r--r--drivers/net/vxge/vxge-main.h6
-rw-r--r--drivers/net/vxge/vxge-traffic.c79
-rw-r--r--drivers/net/vxge/vxge-traffic.h50
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wan/cycx_x25.c13
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/hd64570.c1
-rw-r--r--drivers/net/wan/hd64572.c1
-rw-r--r--drivers/net/wan/hdlc_x25.c12
-rw-r--r--drivers/net/wan/ixp4xx_hss.c1
-rw-r--r--drivers/net/wan/lapbether.c12
-rw-r--r--drivers/net/wan/lmc/lmc_main.c6
-rw-r--r--drivers/net/wan/pc300_drv.c5
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/net/wan/wanxl.c1
-rw-r--r--drivers/net/wan/x25_asy.c12
-rw-r--r--drivers/net/wd.c1
-rw-r--r--drivers/net/wimax/i2400m/control.c27
-rw-r--r--drivers/net/wimax/i2400m/driver.c167
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h5
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h82
-rw-r--r--drivers/net/wimax/i2400m/netdev.c14
-rw-r--r--drivers/net/wimax/i2400m/rx.c116
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c2
-rw-r--r--drivers/net/wimax/i2400m/sdio-tx.c35
-rw-r--r--drivers/net/wimax/i2400m/sdio.c7
-rw-r--r--drivers/net/wimax/i2400m/tx.c155
-rw-r--r--drivers/net/wimax/i2400m/usb-notif.c1
-rw-r--r--drivers/net/wimax/i2400m/usb.c14
-rw-r--r--drivers/net/wireless/Kconfig92
-rw-r--r--drivers/net/wireless/adm8211.c12
-rw-r--r--drivers/net/wireless/airo.c37
-rw-r--r--drivers/net/wireless/airo_cs.c72
-rw-r--r--drivers/net/wireless/at76c50x-usb.c2
-rw-r--r--drivers/net/wireless/ath/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h52
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/eeprom.h4
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c587
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c36
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.h1
-rw-r--r--drivers/net/wireless/ath/ath.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c744
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h104
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h313
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c329
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h39
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c382
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c19
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h35
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h88
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c379
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c77
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c17
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h42
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c41
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig21
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile26
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c217
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_initvals.h742
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c1374
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9001_initvals.h1254
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c1000
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c598
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_initvals.h (renamed from drivers/net/wireless/ath/ath9k/initvals.h)2292
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c480
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c535
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h572
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c802
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c1838
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h323
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c205
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_initvals.h1784
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c614
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h120
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c1134
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h847
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h28
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c1024
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c392
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h17
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c297
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h25
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c1008
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h104
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h464
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c255
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c834
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1775
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c707
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c480
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h245
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h280
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1913
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h275
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c88
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c571
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h93
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c158
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.c978
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h596
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c533
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h183
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c336
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h139
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c591
-rw-r--r--drivers/net/wireless/ath/debug.h1
-rw-r--r--drivers/net/wireless/ath/hw.c4
-rw-r--r--drivers/net/wireless/ath/regd.c4
-rw-r--r--drivers/net/wireless/atmel.c1
-rw-r--r--drivers/net/wireless/atmel_cs.c70
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/main.c26
-rw-r--r--drivers/net/wireless/b43/pcmcia.c5
-rw-r--r--drivers/net/wireless/b43/phy_n.c479
-rw-r--r--drivers/net/wireless/b43/phy_n.h21
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c22
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h37
-rw-r--r--drivers/net/wireless/b43/xmit.c1
-rw-r--r--drivers/net/wireless/b43legacy/main.c21
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/hostap/hostap_80211_rx.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c38
-rw-r--r--drivers/net/wireless/hostap/hostap_download.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c3
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c60
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c190
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h14
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c13
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_rx.c1
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c500
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c93
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c361
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1493
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c335
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c850
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h56
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c276
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h118
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ict.c307
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c1530
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c208
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c1340
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c425
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1022
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h181
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h120
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c1022
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h138
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c912
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h289
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h57
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h94
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c826
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c550
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c901
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h76
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c1074
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c409
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/bus.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c17
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c14
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debug.h7
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c123
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c15
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.h5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c79
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c19
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.h283
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c12
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h2
-rw-r--r--drivers/net/wireless/libertas/assoc.c22
-rw-r--r--drivers/net/wireless/libertas/cfg.c1
-rw-r--r--drivers/net/wireless/libertas/debugfs.c5
-rw-r--r--drivers/net/wireless/libertas/dev.h1
-rw-r--r--drivers/net/wireless/libertas/if_cs.c21
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c127
-rw-r--r--drivers/net/wireless/libertas/if_usb.c4
-rw-r--r--drivers/net/wireless/libertas/main.c15
-rw-r--r--drivers/net/wireless/libertas/rx.c51
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c4
-rw-r--r--drivers/net/wireless/libertas_tf/cmd.c203
-rw-r--r--drivers/net/wireless/libertas_tf/deb_defs.h104
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c252
-rw-r--r--drivers/net/wireless/libertas_tf/libertas_tf.h2
-rw-r--r--drivers/net/wireless/libertas_tf/main.c106
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c96
-rw-r--r--drivers/net/wireless/mwl8k.c30
-rw-r--r--drivers/net/wireless/orinoco/Kconfig20
-rw-r--r--drivers/net/wireless/orinoco/Makefile4
-rw-r--r--drivers/net/wireless/orinoco/airport.c8
-rw-r--r--drivers/net/wireless/orinoco/cfg.c91
-rw-r--r--drivers/net/wireless/orinoco/fw.c10
-rw-r--r--drivers/net/wireless/orinoco/hermes.c286
-rw-r--r--drivers/net/wireless/orinoco/hermes.h62
-rw-r--r--drivers/net/wireless/orinoco/hermes_dld.c243
-rw-r--r--drivers/net/wireless/orinoco/hw.c102
-rw-r--r--drivers/net/wireless/orinoco/hw.h1
-rw-r--r--drivers/net/wireless/orinoco/main.c307
-rw-r--r--drivers/net/wireless/orinoco/main.h12
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h38
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c110
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c1795
-rw-r--r--drivers/net/wireless/orinoco/scan.c4
-rw-r--r--drivers/net/wireless/orinoco/spectrum_cs.c32
-rw-r--r--drivers/net/wireless/orinoco/wext.c273
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c12
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/p54/txrx.c3
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c18
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c10
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c8
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c2
-rw-r--r--drivers/net/wireless/ray_cs.c257
-rw-r--r--drivers/net/wireless/ray_cs.h1
-rw-r--r--drivers/net/wireless/rndis_wlan.c374
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig4
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c56
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c54
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c130
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h119
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c676
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c318
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c297
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h40
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h35
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00crypto.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c23
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dump.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00firmware.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c17
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c47
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c95
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c128
-rw-r--r--drivers/net/wireless/rtl818x/Kconfig88
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h11
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c115
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c14
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig24
-rw-r--r--drivers/net/wireless/wl12xx/Makefile6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_io.h20
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c73
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_reg.h7
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_sdio.c144
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h63
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c179
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h157
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c46
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.h10
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c337
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h27
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h488
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c12
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c69
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h8
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c57
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.c87
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.h139
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c1272
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c7
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c94
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_sdio.c291
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c315
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h96
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.c1
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c133
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h9
-rw-r--r--drivers/net/wireless/wl3501.h1
-rw-r--r--drivers/net/wireless/wl3501_cs.c80
-rw-r--r--drivers/net/wireless/zd1201.c12
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c13
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c10
-rw-r--r--drivers/net/xilinx_emaclite.c10
-rw-r--r--drivers/net/yellowfin.c13
-rw-r--r--drivers/net/znet.c2
-rw-r--r--drivers/net/zorro8390.c2
818 files changed, 62102 insertions, 33182 deletions
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 3ea42ff17657..1776ab61b05f 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -480,7 +480,6 @@ static netdev_tx_t el_start_xmit(struct sk_buff *skb, struct net_device *dev)
480 /* fire ... Trigger xmit. */ 480 /* fire ... Trigger xmit. */
481 outb(AX_XMIT, AX_CMD); 481 outb(AX_XMIT, AX_CMD);
482 lp->loading = 0; 482 lp->loading = 0;
483 dev->trans_start = jiffies;
484 if (el_debug > 2) 483 if (el_debug > 2)
485 pr_debug(" queued xmit.\n"); 484 pr_debug(" queued xmit.\n");
486 dev_kfree_skb(skb); 485 dev_kfree_skb(skb);
@@ -727,7 +726,6 @@ static void el_receive(struct net_device *dev)
727 dev->stats.rx_packets++; 726 dev->stats.rx_packets++;
728 dev->stats.rx_bytes += pkt_len; 727 dev->stats.rx_bytes += pkt_len;
729 } 728 }
730 return;
731} 729}
732 730
733/** 731/**
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 66e0323c1839..baac246561b9 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -380,6 +380,12 @@ out:
380 return retval; 380 return retval;
381} 381}
382 382
383static irqreturn_t el2_probe_interrupt(int irq, void *seen)
384{
385 *(bool *)seen = true;
386 return IRQ_HANDLED;
387}
388
383static int 389static int
384el2_open(struct net_device *dev) 390el2_open(struct net_device *dev)
385{ 391{
@@ -391,23 +397,35 @@ el2_open(struct net_device *dev)
391 397
392 outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */ 398 outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
393 do { 399 do {
394 retval = request_irq(*irqp, NULL, 0, "bogus", dev); 400 bool seen;
395 if (retval >= 0) { 401
402 retval = request_irq(*irqp, el2_probe_interrupt, 0,
403 dev->name, &seen);
404 if (retval == -EBUSY)
405 continue;
406 if (retval < 0)
407 goto err_disable;
408
396 /* Twinkle the interrupt, and check if it's seen. */ 409 /* Twinkle the interrupt, and check if it's seen. */
397 unsigned long cookie = probe_irq_on(); 410 seen = false;
411 smp_wmb();
398 outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR); 412 outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
399 outb_p(0x00, E33G_IDCFR); 413 outb_p(0x00, E33G_IDCFR);
400 if (*irqp == probe_irq_off(cookie) && /* It's a good IRQ line! */ 414 msleep(1);
401 ((retval = request_irq(dev->irq = *irqp, 415 free_irq(*irqp, el2_probe_interrupt);
402 eip_interrupt, 0, 416 if (!seen)
403 dev->name, dev)) == 0)) 417 continue;
404 break; 418
405 } else { 419 retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
406 if (retval != -EBUSY) 420 dev->name, dev);
407 return retval; 421 if (retval == -EBUSY)
408 } 422 continue;
423 if (retval < 0)
424 goto err_disable;
409 } while (*++irqp); 425 } while (*++irqp);
426
410 if (*irqp == 0) { 427 if (*irqp == 0) {
428 err_disable:
411 outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */ 429 outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
412 return -EAGAIN; 430 return -EAGAIN;
413 } 431 }
@@ -555,7 +573,6 @@ el2_block_output(struct net_device *dev, int count,
555 } 573 }
556 blocked:; 574 blocked:;
557 outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); 575 outb_p(ei_status.interface_num==0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
558 return;
559} 576}
560 577
561/* Read the 4 byte, page aligned 8390 specific header. */ 578/* Read the 4 byte, page aligned 8390 specific header. */
@@ -671,7 +688,6 @@ el2_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring
671 } 688 }
672 blocked:; 689 blocked:;
673 outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL); 690 outb_p(ei_status.interface_num == 0 ? ECNTRL_THIN : ECNTRL_AUI, E33G_CNTRL);
674 return;
675} 691}
676 692
677 693
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 29b8d1d63bde..88d766ee0e1b 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1055,7 +1055,7 @@ static void elp_timeout(struct net_device *dev)
1055 (stat & ACRF) ? "interrupt" : "command"); 1055 (stat & ACRF) ? "interrupt" : "command");
1056 if (elp_debug >= 1) 1056 if (elp_debug >= 1)
1057 pr_debug("%s: status %#02x\n", dev->name, stat); 1057 pr_debug("%s: status %#02x\n", dev->name, stat);
1058 dev->trans_start = jiffies; 1058 dev->trans_start = jiffies; /* prevent tx timeout */
1059 dev->stats.tx_dropped++; 1059 dev->stats.tx_dropped++;
1060 netif_wake_queue(dev); 1060 netif_wake_queue(dev);
1061} 1061}
@@ -1093,11 +1093,6 @@ static netdev_tx_t elp_start_xmit(struct sk_buff *skb, struct net_device *dev)
1093 if (elp_debug >= 3) 1093 if (elp_debug >= 3)
1094 pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len); 1094 pr_debug("%s: packet of length %d sent\n", dev->name, (int) skb->len);
1095 1095
1096 /*
1097 * start the transmit timeout
1098 */
1099 dev->trans_start = jiffies;
1100
1101 prime_rx(dev); 1096 prime_rx(dev);
1102 spin_unlock_irqrestore(&adapter->lock, flags); 1097 spin_unlock_irqrestore(&adapter->lock, flags);
1103 netif_start_queue(dev); 1098 netif_start_queue(dev);
@@ -1216,7 +1211,7 @@ static int elp_close(struct net_device *dev)
1216static void elp_set_mc_list(struct net_device *dev) 1211static void elp_set_mc_list(struct net_device *dev)
1217{ 1212{
1218 elp_device *adapter = netdev_priv(dev); 1213 elp_device *adapter = netdev_priv(dev);
1219 struct dev_mc_list *dmi; 1214 struct netdev_hw_addr *ha;
1220 int i; 1215 int i;
1221 unsigned long flags; 1216 unsigned long flags;
1222 1217
@@ -1231,8 +1226,9 @@ static void elp_set_mc_list(struct net_device *dev)
1231 adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST; 1226 adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
1232 adapter->tx_pcb.length = 6 * netdev_mc_count(dev); 1227 adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
1233 i = 0; 1228 i = 0;
1234 netdev_for_each_mc_addr(dmi, dev) 1229 netdev_for_each_mc_addr(ha, dev)
1235 memcpy(adapter->tx_pcb.data.multicast[i++], dmi->dmi_addr, 6); 1230 memcpy(adapter->tx_pcb.data.multicast[i++],
1231 ha->addr, 6);
1236 adapter->got[CMD_LOAD_MULTICAST_LIST] = 0; 1232 adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
1237 if (!send_pcb(dev, &adapter->tx_pcb)) 1233 if (!send_pcb(dev, &adapter->tx_pcb))
1238 pr_err("%s: couldn't send set_multicast command\n", dev->name); 1234 pr_err("%s: couldn't send set_multicast command\n", dev->name);
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index b32b7a1710b7..82eaf65d2d85 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -449,7 +449,6 @@ static int __init el16_probe1(struct net_device *dev, int ioaddr)
449 pr_debug("%s", version); 449 pr_debug("%s", version);
450 450
451 lp = netdev_priv(dev); 451 lp = netdev_priv(dev);
452 memset(lp, 0, sizeof(*lp));
453 spin_lock_init(&lp->lock); 452 spin_lock_init(&lp->lock);
454 lp->base = ioremap(dev->mem_start, RX_BUF_END); 453 lp->base = ioremap(dev->mem_start, RX_BUF_END);
455 if (!lp->base) { 454 if (!lp->base) {
@@ -505,7 +504,7 @@ static void el16_tx_timeout (struct net_device *dev)
505 outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */ 504 outb (0, ioaddr + SIGNAL_CA); /* Issue channel-attn. */
506 lp->last_restart = dev->stats.tx_packets; 505 lp->last_restart = dev->stats.tx_packets;
507 } 506 }
508 dev->trans_start = jiffies; 507 dev->trans_start = jiffies; /* prevent tx timeout */
509 netif_wake_queue (dev); 508 netif_wake_queue (dev);
510} 509}
511 510
@@ -529,7 +528,6 @@ static netdev_tx_t el16_send_packet (struct sk_buff *skb,
529 528
530 hardware_send_packet (dev, buf, skb->len, length - skb->len); 529 hardware_send_packet (dev, buf, skb->len, length - skb->len);
531 530
532 dev->trans_start = jiffies;
533 /* Enable the 82586 interrupt input. */ 531 /* Enable the 82586 interrupt input. */
534 outb (0x84, ioaddr + MISC_CTRL); 532 outb (0x84, ioaddr + MISC_CTRL);
535 533
@@ -766,7 +764,6 @@ static void init_82586_mem(struct net_device *dev)
766 if (net_debug > 4) 764 if (net_debug > 4)
767 pr_debug("%s: Initialized 82586, status %04x.\n", dev->name, 765 pr_debug("%s: Initialized 82586, status %04x.\n", dev->name,
768 readw(shmem+iSCB_STATUS)); 766 readw(shmem+iSCB_STATUS));
769 return;
770} 767}
771 768
772static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad) 769static void hardware_send_packet(struct net_device *dev, void *buf, short length, short pad)
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index ab9bb3c52002..91abb965fb44 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -807,7 +807,7 @@ el3_tx_timeout (struct net_device *dev)
807 dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), 807 dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
808 inw(ioaddr + TX_FREE)); 808 inw(ioaddr + TX_FREE));
809 dev->stats.tx_errors++; 809 dev->stats.tx_errors++;
810 dev->trans_start = jiffies; 810 dev->trans_start = jiffies; /* prevent tx timeout */
811 /* Issue TX_RESET and TX_START commands. */ 811 /* Issue TX_RESET and TX_START commands. */
812 outw(TxReset, ioaddr + EL3_CMD); 812 outw(TxReset, ioaddr + EL3_CMD);
813 outw(TxEnable, ioaddr + EL3_CMD); 813 outw(TxEnable, ioaddr + EL3_CMD);
@@ -868,7 +868,6 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
868 /* ... and the packet rounded to a doubleword. */ 868 /* ... and the packet rounded to a doubleword. */
869 outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); 869 outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
870 870
871 dev->trans_start = jiffies;
872 if (inw(ioaddr + TX_FREE) > 1536) 871 if (inw(ioaddr + TX_FREE) > 1536)
873 netif_start_queue(dev); 872 netif_start_queue(dev);
874 else 873 else
@@ -1038,7 +1037,6 @@ static void update_stats(struct net_device *dev)
1038 /* Back to window 1, and turn statistics back on. */ 1037 /* Back to window 1, and turn statistics back on. */
1039 EL3WINDOW(1); 1038 EL3WINDOW(1);
1040 outw(StatsEnable, ioaddr + EL3_CMD); 1039 outw(StatsEnable, ioaddr + EL3_CMD);
1041 return;
1042} 1040}
1043 1041
1044static int 1042static int
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 2e17837be546..3bba835f1a21 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -958,7 +958,6 @@ static void corkscrew_timer(unsigned long data)
958 dev->name, media_tbl[dev->if_port].name); 958 dev->name, media_tbl[dev->if_port].name);
959 959
960#endif /* AUTOMEDIA */ 960#endif /* AUTOMEDIA */
961 return;
962} 961}
963 962
964static void corkscrew_timeout(struct net_device *dev) 963static void corkscrew_timeout(struct net_device *dev)
@@ -992,7 +991,7 @@ static void corkscrew_timeout(struct net_device *dev)
992 if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) 991 if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
993 break; 992 break;
994 outw(TxEnable, ioaddr + EL3_CMD); 993 outw(TxEnable, ioaddr + EL3_CMD);
995 dev->trans_start = jiffies; 994 dev->trans_start = jiffies; /* prevent tx timeout */
996 dev->stats.tx_errors++; 995 dev->stats.tx_errors++;
997 dev->stats.tx_dropped++; 996 dev->stats.tx_dropped++;
998 netif_wake_queue(dev); 997 netif_wake_queue(dev);
@@ -1055,7 +1054,6 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
1055 prev_entry->status &= ~0x80000000; 1054 prev_entry->status &= ~0x80000000;
1056 netif_wake_queue(dev); 1055 netif_wake_queue(dev);
1057 } 1056 }
1058 dev->trans_start = jiffies;
1059 return NETDEV_TX_OK; 1057 return NETDEV_TX_OK;
1060 } 1058 }
1061 /* Put out the doubleword header... */ 1059 /* Put out the doubleword header... */
@@ -1091,7 +1089,6 @@ static netdev_tx_t corkscrew_start_xmit(struct sk_buff *skb,
1091 outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD); 1089 outw(SetTxThreshold + (1536 >> 2), ioaddr + EL3_CMD);
1092#endif /* bus master */ 1090#endif /* bus master */
1093 1091
1094 dev->trans_start = jiffies;
1095 1092
1096 /* Clear the Tx status stack. */ 1093 /* Clear the Tx status stack. */
1097 { 1094 {
@@ -1518,7 +1515,6 @@ static void update_stats(int ioaddr, struct net_device *dev)
1518 1515
1519 /* We change back to window 7 (not 1) with the Vortex. */ 1516 /* We change back to window 7 (not 1) with the Vortex. */
1520 EL3WINDOW(7); 1517 EL3WINDOW(7);
1521 return;
1522} 1518}
1523 1519
1524/* This new version of set_rx_mode() supports v1.4 kernels. 1520/* This new version of set_rx_mode() supports v1.4 kernels.
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 1719079cc498..a7b0e5e43a52 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -503,7 +503,6 @@ static int __init do_elmc_probe(struct net_device *dev)
503 break; 503 break;
504 } 504 }
505 505
506 memset(pr, 0, sizeof(struct priv));
507 pr->slot = slot; 506 pr->slot = slot;
508 507
509 pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision, 508 pr_info("%s: 3Com 3c523 Rev 0x%x at %#lx\n", dev->name, (int) revision,
@@ -624,7 +623,7 @@ static int init586(struct net_device *dev)
624 volatile struct iasetup_cmd_struct *ias_cmd; 623 volatile struct iasetup_cmd_struct *ias_cmd;
625 volatile struct tdr_cmd_struct *tdr_cmd; 624 volatile struct tdr_cmd_struct *tdr_cmd;
626 volatile struct mcsetup_cmd_struct *mc_cmd; 625 volatile struct mcsetup_cmd_struct *mc_cmd;
627 struct dev_mc_list *dmi; 626 struct netdev_hw_addr *ha;
628 int num_addrs = netdev_mc_count(dev); 627 int num_addrs = netdev_mc_count(dev);
629 628
630 ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct)); 629 ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
@@ -787,8 +786,9 @@ static int init586(struct net_device *dev)
787 mc_cmd->cmd_link = 0xffff; 786 mc_cmd->cmd_link = 0xffff;
788 mc_cmd->mc_cnt = num_addrs * 6; 787 mc_cmd->mc_cnt = num_addrs * 6;
789 i = 0; 788 i = 0;
790 netdev_for_each_mc_addr(dmi, dev) 789 netdev_for_each_mc_addr(ha, dev)
791 memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6); 790 memcpy((char *) mc_cmd->mc_list[i++],
791 ha->addr, 6);
792 p->scb->cbl_offset = make16(mc_cmd); 792 p->scb->cbl_offset = make16(mc_cmd);
793 p->scb->cmd = CUC_START; 793 p->scb->cmd = CUC_START;
794 elmc_id_attn586(); 794 elmc_id_attn586();
@@ -1152,7 +1152,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
1152 p->scb->cmd = CUC_START; 1152 p->scb->cmd = CUC_START;
1153 p->xmit_cmds[0]->cmd_status = 0; 1153 p->xmit_cmds[0]->cmd_status = 0;
1154 elmc_attn586(); 1154 elmc_attn586();
1155 dev->trans_start = jiffies;
1156 if (!i) { 1155 if (!i) {
1157 dev_kfree_skb(skb); 1156 dev_kfree_skb(skb);
1158 } 1157 }
@@ -1176,7 +1175,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
1176 p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0; 1175 p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
1177 1176
1178 p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0])); 1177 p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
1179 dev->trans_start = jiffies;
1180 p->nop_point = next_nop; 1178 p->nop_point = next_nop;
1181 dev_kfree_skb(skb); 1179 dev_kfree_skb(skb);
1182#endif 1180#endif
@@ -1190,7 +1188,6 @@ static netdev_tx_t elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
1190 = make16((p->nop_cmds[next_nop])); 1188 = make16((p->nop_cmds[next_nop]));
1191 p->nop_cmds[next_nop]->cmd_status = 0; 1189 p->nop_cmds[next_nop]->cmd_status = 0;
1192 p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count])); 1190 p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
1193 dev->trans_start = jiffies;
1194 p->xmit_count = next_nop; 1191 p->xmit_count = next_nop;
1195 if (p->xmit_count != p->xmit_last) 1192 if (p->xmit_count != p->xmit_last)
1196 netif_wake_queue(dev); 1193 netif_wake_queue(dev);
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 5c07b147ec99..38395dfa4963 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1533,7 +1533,7 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1533 { 1533 {
1534 unsigned char block[62]; 1534 unsigned char block[62];
1535 unsigned char *bp; 1535 unsigned char *bp;
1536 struct dev_mc_list *dmc; 1536 struct netdev_hw_addr *ha;
1537 1537
1538 if(retry==0) 1538 if(retry==0)
1539 lp->mc_list_valid = 0; 1539 lp->mc_list_valid = 0;
@@ -1543,8 +1543,8 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1543 block[0]=netdev_mc_count(dev); 1543 block[0]=netdev_mc_count(dev);
1544 bp=block+2; 1544 bp=block+2;
1545 1545
1546 netdev_for_each_mc_addr(dmc, dev) { 1546 netdev_for_each_mc_addr(ha, dev) {
1547 memcpy(bp, dmc->dmi_addr, 6); 1547 memcpy(bp, ha->addr, 6);
1548 bp+=6; 1548 bp+=6;
1549 } 1549 }
1550 if(mc32_command_nowait(dev, 2, block, 1550 if(mc32_command_nowait(dev, 2, block,
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 5f92fdbe66e2..d75803e6e527 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1855,7 +1855,6 @@ leave_media_alone:
1855 mod_timer(&vp->timer, RUN_AT(next_tick)); 1855 mod_timer(&vp->timer, RUN_AT(next_tick));
1856 if (vp->deferred) 1856 if (vp->deferred)
1857 iowrite16(FakeIntr, ioaddr + EL3_CMD); 1857 iowrite16(FakeIntr, ioaddr + EL3_CMD);
1858 return;
1859} 1858}
1860 1859
1861static void vortex_tx_timeout(struct net_device *dev) 1860static void vortex_tx_timeout(struct net_device *dev)
@@ -1917,7 +1916,7 @@ static void vortex_tx_timeout(struct net_device *dev)
1917 1916
1918 /* Issue Tx Enable */ 1917 /* Issue Tx Enable */
1919 iowrite16(TxEnable, ioaddr + EL3_CMD); 1918 iowrite16(TxEnable, ioaddr + EL3_CMD);
1920 dev->trans_start = jiffies; 1919 dev->trans_start = jiffies; /* prevent tx timeout */
1921 1920
1922 /* Switch to register set 7 for normal use. */ 1921 /* Switch to register set 7 for normal use. */
1923 EL3WINDOW(7); 1922 EL3WINDOW(7);
@@ -2063,7 +2062,6 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2063 } 2062 }
2064 } 2063 }
2065 2064
2066 dev->trans_start = jiffies;
2067 2065
2068 /* Clear the Tx status stack. */ 2066 /* Clear the Tx status stack. */
2069 { 2067 {
@@ -2129,8 +2127,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2129 int i; 2127 int i;
2130 2128
2131 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, 2129 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
2132 skb->len-skb->data_len, PCI_DMA_TODEVICE)); 2130 skb_headlen(skb), PCI_DMA_TODEVICE));
2133 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len); 2131 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
2134 2132
2135 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2133 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2136 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2134 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2174,7 +2172,6 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2174 } 2172 }
2175 iowrite16(DownUnstall, ioaddr + EL3_CMD); 2173 iowrite16(DownUnstall, ioaddr + EL3_CMD);
2176 spin_unlock_irqrestore(&vp->lock, flags); 2174 spin_unlock_irqrestore(&vp->lock, flags);
2177 dev->trans_start = jiffies;
2178 return NETDEV_TX_OK; 2175 return NETDEV_TX_OK;
2179} 2176}
2180 2177
@@ -2800,7 +2797,6 @@ static void update_stats(void __iomem *ioaddr, struct net_device *dev)
2800 } 2797 }
2801 2798
2802 EL3WINDOW(old_window >> 13); 2799 EL3WINDOW(old_window >> 13);
2803 return;
2804} 2800}
2805 2801
2806static int vortex_nway_reset(struct net_device *dev) 2802static int vortex_nway_reset(struct net_device *dev)
@@ -3122,7 +3118,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
3122 iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); 3118 iowrite16(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
3123 mdio_delay(); 3119 mdio_delay();
3124 } 3120 }
3125 return;
3126} 3121}
3127 3122
3128/* ACPI: Advanced Configuration and Power Interface. */ 3123/* ACPI: Advanced Configuration and Power Interface. */
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 500e135723bd..903bcb3ef5bd 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -262,7 +262,7 @@ static int lance_reset (struct net_device *dev)
262 262
263 load_csrs (lp); 263 load_csrs (lp);
264 lance_init_ring (dev); 264 lance_init_ring (dev);
265 dev->trans_start = jiffies; 265 dev->trans_start = jiffies; /* prevent tx timeout */
266 status = init_restart_lance (lp); 266 status = init_restart_lance (lp);
267#ifdef DEBUG_DRIVER 267#ifdef DEBUG_DRIVER
268 printk ("Lance restart=%d\n", status); 268 printk ("Lance restart=%d\n", status);
@@ -526,7 +526,7 @@ void lance_tx_timeout(struct net_device *dev)
526{ 526{
527 printk("lance_tx_timeout\n"); 527 printk("lance_tx_timeout\n");
528 lance_reset(dev); 528 lance_reset(dev);
529 dev->trans_start = jiffies; 529 dev->trans_start = jiffies; /* prevent tx timeout */
530 netif_wake_queue (dev); 530 netif_wake_queue (dev);
531} 531}
532EXPORT_SYMBOL_GPL(lance_tx_timeout); 532EXPORT_SYMBOL_GPL(lance_tx_timeout);
@@ -574,7 +574,6 @@ int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
574 outs++; 574 outs++;
575 /* Kick the lance: transmit now */ 575 /* Kick the lance: transmit now */
576 WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD); 576 WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
577 dev->trans_start = jiffies;
578 dev_kfree_skb (skb); 577 dev_kfree_skb (skb);
579 578
580 spin_lock_irqsave (&lp->devlock, flags); 579 spin_lock_irqsave (&lp->devlock, flags);
@@ -594,7 +593,7 @@ static void lance_load_multicast (struct net_device *dev)
594 struct lance_private *lp = netdev_priv(dev); 593 struct lance_private *lp = netdev_priv(dev);
595 volatile struct lance_init_block *ib = lp->init_block; 594 volatile struct lance_init_block *ib = lp->init_block;
596 volatile u16 *mcast_table = (u16 *)&ib->filter; 595 volatile u16 *mcast_table = (u16 *)&ib->filter;
597 struct dev_mc_list *dmi; 596 struct netdev_hw_addr *ha;
598 char *addrs; 597 char *addrs;
599 u32 crc; 598 u32 crc;
600 599
@@ -609,8 +608,8 @@ static void lance_load_multicast (struct net_device *dev)
609 ib->filter [1] = 0; 608 ib->filter [1] = 0;
610 609
611 /* Add addresses */ 610 /* Add addresses */
612 netdev_for_each_mc_addr(dmi, dev) { 611 netdev_for_each_mc_addr(ha, dev) {
613 addrs = dmi->dmi_addr; 612 addrs = ha->addr;
614 613
615 /* multicast address? */ 614 /* multicast address? */
616 if (!(*addrs & 1)) 615 if (!(*addrs & 1))
@@ -620,7 +619,6 @@ static void lance_load_multicast (struct net_device *dev)
620 crc = crc >> 26; 619 crc = crc >> 26;
621 mcast_table [crc >> 4] |= 1 << (crc & 0xf); 620 mcast_table [crc >> 4] |= 1 << (crc & 0xf);
622 } 621 }
623 return;
624} 622}
625 623
626 624
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index a09e6ce3eaa0..9c149750e2bf 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -882,7 +882,6 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
882 spin_unlock_irqrestore(&cp->lock, intr_flags); 882 spin_unlock_irqrestore(&cp->lock, intr_flags);
883 883
884 cpw8(TxPoll, NormalTxPoll); 884 cpw8(TxPoll, NormalTxPoll);
885 dev->trans_start = jiffies;
886 885
887 return NETDEV_TX_OK; 886 return NETDEV_TX_OK;
888} 887}
@@ -910,11 +909,11 @@ static void __cp_set_rx_mode (struct net_device *dev)
910 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 909 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
911 mc_filter[1] = mc_filter[0] = 0xffffffff; 910 mc_filter[1] = mc_filter[0] = 0xffffffff;
912 } else { 911 } else {
913 struct dev_mc_list *mclist; 912 struct netdev_hw_addr *ha;
914 rx_mode = AcceptBroadcast | AcceptMyPhys; 913 rx_mode = AcceptBroadcast | AcceptMyPhys;
915 mc_filter[1] = mc_filter[0] = 0; 914 mc_filter[1] = mc_filter[0] = 0;
916 netdev_for_each_mc_addr(mclist, dev) { 915 netdev_for_each_mc_addr(ha, dev) {
917 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 916 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
918 917
919 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 918 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
920 rx_mode |= AcceptMulticast; 919 rx_mode |= AcceptMulticast;
@@ -1225,8 +1224,6 @@ static void cp_tx_timeout(struct net_device *dev)
1225 netif_wake_queue(dev); 1224 netif_wake_queue(dev);
1226 1225
1227 spin_unlock_irqrestore(&cp->lock, flags); 1226 spin_unlock_irqrestore(&cp->lock, flags);
1228
1229 return;
1230} 1227}
1231 1228
1232#ifdef BROKEN 1229#ifdef BROKEN
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index a03d291de854..4ba72933f0da 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1716,8 +1716,6 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
1716 RTL_W32_F (TxStatus0 + (entry * sizeof (u32)), 1716 RTL_W32_F (TxStatus0 + (entry * sizeof (u32)),
1717 tp->tx_flag | max(len, (unsigned int)ETH_ZLEN)); 1717 tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
1718 1718
1719 dev->trans_start = jiffies;
1720
1721 tp->cur_tx++; 1719 tp->cur_tx++;
1722 1720
1723 if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) 1721 if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
@@ -1944,7 +1942,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
1944 netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n", 1942 netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
1945 __func__, rx_status, rx_size, cur_rx); 1943 __func__, rx_status, rx_size, cur_rx);
1946#if RTL8139_DEBUG > 2 1944#if RTL8139_DEBUG > 2
1947 print_dump_hex(KERN_DEBUG, "Frame contents: ", 1945 print_hex_dump(KERN_DEBUG, "Frame contents: ",
1948 DUMP_PREFIX_OFFSET, 16, 1, 1946 DUMP_PREFIX_OFFSET, 16, 1,
1949 &rx_ring[ring_offset], 70, true); 1947 &rx_ring[ring_offset], 70, true);
1950#endif 1948#endif
@@ -2503,11 +2501,11 @@ static void __set_rx_mode (struct net_device *dev)
2503 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 2501 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
2504 mc_filter[1] = mc_filter[0] = 0xffffffff; 2502 mc_filter[1] = mc_filter[0] = 0xffffffff;
2505 } else { 2503 } else {
2506 struct dev_mc_list *mclist; 2504 struct netdev_hw_addr *ha;
2507 rx_mode = AcceptBroadcast | AcceptMyPhys; 2505 rx_mode = AcceptBroadcast | AcceptMyPhys;
2508 mc_filter[1] = mc_filter[0] = 0; 2506 mc_filter[1] = mc_filter[0] = 0;
2509 netdev_for_each_mc_addr(mclist, dev) { 2507 netdev_for_each_mc_addr(ha, dev) {
2510 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 2508 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2511 2509
2512 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 2510 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2513 rx_mode |= AcceptMulticast; 2511 rx_mode |= AcceptMulticast;
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 56e68db48861..dd8dc15556cb 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1050,7 +1050,7 @@ static void i596_tx_timeout (struct net_device *dev)
1050 lp->last_restart = dev->stats.tx_packets; 1050 lp->last_restart = dev->stats.tx_packets;
1051 } 1051 }
1052 1052
1053 dev->trans_start = jiffies; 1053 dev->trans_start = jiffies; /* prevent tx timeout */
1054 netif_wake_queue (dev); 1054 netif_wake_queue (dev);
1055} 1055}
1056 1056
@@ -1060,7 +1060,6 @@ static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1060 struct tx_cmd *tx_cmd; 1060 struct tx_cmd *tx_cmd;
1061 struct i596_tbd *tbd; 1061 struct i596_tbd *tbd;
1062 short length = skb->len; 1062 short length = skb->len;
1063 dev->trans_start = jiffies;
1064 1063
1065 DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n", 1064 DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
1066 dev->name, skb->len, skb->data)); 1065 dev->name, skb->len, skb->data));
@@ -1542,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev)
1542 } 1541 }
1543 1542
1544 if (!netdev_mc_empty(dev)) { 1543 if (!netdev_mc_empty(dev)) {
1545 struct dev_mc_list *dmi; 1544 struct netdev_hw_addr *ha;
1546 unsigned char *cp; 1545 unsigned char *cp;
1547 struct mc_cmd *cmd; 1546 struct mc_cmd *cmd;
1548 1547
@@ -1552,10 +1551,10 @@ static void set_multicast_list(struct net_device *dev)
1552 cmd->cmd.command = CmdMulticastList; 1551 cmd->cmd.command = CmdMulticastList;
1553 cmd->mc_cnt = cnt * ETH_ALEN; 1552 cmd->mc_cnt = cnt * ETH_ALEN;
1554 cp = cmd->mc_addrs; 1553 cp = cmd->mc_addrs;
1555 netdev_for_each_mc_addr(dmi, dev) { 1554 netdev_for_each_mc_addr(ha, dev) {
1556 if (!cnt--) 1555 if (!cnt--)
1557 break; 1556 break;
1558 memcpy(cp, dmi->dmi_addr, ETH_ALEN); 1557 memcpy(cp, ha->addr, ETH_ALEN);
1559 if (i596_debug > 1) 1558 if (i596_debug > 1)
1560 DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n", 1559 DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
1561 dev->name, cp)); 1560 dev->name, cp));
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7b832c727f87..2decc597bda7 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -483,7 +483,7 @@ config XTENSA_XT2000_SONIC
483 This is the driver for the onboard card of the Xtensa XT2000 board. 483 This is the driver for the onboard card of the Xtensa XT2000 board.
484 484
485config MIPS_AU1X00_ENET 485config MIPS_AU1X00_ENET
486 bool "MIPS AU1000 Ethernet support" 486 tristate "MIPS AU1000 Ethernet support"
487 depends on SOC_AU1X00 487 depends on SOC_AU1X00
488 select PHYLIB 488 select PHYLIB
489 select CRC32 489 select CRC32
@@ -887,6 +887,13 @@ config BFIN_MAC_RMII
887 help 887 help
888 Use Reduced PHY MII Interface 888 Use Reduced PHY MII Interface
889 889
890config BFIN_MAC_USE_HWSTAMP
891 bool "Use IEEE 1588 hwstamp"
892 depends on BFIN_MAC && BF518
893 default y
894 help
895 To support the IEEE 1588 Precision Time Protocol (PTP), select y here
896
890config SMC9194 897config SMC9194
891 tristate "SMC 9194 support" 898 tristate "SMC 9194 support"
892 depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN) 899 depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
@@ -1453,20 +1460,6 @@ config FORCEDETH
1453 To compile this driver as a module, choose M here. The module 1460 To compile this driver as a module, choose M here. The module
1454 will be called forcedeth. 1461 will be called forcedeth.
1455 1462
1456config FORCEDETH_NAPI
1457 bool "Use Rx Polling (NAPI) (EXPERIMENTAL)"
1458 depends on FORCEDETH && EXPERIMENTAL
1459 help
1460 NAPI is a new driver API designed to reduce CPU and interrupt load
1461 when the driver is receiving lots of packets from the card. It is
1462 still somewhat experimental and thus not yet enabled by default.
1463
1464 If your estimated Rx load is 10kpps or more, or if the card will be
1465 deployed on potentially unfriendly networks (e.g. in a firewall),
1466 then say Y here.
1467
1468 If in doubt, say N.
1469
1470config CS89x0 1463config CS89x0
1471 tristate "CS89x0 support" 1464 tristate "CS89x0 support"
1472 depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \ 1465 depends on NET_ETHERNET && (ISA || EISA || MACH_IXDP2351 \
@@ -1916,6 +1909,7 @@ config FEC
1916 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" 1909 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
1917 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \ 1910 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
1918 MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 1911 MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
1912 select PHYLIB
1919 help 1913 help
1920 Say Y here if you want to use the built-in 10/100 Fast ethernet 1914 Say Y here if you want to use the built-in 10/100 Fast ethernet
1921 controller on some Motorola ColdFire and Freescale i.MX processors. 1915 controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -2434,8 +2428,8 @@ config MV643XX_ETH
2434 2428
2435config XILINX_LL_TEMAC 2429config XILINX_LL_TEMAC
2436 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" 2430 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
2431 depends on PPC || MICROBLAZE
2437 select PHYLIB 2432 select PHYLIB
2438 depends on PPC_DCR_NATIVE
2439 help 2433 help
2440 This driver supports the Xilinx 10/100/1000 LocalLink TEMAC 2434 This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
2441 core used in Xilinx Spartan and Virtex FPGAs 2435 core used in Xilinx Spartan and Virtex FPGAs
@@ -2618,11 +2612,11 @@ config EHEA
2618 will be called ehea. 2612 will be called ehea.
2619 2613
2620config ENIC 2614config ENIC
2621 tristate "Cisco 10G Ethernet NIC support" 2615 tristate "Cisco VIC Ethernet NIC Support"
2622 depends on PCI && INET 2616 depends on PCI && INET
2623 select INET_LRO 2617 select INET_LRO
2624 help 2618 help
2625 This enables the support for the Cisco 10G Ethernet card. 2619 This enables the support for the Cisco VIC Ethernet card.
2626 2620
2627config IXGBE 2621config IXGBE
2628 tristate "Intel(R) 10GbE PCI Express adapters support" 2622 tristate "Intel(R) 10GbE PCI Express adapters support"
@@ -2862,6 +2856,8 @@ source "drivers/ieee802154/Kconfig"
2862 2856
2863source "drivers/s390/net/Kconfig" 2857source "drivers/s390/net/Kconfig"
2864 2858
2859source "drivers/net/caif/Kconfig"
2860
2865config XEN_NETDEV_FRONTEND 2861config XEN_NETDEV_FRONTEND
2866 tristate "Xen network device frontend driver" 2862 tristate "Xen network device frontend driver"
2867 depends on XEN 2863 depends on XEN
@@ -3180,17 +3176,12 @@ config PPPOATM
3180 3176
3181config PPPOL2TP 3177config PPPOL2TP
3182 tristate "PPP over L2TP (EXPERIMENTAL)" 3178 tristate "PPP over L2TP (EXPERIMENTAL)"
3183 depends on EXPERIMENTAL && PPP && INET 3179 depends on EXPERIMENTAL && L2TP && PPP
3184 help 3180 help
3185 Support for PPP-over-L2TP socket family. L2TP is a protocol 3181 Support for PPP-over-L2TP socket family. L2TP is a protocol
3186 used by ISPs and enterprises to tunnel PPP traffic over UDP 3182 used by ISPs and enterprises to tunnel PPP traffic over UDP
3187 tunnels. L2TP is replacing PPTP for VPN uses. 3183 tunnels. L2TP is replacing PPTP for VPN uses.
3188 3184
3189 This kernel component handles only L2TP data packets: a
3190 userland daemon handles L2TP the control protocol (tunnel
3191 and session setup). One such daemon is OpenL2TP
3192 (http://openl2tp.sourceforge.net/).
3193
3194config SLIP 3185config SLIP
3195 tristate "SLIP (serial line) support" 3186 tristate "SLIP (serial line) support"
3196 ---help--- 3187 ---help---
@@ -3277,15 +3268,14 @@ config NET_FC
3277 "SCSI generic support". 3268 "SCSI generic support".
3278 3269
3279config NETCONSOLE 3270config NETCONSOLE
3280 tristate "Network console logging support (EXPERIMENTAL)" 3271 tristate "Network console logging support"
3281 depends on EXPERIMENTAL
3282 ---help--- 3272 ---help---
3283 If you want to log kernel messages over the network, enable this. 3273 If you want to log kernel messages over the network, enable this.
3284 See <file:Documentation/networking/netconsole.txt> for details. 3274 See <file:Documentation/networking/netconsole.txt> for details.
3285 3275
3286config NETCONSOLE_DYNAMIC 3276config NETCONSOLE_DYNAMIC
3287 bool "Dynamic reconfiguration of logging targets (EXPERIMENTAL)" 3277 bool "Dynamic reconfiguration of logging targets"
3288 depends on NETCONSOLE && SYSFS && EXPERIMENTAL 3278 depends on NETCONSOLE && SYSFS
3289 select CONFIGFS_FS 3279 select CONFIGFS_FS
3290 help 3280 help
3291 This option enables the ability to dynamically reconfigure target 3281 This option enables the ability to dynamically reconfigure target
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a583b50d9de8..0a0512ae77da 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -161,7 +161,7 @@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
161obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o 161obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
162obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o 162obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
163obj-$(CONFIG_PPPOE) += pppox.o pppoe.o 163obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
164obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o 164obj-$(CONFIG_PPPOL2TP) += pppox.o
165 165
166obj-$(CONFIG_SLIP) += slip.o 166obj-$(CONFIG_SLIP) += slip.o
167obj-$(CONFIG_SLHC) += slhc.o 167obj-$(CONFIG_SLHC) += slhc.o
@@ -273,6 +273,7 @@ obj-$(CONFIG_USB_RTL8150) += usb/
273obj-$(CONFIG_USB_HSO) += usb/ 273obj-$(CONFIG_USB_HSO) += usb/
274obj-$(CONFIG_USB_USBNET) += usb/ 274obj-$(CONFIG_USB_USBNET) += usb/
275obj-$(CONFIG_USB_ZD1201) += usb/ 275obj-$(CONFIG_USB_ZD1201) += usb/
276obj-$(CONFIG_USB_IPHETH) += usb/
276 277
277obj-y += wireless/ 278obj-y += wireless/
278obj-$(CONFIG_NET_TULIP) += tulip/ 279obj-$(CONFIG_NET_TULIP) += tulip/
@@ -291,5 +292,6 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
291obj-$(CONFIG_SFC) += sfc/ 292obj-$(CONFIG_SFC) += sfc/
292 293
293obj-$(CONFIG_WIMAX) += wimax/ 294obj-$(CONFIG_WIMAX) += wimax/
295obj-$(CONFIG_CAIF) += caif/
294 296
295obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ 297obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index ed5e9742be2c..f142cc21e453 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -525,7 +525,7 @@ static inline int lance_reset (struct net_device *dev)
525 load_csrs (lp); 525 load_csrs (lp);
526 526
527 lance_init_ring (dev); 527 lance_init_ring (dev);
528 dev->trans_start = jiffies; 528 dev->trans_start = jiffies; /* prevent tx timeout */
529 netif_start_queue(dev); 529 netif_start_queue(dev);
530 530
531 status = init_restart_lance (lp); 531 status = init_restart_lance (lp);
@@ -588,7 +588,6 @@ static netdev_tx_t lance_start_xmit (struct sk_buff *skb,
588 588
589 /* Kick the lance: transmit now */ 589 /* Kick the lance: transmit now */
590 ll->rdp = LE_C0_INEA | LE_C0_TDMD; 590 ll->rdp = LE_C0_INEA | LE_C0_TDMD;
591 dev->trans_start = jiffies;
592 dev_kfree_skb (skb); 591 dev_kfree_skb (skb);
593 592
594 local_irq_restore(flags); 593 local_irq_restore(flags);
@@ -602,7 +601,7 @@ static void lance_load_multicast (struct net_device *dev)
602 struct lance_private *lp = netdev_priv(dev); 601 struct lance_private *lp = netdev_priv(dev);
603 volatile struct lance_init_block *ib = lp->init_block; 602 volatile struct lance_init_block *ib = lp->init_block;
604 volatile u16 *mcast_table = (u16 *)&ib->filter; 603 volatile u16 *mcast_table = (u16 *)&ib->filter;
605 struct dev_mc_list *dmi; 604 struct netdev_hw_addr *ha;
606 char *addrs; 605 char *addrs;
607 u32 crc; 606 u32 crc;
608 607
@@ -617,8 +616,8 @@ static void lance_load_multicast (struct net_device *dev)
617 ib->filter [1] = 0; 616 ib->filter [1] = 0;
618 617
619 /* Add addresses */ 618 /* Add addresses */
620 netdev_for_each_mc_addr(dmi, dev) { 619 netdev_for_each_mc_addr(ha, dev) {
621 addrs = dmi->dmi_addr; 620 addrs = ha->addr;
622 621
623 /* multicast address? */ 622 /* multicast address? */
624 if (!(*addrs & 1)) 623 if (!(*addrs & 1))
@@ -628,7 +627,6 @@ static void lance_load_multicast (struct net_device *dev)
628 crc = crc >> 26; 627 crc = crc >> 26;
629 mcast_table [crc >> 4] |= 1 << (crc & 0xf); 628 mcast_table [crc >> 4] |= 1 << (crc & 0xf);
630 } 629 }
631 return;
632} 630}
633 631
634static void lance_set_multicast (struct net_device *dev) 632static void lance_set_multicast (struct net_device *dev)
@@ -674,6 +672,7 @@ static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
674 { ZORRO_PROD_AMERISTAR_A2065 }, 672 { ZORRO_PROD_AMERISTAR_A2065 },
675 { 0 } 673 { 0 }
676}; 674};
675MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
677 676
678static struct zorro_driver a2065_driver = { 677static struct zorro_driver a2065_driver = {
679 .name = "a2065", 678 .name = "a2065",
diff --git a/drivers/net/ac3200.c b/drivers/net/ac3200.c
index eac73382c087..b9115a776fdd 100644
--- a/drivers/net/ac3200.c
+++ b/drivers/net/ac3200.c
@@ -307,8 +307,6 @@ static void ac_reset_8390(struct net_device *dev)
307 ei_status.txing = 0; 307 ei_status.txing = 0;
308 outb(AC_ENABLE, ioaddr + AC_RESET_PORT); 308 outb(AC_ENABLE, ioaddr + AC_RESET_PORT);
309 if (ei_debug > 1) printk("reset done\n"); 309 if (ei_debug > 1) printk("reset done\n");
310
311 return;
312} 310}
313 311
314/* Grab the 8390 specific header. Similar to the block_input routine, but 312/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 97a3dfd94dfa..b9a591604e5b 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -661,7 +661,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
661 dma_addr_t mapping; 661 dma_addr_t mapping;
662 662
663 ringp = &ap->skb->rx_std_skbuff[i]; 663 ringp = &ap->skb->rx_std_skbuff[i];
664 mapping = pci_unmap_addr(ringp, mapping); 664 mapping = dma_unmap_addr(ringp, mapping);
665 pci_unmap_page(ap->pdev, mapping, 665 pci_unmap_page(ap->pdev, mapping,
666 ACE_STD_BUFSIZE, 666 ACE_STD_BUFSIZE,
667 PCI_DMA_FROMDEVICE); 667 PCI_DMA_FROMDEVICE);
@@ -681,7 +681,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
681 dma_addr_t mapping; 681 dma_addr_t mapping;
682 682
683 ringp = &ap->skb->rx_mini_skbuff[i]; 683 ringp = &ap->skb->rx_mini_skbuff[i];
684 mapping = pci_unmap_addr(ringp,mapping); 684 mapping = dma_unmap_addr(ringp,mapping);
685 pci_unmap_page(ap->pdev, mapping, 685 pci_unmap_page(ap->pdev, mapping,
686 ACE_MINI_BUFSIZE, 686 ACE_MINI_BUFSIZE,
687 PCI_DMA_FROMDEVICE); 687 PCI_DMA_FROMDEVICE);
@@ -700,7 +700,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
700 dma_addr_t mapping; 700 dma_addr_t mapping;
701 701
702 ringp = &ap->skb->rx_jumbo_skbuff[i]; 702 ringp = &ap->skb->rx_jumbo_skbuff[i];
703 mapping = pci_unmap_addr(ringp, mapping); 703 mapping = dma_unmap_addr(ringp, mapping);
704 pci_unmap_page(ap->pdev, mapping, 704 pci_unmap_page(ap->pdev, mapping,
705 ACE_JUMBO_BUFSIZE, 705 ACE_JUMBO_BUFSIZE,
706 PCI_DMA_FROMDEVICE); 706 PCI_DMA_FROMDEVICE);
@@ -1683,7 +1683,7 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
1683 ACE_STD_BUFSIZE, 1683 ACE_STD_BUFSIZE,
1684 PCI_DMA_FROMDEVICE); 1684 PCI_DMA_FROMDEVICE);
1685 ap->skb->rx_std_skbuff[idx].skb = skb; 1685 ap->skb->rx_std_skbuff[idx].skb = skb;
1686 pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx], 1686 dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
1687 mapping, mapping); 1687 mapping, mapping);
1688 1688
1689 rd = &ap->rx_std_ring[idx]; 1689 rd = &ap->rx_std_ring[idx];
@@ -1744,7 +1744,7 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
1744 ACE_MINI_BUFSIZE, 1744 ACE_MINI_BUFSIZE,
1745 PCI_DMA_FROMDEVICE); 1745 PCI_DMA_FROMDEVICE);
1746 ap->skb->rx_mini_skbuff[idx].skb = skb; 1746 ap->skb->rx_mini_skbuff[idx].skb = skb;
1747 pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx], 1747 dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
1748 mapping, mapping); 1748 mapping, mapping);
1749 1749
1750 rd = &ap->rx_mini_ring[idx]; 1750 rd = &ap->rx_mini_ring[idx];
@@ -1800,7 +1800,7 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
1800 ACE_JUMBO_BUFSIZE, 1800 ACE_JUMBO_BUFSIZE,
1801 PCI_DMA_FROMDEVICE); 1801 PCI_DMA_FROMDEVICE);
1802 ap->skb->rx_jumbo_skbuff[idx].skb = skb; 1802 ap->skb->rx_jumbo_skbuff[idx].skb = skb;
1803 pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx], 1803 dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
1804 mapping, mapping); 1804 mapping, mapping);
1805 1805
1806 rd = &ap->rx_jumbo_ring[idx]; 1806 rd = &ap->rx_jumbo_ring[idx];
@@ -2013,7 +2013,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
2013 skb = rip->skb; 2013 skb = rip->skb;
2014 rip->skb = NULL; 2014 rip->skb = NULL;
2015 pci_unmap_page(ap->pdev, 2015 pci_unmap_page(ap->pdev,
2016 pci_unmap_addr(rip, mapping), 2016 dma_unmap_addr(rip, mapping),
2017 mapsize, 2017 mapsize,
2018 PCI_DMA_FROMDEVICE); 2018 PCI_DMA_FROMDEVICE);
2019 skb_put(skb, retdesc->size); 2019 skb_put(skb, retdesc->size);
@@ -2078,18 +2078,16 @@ static inline void ace_tx_int(struct net_device *dev,
2078 2078
2079 do { 2079 do {
2080 struct sk_buff *skb; 2080 struct sk_buff *skb;
2081 dma_addr_t mapping;
2082 struct tx_ring_info *info; 2081 struct tx_ring_info *info;
2083 2082
2084 info = ap->skb->tx_skbuff + idx; 2083 info = ap->skb->tx_skbuff + idx;
2085 skb = info->skb; 2084 skb = info->skb;
2086 mapping = pci_unmap_addr(info, mapping);
2087 2085
2088 if (mapping) { 2086 if (dma_unmap_len(info, maplen)) {
2089 pci_unmap_page(ap->pdev, mapping, 2087 pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
2090 pci_unmap_len(info, maplen), 2088 dma_unmap_len(info, maplen),
2091 PCI_DMA_TODEVICE); 2089 PCI_DMA_TODEVICE);
2092 pci_unmap_addr_set(info, mapping, 0); 2090 dma_unmap_len_set(info, maplen, 0);
2093 } 2091 }
2094 2092
2095 if (skb) { 2093 if (skb) {
@@ -2377,14 +2375,12 @@ static int ace_close(struct net_device *dev)
2377 2375
2378 for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) { 2376 for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
2379 struct sk_buff *skb; 2377 struct sk_buff *skb;
2380 dma_addr_t mapping;
2381 struct tx_ring_info *info; 2378 struct tx_ring_info *info;
2382 2379
2383 info = ap->skb->tx_skbuff + i; 2380 info = ap->skb->tx_skbuff + i;
2384 skb = info->skb; 2381 skb = info->skb;
2385 mapping = pci_unmap_addr(info, mapping);
2386 2382
2387 if (mapping) { 2383 if (dma_unmap_len(info, maplen)) {
2388 if (ACE_IS_TIGON_I(ap)) { 2384 if (ACE_IS_TIGON_I(ap)) {
2389 /* NB: TIGON_1 is special, tx_ring is in io space */ 2385 /* NB: TIGON_1 is special, tx_ring is in io space */
2390 struct tx_desc __iomem *tx; 2386 struct tx_desc __iomem *tx;
@@ -2395,10 +2391,10 @@ static int ace_close(struct net_device *dev)
2395 } else 2391 } else
2396 memset(ap->tx_ring + i, 0, 2392 memset(ap->tx_ring + i, 0,
2397 sizeof(struct tx_desc)); 2393 sizeof(struct tx_desc));
2398 pci_unmap_page(ap->pdev, mapping, 2394 pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
2399 pci_unmap_len(info, maplen), 2395 dma_unmap_len(info, maplen),
2400 PCI_DMA_TODEVICE); 2396 PCI_DMA_TODEVICE);
2401 pci_unmap_addr_set(info, mapping, 0); 2397 dma_unmap_len_set(info, maplen, 0);
2402 } 2398 }
2403 if (skb) { 2399 if (skb) {
2404 dev_kfree_skb(skb); 2400 dev_kfree_skb(skb);
@@ -2433,8 +2429,8 @@ ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
2433 2429
2434 info = ap->skb->tx_skbuff + idx; 2430 info = ap->skb->tx_skbuff + idx;
2435 info->skb = tail; 2431 info->skb = tail;
2436 pci_unmap_addr_set(info, mapping, mapping); 2432 dma_unmap_addr_set(info, mapping, mapping);
2437 pci_unmap_len_set(info, maplen, skb->len); 2433 dma_unmap_len_set(info, maplen, skb->len);
2438 return mapping; 2434 return mapping;
2439} 2435}
2440 2436
@@ -2553,8 +2549,8 @@ restart:
2553 } else { 2549 } else {
2554 info->skb = NULL; 2550 info->skb = NULL;
2555 } 2551 }
2556 pci_unmap_addr_set(info, mapping, mapping); 2552 dma_unmap_addr_set(info, mapping, mapping);
2557 pci_unmap_len_set(info, maplen, frag->size); 2553 dma_unmap_len_set(info, maplen, frag->size);
2558 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); 2554 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
2559 } 2555 }
2560 } 2556 }
@@ -2923,8 +2919,6 @@ static void __devinit ace_clear(struct ace_regs __iomem *regs, u32 dest, int siz
2923 dest += tsize; 2919 dest += tsize;
2924 size -= tsize; 2920 size -= tsize;
2925 } 2921 }
2926
2927 return;
2928} 2922}
2929 2923
2930 2924
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index 17079b927ffa..0681da7e8753 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -589,7 +589,7 @@ struct ace_info {
589 589
590struct ring_info { 590struct ring_info {
591 struct sk_buff *skb; 591 struct sk_buff *skb;
592 DECLARE_PCI_UNMAP_ADDR(mapping) 592 DEFINE_DMA_UNMAP_ADDR(mapping);
593}; 593};
594 594
595 595
@@ -600,8 +600,8 @@ struct ring_info {
600 */ 600 */
601struct tx_ring_info { 601struct tx_ring_info {
602 struct sk_buff *skb; 602 struct sk_buff *skb;
603 DECLARE_PCI_UNMAP_ADDR(mapping) 603 DEFINE_DMA_UNMAP_ADDR(mapping);
604 DECLARE_PCI_UNMAP_LEN(maplen) 604 DEFINE_DMA_UNMAP_LEN(maplen);
605}; 605};
606 606
607 607
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 8d58f0a8f42f..585c25f4b60c 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1339,8 +1339,6 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
1339 writel( VAL1 | TDMD0, lp->mmio + CMD0); 1339 writel( VAL1 | TDMD0, lp->mmio + CMD0);
1340 writel( VAL2 | RDMD0,lp->mmio + CMD0); 1340 writel( VAL2 | RDMD0,lp->mmio + CMD0);
1341 1341
1342 dev->trans_start = jiffies;
1343
1344 if(amd8111e_tx_queue_avail(lp) < 0){ 1342 if(amd8111e_tx_queue_avail(lp) < 0){
1345 netif_stop_queue(dev); 1343 netif_stop_queue(dev);
1346 } 1344 }
@@ -1376,7 +1374,7 @@ list to the device.
1376*/ 1374*/
1377static void amd8111e_set_multicast_list(struct net_device *dev) 1375static void amd8111e_set_multicast_list(struct net_device *dev)
1378{ 1376{
1379 struct dev_mc_list *mc_ptr; 1377 struct netdev_hw_addr *ha;
1380 struct amd8111e_priv *lp = netdev_priv(dev); 1378 struct amd8111e_priv *lp = netdev_priv(dev);
1381 u32 mc_filter[2] ; 1379 u32 mc_filter[2] ;
1382 int bit_num; 1380 int bit_num;
@@ -1407,8 +1405,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
1407 /* load all the multicast addresses in the logic filter */ 1405 /* load all the multicast addresses in the logic filter */
1408 lp->options |= OPTION_MULTICAST_ENABLE; 1406 lp->options |= OPTION_MULTICAST_ENABLE;
1409 mc_filter[1] = mc_filter[0] = 0; 1407 mc_filter[1] = mc_filter[0] = 0;
1410 netdev_for_each_mc_addr(mc_ptr, dev) { 1408 netdev_for_each_mc_addr(ha, dev) {
1411 bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f; 1409 bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
1412 mc_filter[bit_num >> 5] |= 1 << (bit_num & 31); 1410 mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
1413 } 1411 }
1414 amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF); 1412 amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
diff --git a/drivers/net/apne.c b/drivers/net/apne.c
index 1437f5d12121..2fe60f168108 100644
--- a/drivers/net/apne.c
+++ b/drivers/net/apne.c
@@ -521,7 +521,6 @@ apne_block_output(struct net_device *dev, int count,
521 521
522 outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */ 522 outb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
523 ei_status.dmaing &= ~0x01; 523 ei_status.dmaing &= ~0x01;
524 return;
525} 524}
526 525
527static irqreturn_t apne_interrupt(int irq, void *dev_id) 526static irqreturn_t apne_interrupt(int irq, void *dev_id)
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 6f8d6206b5c4..748c9f526e71 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -593,8 +593,6 @@ static void cops_load (struct net_device *dev)
593 tangent_wait_reset(ioaddr); 593 tangent_wait_reset(ioaddr);
594 inb(ioaddr); /* Clear initial ready signal. */ 594 inb(ioaddr); /* Clear initial ready signal. */
595 } 595 }
596
597 return;
598} 596}
599 597
600/* 598/*
@@ -701,8 +699,6 @@ static void cops_poll(unsigned long ltdev)
701 /* poll 20 times per second */ 699 /* poll 20 times per second */
702 cops_timer.expires = jiffies + HZ/20; 700 cops_timer.expires = jiffies + HZ/20;
703 add_timer(&cops_timer); 701 add_timer(&cops_timer);
704
705 return;
706} 702}
707 703
708/* 704/*
@@ -866,7 +862,7 @@ static void cops_timeout(struct net_device *dev)
866 } 862 }
867 printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name); 863 printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
868 cops_jumpstart(dev); /* Restart the card. */ 864 cops_jumpstart(dev); /* Restart the card. */
869 dev->trans_start = jiffies; 865 dev->trans_start = jiffies; /* prevent tx timeout */
870 netif_wake_queue(dev); 866 netif_wake_queue(dev);
871} 867}
872 868
@@ -919,9 +915,8 @@ static netdev_tx_t cops_send_packet(struct sk_buff *skb,
919 /* Done sending packet, update counters and cleanup. */ 915 /* Done sending packet, update counters and cleanup. */
920 dev->stats.tx_packets++; 916 dev->stats.tx_packets++;
921 dev->stats.tx_bytes += skb->len; 917 dev->stats.tx_bytes += skb->len;
922 dev->trans_start = jiffies;
923 dev_kfree_skb (skb); 918 dev_kfree_skb (skb);
924 return NETDEV_TX_OK; 919 return NETDEV_TX_OK;
925} 920}
926 921
927/* 922/*
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 6af65b656f31..adc07551739e 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -641,7 +641,6 @@ done:
641 inb_p(base+7); 641 inb_p(base+7);
642 inb_p(base+7); 642 inb_p(base+7);
643 } 643 }
644 return;
645} 644}
646 645
647 646
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index d8f029303754..a746ba272f04 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -654,7 +654,6 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
654 } 654 }
655 } 655 }
656 retval = NETDEV_TX_OK; 656 retval = NETDEV_TX_OK;
657 dev->trans_start = jiffies;
658 lp->next_tx = txbuf; 657 lp->next_tx = txbuf;
659 } else { 658 } else {
660 retval = NETDEV_TX_BUSY; 659 retval = NETDEV_TX_BUSY;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 2c712af6c265..48a1dbf01e60 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -164,8 +164,8 @@ static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
164 { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 164 { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
165 { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 165 { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
166 { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 166 { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
167 { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 167 { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT },
168 { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 168 { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT },
169 { 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 169 { 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
170 { 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, 170 { 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT },
171 {0,} 171 {0,}
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index fa1a2354f5f9..39214e512452 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -145,6 +145,7 @@ static struct zorro_device_id ariadne_zorro_tbl[] __devinitdata = {
145 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE }, 145 { ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
146 { 0 } 146 { 0 }
147}; 147};
148MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);
148 149
149static struct zorro_driver ariadne_driver = { 150static struct zorro_driver ariadne_driver = {
150 .name = "ariadne", 151 .name = "ariadne",
@@ -676,8 +677,6 @@ static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
676 lance->RAP = CSR0; /* PCnet-ISA Controller Status */ 677 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
677 lance->RDP = INEA|TDMD; 678 lance->RDP = INEA|TDMD;
678 679
679 dev->trans_start = jiffies;
680
681 if (lowb(priv->tx_ring[(entry+1) % TX_RING_SIZE]->TMD1) != 0) { 680 if (lowb(priv->tx_ring[(entry+1) % TX_RING_SIZE]->TMD1) != 0) {
682 netif_stop_queue(dev); 681 netif_stop_queue(dev);
683 priv->tx_full = 1; 682 priv->tx_full = 1;
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index f1f58c5e27bf..8c496fb1ac9e 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -383,12 +383,12 @@ static void am79c961_setmulticastlist (struct net_device *dev)
383 } else if (dev->flags & IFF_ALLMULTI) { 383 } else if (dev->flags & IFF_ALLMULTI) {
384 memset(multi_hash, 0xff, sizeof(multi_hash)); 384 memset(multi_hash, 0xff, sizeof(multi_hash));
385 } else { 385 } else {
386 struct dev_mc_list *dmi; 386 struct netdev_hw_addr *ha;
387 387
388 memset(multi_hash, 0x00, sizeof(multi_hash)); 388 memset(multi_hash, 0x00, sizeof(multi_hash));
389 389
390 netdev_for_each_mc_addr(dmi, dev) 390 netdev_for_each_mc_addr(ha, dev)
391 am79c961_mc_hash(dmi->dmi_addr, multi_hash); 391 am79c961_mc_hash(ha->addr, multi_hash);
392 } 392 }
393 393
394 spin_lock_irqsave(&priv->chip_lock, flags); 394 spin_lock_irqsave(&priv->chip_lock, flags);
@@ -469,7 +469,6 @@ am79c961_sendpacket(struct sk_buff *skb, struct net_device *dev)
469 469
470 spin_lock_irqsave(&priv->chip_lock, flags); 470 spin_lock_irqsave(&priv->chip_lock, flags);
471 write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA); 471 write_rreg (dev->base_addr, CSR0, CSR0_TDMD|CSR0_IENA);
472 dev->trans_start = jiffies;
473 spin_unlock_irqrestore(&priv->chip_lock, flags); 472 spin_unlock_irqrestore(&priv->chip_lock, flags);
474 473
475 /* 474 /*
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index aed5b5479b50..e07b314ed8fd 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -557,14 +557,14 @@ static int hash_get_index(__u8 *addr)
557 */ 557 */
558static void at91ether_sethashtable(struct net_device *dev) 558static void at91ether_sethashtable(struct net_device *dev)
559{ 559{
560 struct dev_mc_list *curr; 560 struct netdev_hw_addr *ha;
561 unsigned long mc_filter[2]; 561 unsigned long mc_filter[2];
562 unsigned int bitnr; 562 unsigned int bitnr;
563 563
564 mc_filter[0] = mc_filter[1] = 0; 564 mc_filter[0] = mc_filter[1] = 0;
565 565
566 netdev_for_each_mc_addr(curr, dev) { 566 netdev_for_each_mc_addr(ha, dev) {
567 bitnr = hash_get_index(curr->dmi_addr); 567 bitnr = hash_get_index(ha->addr);
568 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 568 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
569 } 569 }
570 570
@@ -824,7 +824,6 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
824 /* Set length of the packet in the Transmit Control register */ 824 /* Set length of the packet in the Transmit Control register */
825 at91_emac_write(AT91_EMAC_TCR, skb->len); 825 at91_emac_write(AT91_EMAC_TCR, skb->len);
826 826
827 dev->trans_start = jiffies;
828 } else { 827 } else {
829 printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n"); 828 printk(KERN_ERR "at91_ether.c: at91ether_start_xmit() called, but device is busy!\n");
830 return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb) 829 return NETDEV_TX_BUSY; /* if we return anything but zero, dev.c:1055 calls kfree_skb(skb)
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 6995169d285a..4a5ec9470aa1 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -311,11 +311,6 @@ err:
311 processed++; 311 processed++;
312 } 312 }
313 313
314 if (processed) {
315 wrw(ep, REG_RXDENQ, processed);
316 wrw(ep, REG_RXSTSENQ, processed);
317 }
318
319 return processed; 314 return processed;
320} 315}
321 316
@@ -350,6 +345,11 @@ poll_some_more:
350 goto poll_some_more; 345 goto poll_some_more;
351 } 346 }
352 347
348 if (rx) {
349 wrw(ep, REG_RXDENQ, rx);
350 wrw(ep, REG_RXSTSENQ, rx);
351 }
352
353 return rx; 353 return rx;
354} 354}
355 355
@@ -374,8 +374,6 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
374 skb->len, DMA_TO_DEVICE); 374 skb->len, DMA_TO_DEVICE);
375 dev_kfree_skb(skb); 375 dev_kfree_skb(skb);
376 376
377 dev->trans_start = jiffies;
378
379 spin_lock_irq(&ep->tx_pending_lock); 377 spin_lock_irq(&ep->tx_pending_lock);
380 ep->tx_pending++; 378 ep->tx_pending++;
381 if (ep->tx_pending == TX_QUEUE_ENTRIES) 379 if (ep->tx_pending == TX_QUEUE_ENTRIES)
diff --git a/drivers/net/arm/ether1.c b/drivers/net/arm/ether1.c
index e47c0d962857..b17ab5153f51 100644
--- a/drivers/net/arm/ether1.c
+++ b/drivers/net/arm/ether1.c
@@ -736,7 +736,6 @@ ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
736 local_irq_restore(flags); 736 local_irq_restore(flags);
737 737
738 /* handle transmit */ 738 /* handle transmit */
739 dev->trans_start = jiffies;
740 739
741 /* check to see if we have room for a full sized ether frame */ 740 /* check to see if we have room for a full sized ether frame */
742 tmp = priv(dev)->tx_head; 741 tmp = priv(dev)->tx_head;
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index d9de9bce2395..1361b7367c28 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -529,7 +529,6 @@ ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
529 return NETDEV_TX_BUSY; /* unable to queue */ 529 return NETDEV_TX_BUSY; /* unable to queue */
530 } 530 }
531 531
532 dev->trans_start = jiffies;
533 ptr = 0x600 * priv(dev)->tx_head; 532 ptr = 0x600 * priv(dev)->tx_head;
534 priv(dev)->tx_head = next_ptr; 533 priv(dev)->tx_head = next_ptr;
535 next_ptr *= 0x600; 534 next_ptr *= 0x600;
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 6be8b098b8b4..24df0325090c 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -708,7 +708,6 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
708 /* NPE firmware pads short frames with zeros internally */ 708 /* NPE firmware pads short frames with zeros internally */
709 wmb(); 709 wmb();
710 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); 710 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
711 dev->trans_start = jiffies;
712 711
713 if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */ 712 if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
714#if DEBUG_TX 713#if DEBUG_TX
@@ -736,7 +735,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
736static void eth_set_mcast_list(struct net_device *dev) 735static void eth_set_mcast_list(struct net_device *dev)
737{ 736{
738 struct port *port = netdev_priv(dev); 737 struct port *port = netdev_priv(dev);
739 struct dev_mc_list *mclist; 738 struct netdev_hw_addr *ha;
740 u8 diffs[ETH_ALEN], *addr; 739 u8 diffs[ETH_ALEN], *addr;
741 int i; 740 int i;
742 741
@@ -749,11 +748,11 @@ static void eth_set_mcast_list(struct net_device *dev)
749 memset(diffs, 0, ETH_ALEN); 748 memset(diffs, 0, ETH_ALEN);
750 749
751 addr = NULL; 750 addr = NULL;
752 netdev_for_each_mc_addr(mclist, dev) { 751 netdev_for_each_mc_addr(ha, dev) {
753 if (!addr) 752 if (!addr)
754 addr = mclist->dmi_addr; /* first MAC address */ 753 addr = ha->addr; /* first MAC address */
755 for (i = 0; i < ETH_ALEN; i++) 754 for (i = 0; i < ETH_ALEN; i++)
756 diffs[i] |= addr[i] ^ mclist->dmi_addr[i]; 755 diffs[i] |= addr[i] ^ ha->addr[i];
757 } 756 }
758 757
759 for (i = 0; i < ETH_ALEN; i++) { 758 for (i = 0; i < ETH_ALEN; i++) {
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 84f8a8f73802..54c6d849cf25 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -332,16 +332,16 @@ ks8695_init_partial_multicast(struct ks8695_priv *ksp,
332{ 332{
333 u32 low, high; 333 u32 low, high;
334 int i; 334 int i;
335 struct dev_mc_list *dmi; 335 struct netdev_hw_addr *ha;
336 336
337 i = 0; 337 i = 0;
338 netdev_for_each_mc_addr(dmi, ndev) { 338 netdev_for_each_mc_addr(ha, ndev) {
339 /* Ran out of space in chip? */ 339 /* Ran out of space in chip? */
340 BUG_ON(i == KS8695_NR_ADDRESSES); 340 BUG_ON(i == KS8695_NR_ADDRESSES);
341 341
342 low = (dmi->dmi_addr[2] << 24) | (dmi->dmi_addr[3] << 16) | 342 low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
343 (dmi->dmi_addr[4] << 8) | (dmi->dmi_addr[5]); 343 (ha->addr[4] << 8) | (ha->addr[5]);
344 high = (dmi->dmi_addr[0] << 8) | (dmi->dmi_addr[1]); 344 high = (ha->addr[0] << 8) | (ha->addr[1]);
345 345
346 ks8695_writereg(ksp, KS8695_AAL_(i), low); 346 ks8695_writereg(ksp, KS8695_AAL_(i), low);
347 ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high); 347 ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
@@ -1302,8 +1302,6 @@ ks8695_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1302 if (++ksp->tx_ring_used == MAX_TX_DESC) 1302 if (++ksp->tx_ring_used == MAX_TX_DESC)
1303 netif_stop_queue(ndev); 1303 netif_stop_queue(ndev);
1304 1304
1305 ndev->trans_start = jiffies;
1306
1307 /* Kick the TX DMA in case it decided to go IDLE */ 1305 /* Kick the TX DMA in case it decided to go IDLE */
1308 ks8695_writereg(ksp, KS8695_DTSC, 0); 1306 ks8695_writereg(ksp, KS8695_DTSC, 0);
1309 1307
@@ -1472,7 +1470,6 @@ ks8695_probe(struct platform_device *pdev)
1472 1470
1473 /* Configure our private structure a little */ 1471 /* Configure our private structure a little */
1474 ksp = netdev_priv(ndev); 1472 ksp = netdev_priv(ndev);
1475 memset(ksp, 0, sizeof(struct ks8695_priv));
1476 1473
1477 ksp->dev = &pdev->dev; 1474 ksp->dev = &pdev->dev;
1478 ksp->ndev = ndev; 1475 ksp->ndev = ndev;
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index f7c9ca1dfb17..2e852463382b 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -483,7 +483,7 @@ static void w90p910_reset_mac(struct net_device *dev)
483 483
484 w90p910_init_desc(dev); 484 w90p910_init_desc(dev);
485 485
486 dev->trans_start = jiffies; 486 dev->trans_start = jiffies; /* prevent tx timeout */
487 ether->cur_tx = 0x0; 487 ether->cur_tx = 0x0;
488 ether->finish_tx = 0x0; 488 ether->finish_tx = 0x0;
489 ether->cur_rx = 0x0; 489 ether->cur_rx = 0x0;
@@ -497,7 +497,7 @@ static void w90p910_reset_mac(struct net_device *dev)
497 w90p910_trigger_tx(dev); 497 w90p910_trigger_tx(dev);
498 w90p910_trigger_rx(dev); 498 w90p910_trigger_rx(dev);
499 499
500 dev->trans_start = jiffies; 500 dev->trans_start = jiffies; /* prevent tx timeout */
501 501
502 if (netif_queue_stopped(dev)) 502 if (netif_queue_stopped(dev))
503 netif_wake_queue(dev); 503 netif_wake_queue(dev);
@@ -634,8 +634,6 @@ static int w90p910_send_frame(struct net_device *dev,
634 634
635 txbd = &ether->tdesc->desclist[ether->cur_tx]; 635 txbd = &ether->tdesc->desclist[ether->cur_tx];
636 636
637 dev->trans_start = jiffies;
638
639 if (txbd->mode & TX_OWEN_DMA) 637 if (txbd->mode & TX_OWEN_DMA)
640 netif_stop_queue(dev); 638 netif_stop_queue(dev);
641 639
@@ -744,7 +742,6 @@ static void netdev_rx(struct net_device *dev)
744 return; 742 return;
745 } 743 }
746 744
747 skb->dev = dev;
748 skb_reserve(skb, 2); 745 skb_reserve(skb, 2);
749 skb_put(skb, length); 746 skb_put(skb, length);
750 skb_copy_to_linear_data(skb, data, length); 747 skb_copy_to_linear_data(skb, data, length);
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 10a20fb9ae65..93185f5f09ac 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -583,7 +583,7 @@ static void net_tx_timeout (struct net_device *dev)
583 outb (0x00, ioaddr + TX_START); 583 outb (0x00, ioaddr + TX_START);
584 outb (0x03, ioaddr + COL16CNTL); 584 outb (0x03, ioaddr + COL16CNTL);
585 585
586 dev->trans_start = jiffies; 586 dev->trans_start = jiffies; /* prevent tx timeout */
587 587
588 lp->tx_started = 0; 588 lp->tx_started = 0;
589 lp->tx_queue_ready = 1; 589 lp->tx_queue_ready = 1;
@@ -636,7 +636,6 @@ static netdev_tx_t net_send_packet (struct sk_buff *skb,
636 outb (0x80 | lp->tx_queue, ioaddr + TX_START); 636 outb (0x80 | lp->tx_queue, ioaddr + TX_START);
637 lp->tx_queue = 0; 637 lp->tx_queue = 0;
638 lp->tx_queue_len = 0; 638 lp->tx_queue_len = 0;
639 dev->trans_start = jiffies;
640 lp->tx_started = 1; 639 lp->tx_started = 1;
641 netif_start_queue (dev); 640 netif_start_queue (dev);
642 } else if (lp->tx_queue_len < 4096 - 1502) 641 } else if (lp->tx_queue_len < 4096 - 1502)
@@ -796,7 +795,6 @@ net_rx(struct net_device *dev)
796 printk("%s: Exint Rx packet with mode %02x after %d ticks.\n", 795 printk("%s: Exint Rx packet with mode %02x after %d ticks.\n",
797 dev->name, inb(ioaddr + RX_MODE), i); 796 dev->name, inb(ioaddr + RX_MODE), i);
798 } 797 }
799 return;
800} 798}
801 799
802/* The inverse routine to net_open(). */ 800/* The inverse routine to net_open(). */
@@ -847,12 +845,12 @@ set_rx_mode(struct net_device *dev)
847 memset(mc_filter, 0x00, sizeof(mc_filter)); 845 memset(mc_filter, 0x00, sizeof(mc_filter));
848 outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ 846 outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
849 } else { 847 } else {
850 struct dev_mc_list *mclist; 848 struct netdev_hw_addr *ha;
851 849
852 memset(mc_filter, 0, sizeof(mc_filter)); 850 memset(mc_filter, 0, sizeof(mc_filter));
853 netdev_for_each_mc_addr(mclist, dev) { 851 netdev_for_each_mc_addr(ha, dev) {
854 unsigned int bit = 852 unsigned int bit =
855 ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26; 853 ether_crc_le(ETH_ALEN, ha->addr) >> 26;
856 mc_filter[bit >> 3] |= (1 << bit); 854 mc_filter[bit >> 3] |= (1 << bit);
857 } 855 }
858 outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */ 856 outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */
@@ -870,7 +868,6 @@ set_rx_mode(struct net_device *dev)
870 outw(saved_bank, ioaddr + CONFIG_0); 868 outw(saved_bank, ioaddr + CONFIG_0);
871 } 869 }
872 spin_unlock_irqrestore (&lp->lock, flags); 870 spin_unlock_irqrestore (&lp->lock, flags);
873 return;
874} 871}
875 872
876#ifdef MODULE 873#ifdef MODULE
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index a8686bfec7a1..b57d7dee389a 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -767,8 +767,8 @@ static void lance_tx_timeout (struct net_device *dev)
767 /* lance_restart, essentially */ 767 /* lance_restart, essentially */
768 lance_init_ring(dev); 768 lance_init_ring(dev);
769 REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT; 769 REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
770 dev->trans_start = jiffies; 770 dev->trans_start = jiffies; /* prevent tx timeout */
771 netif_wake_queue (dev); 771 netif_wake_queue(dev);
772} 772}
773 773
774/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */ 774/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */
@@ -836,7 +836,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
836 836
837 /* Trigger an immediate send poll. */ 837 /* Trigger an immediate send poll. */
838 DREG = CSR0_INEA | CSR0_TDMD; 838 DREG = CSR0_INEA | CSR0_TDMD;
839 dev->trans_start = jiffies;
840 839
841 if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) == 840 if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
842 TMD1_OWN_HOST) 841 TMD1_OWN_HOST)
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 32339243d61f..7c521508313c 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -263,8 +263,6 @@ static void atl1c_get_wol(struct net_device *netdev,
263 wol->wolopts |= WAKE_MAGIC; 263 wol->wolopts |= WAKE_MAGIC;
264 if (adapter->wol & AT_WUFC_LNKC) 264 if (adapter->wol & AT_WUFC_LNKC)
265 wol->wolopts |= WAKE_PHY; 265 wol->wolopts |= WAKE_PHY;
266
267 return;
268} 266}
269 267
270static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 268static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 50dc531a02d8..1c3c046d5f34 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -317,8 +317,6 @@ static void atl1c_common_task(struct work_struct *work)
317 317
318 if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE) 318 if (adapter->work_event & ATL1C_WORK_EVENT_LINK_CHANGE)
319 atl1c_check_link_status(adapter); 319 atl1c_check_link_status(adapter);
320
321 return;
322} 320}
323 321
324 322
@@ -354,7 +352,7 @@ static void atl1c_set_multi(struct net_device *netdev)
354{ 352{
355 struct atl1c_adapter *adapter = netdev_priv(netdev); 353 struct atl1c_adapter *adapter = netdev_priv(netdev);
356 struct atl1c_hw *hw = &adapter->hw; 354 struct atl1c_hw *hw = &adapter->hw;
357 struct dev_mc_list *mc_ptr; 355 struct netdev_hw_addr *ha;
358 u32 mac_ctrl_data; 356 u32 mac_ctrl_data;
359 u32 hash_value; 357 u32 hash_value;
360 358
@@ -377,8 +375,8 @@ static void atl1c_set_multi(struct net_device *netdev)
377 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); 375 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
378 376
379 /* comoute mc addresses' hash value ,and put it into hash table */ 377 /* comoute mc addresses' hash value ,and put it into hash table */
380 netdev_for_each_mc_addr(mc_ptr, netdev) { 378 netdev_for_each_mc_addr(ha, netdev) {
381 hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr); 379 hash_value = atl1c_hash_mc_addr(hw, ha->addr);
382 atl1c_hash_set(hw, hash_value); 380 atl1c_hash_set(hw, hash_value);
383 } 381 }
384} 382}
@@ -1817,7 +1815,6 @@ rrs_checked:
1817 atl1c_clean_rfd(rfd_ring, rrs, rfd_num); 1815 atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
1818 skb_put(skb, length - ETH_FCS_LEN); 1816 skb_put(skb, length - ETH_FCS_LEN);
1819 skb->protocol = eth_type_trans(skb, netdev); 1817 skb->protocol = eth_type_trans(skb, netdev);
1820 skb->dev = netdev;
1821 atl1c_rx_checksum(adapter, skb, rrs); 1818 atl1c_rx_checksum(adapter, skb, rrs);
1822 if (unlikely(adapter->vlgrp) && rrs->word3 & RRS_VLAN_INS) { 1819 if (unlikely(adapter->vlgrp) && rrs->word3 & RRS_VLAN_INS) {
1823 u16 vlan; 1820 u16 vlan;
diff --git a/drivers/net/atl1e/atl1e_ethtool.c b/drivers/net/atl1e/atl1e_ethtool.c
index ffd696ee7c8e..6943a6c3b948 100644
--- a/drivers/net/atl1e/atl1e_ethtool.c
+++ b/drivers/net/atl1e/atl1e_ethtool.c
@@ -338,8 +338,6 @@ static void atl1e_get_wol(struct net_device *netdev,
338 wol->wolopts |= WAKE_MAGIC; 338 wol->wolopts |= WAKE_MAGIC;
339 if (adapter->wol & AT_WUFC_LNKC) 339 if (adapter->wol & AT_WUFC_LNKC)
340 wol->wolopts |= WAKE_PHY; 340 wol->wolopts |= WAKE_PHY;
341
342 return;
343} 341}
344 342
345static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 343static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 73302ae468aa..1acea5774e89 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -284,7 +284,7 @@ static void atl1e_set_multi(struct net_device *netdev)
284{ 284{
285 struct atl1e_adapter *adapter = netdev_priv(netdev); 285 struct atl1e_adapter *adapter = netdev_priv(netdev);
286 struct atl1e_hw *hw = &adapter->hw; 286 struct atl1e_hw *hw = &adapter->hw;
287 struct dev_mc_list *mc_ptr; 287 struct netdev_hw_addr *ha;
288 u32 mac_ctrl_data = 0; 288 u32 mac_ctrl_data = 0;
289 u32 hash_value; 289 u32 hash_value;
290 290
@@ -307,8 +307,8 @@ static void atl1e_set_multi(struct net_device *netdev)
307 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); 307 AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
308 308
309 /* comoute mc addresses' hash value ,and put it into hash table */ 309 /* comoute mc addresses' hash value ,and put it into hash table */
310 netdev_for_each_mc_addr(mc_ptr, netdev) { 310 netdev_for_each_mc_addr(ha, netdev) {
311 hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr); 311 hash_value = atl1e_hash_mc_addr(hw, ha->addr);
312 atl1e_hash_set(hw, hash_value); 312 atl1e_hash_set(hw, hash_value);
313 } 313 }
314} 314}
@@ -707,8 +707,6 @@ static void atl1e_init_ring_resources(struct atl1e_adapter *adapter)
707 adapter->ring_vir_addr = NULL; 707 adapter->ring_vir_addr = NULL;
708 adapter->rx_ring.desc = NULL; 708 adapter->rx_ring.desc = NULL;
709 rwlock_init(&adapter->tx_ring.tx_lock); 709 rwlock_init(&adapter->tx_ring.tx_lock);
710
711 return;
712} 710}
713 711
714/* 712/*
@@ -905,8 +903,6 @@ static inline void atl1e_configure_des_ring(const struct atl1e_adapter *adapter)
905 AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size); 903 AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size);
906 /* Load all of base address above */ 904 /* Load all of base address above */
907 AT_WRITE_REG(hw, REG_LOAD_PTR, 1); 905 AT_WRITE_REG(hw, REG_LOAD_PTR, 1);
908
909 return;
910} 906}
911 907
912static inline void atl1e_configure_tx(struct atl1e_adapter *adapter) 908static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
@@ -950,7 +946,6 @@ static inline void atl1e_configure_tx(struct atl1e_adapter *adapter)
950 (((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK) 946 (((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK)
951 << TXQ_CTRL_NUM_TPD_BURST_SHIFT) 947 << TXQ_CTRL_NUM_TPD_BURST_SHIFT)
952 | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN); 948 | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN);
953 return;
954} 949}
955 950
956static inline void atl1e_configure_rx(struct atl1e_adapter *adapter) 951static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
@@ -1004,7 +999,6 @@ static inline void atl1e_configure_rx(struct atl1e_adapter *adapter)
1004 RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; 999 RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN;
1005 1000
1006 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); 1001 AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
1007 return;
1008} 1002}
1009 1003
1010static inline void atl1e_configure_dma(struct atl1e_adapter *adapter) 1004static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
@@ -1024,7 +1018,6 @@ static inline void atl1e_configure_dma(struct atl1e_adapter *adapter)
1024 << DMA_CTRL_DMAW_DLY_CNT_SHIFT; 1018 << DMA_CTRL_DMAW_DLY_CNT_SHIFT;
1025 1019
1026 AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); 1020 AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
1027 return;
1028} 1021}
1029 1022
1030static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter) 1023static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
@@ -1428,7 +1421,6 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1428 "Memory squeeze, deferring packet\n"); 1421 "Memory squeeze, deferring packet\n");
1429 goto skip_pkt; 1422 goto skip_pkt;
1430 } 1423 }
1431 skb->dev = netdev;
1432 memcpy(skb->data, (u8 *)(prrs + 1), packet_size); 1424 memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
1433 skb_put(skb, packet_size); 1425 skb_put(skb, packet_size);
1434 skb->protocol = eth_type_trans(skb, netdev); 1426 skb->protocol = eth_type_trans(skb, netdev);
@@ -1680,7 +1672,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
1680{ 1672{
1681 struct atl1e_tpd_desc *use_tpd = NULL; 1673 struct atl1e_tpd_desc *use_tpd = NULL;
1682 struct atl1e_tx_buffer *tx_buffer = NULL; 1674 struct atl1e_tx_buffer *tx_buffer = NULL;
1683 u16 buf_len = skb->len - skb->data_len; 1675 u16 buf_len = skb_headlen(skb);
1684 u16 map_len = 0; 1676 u16 map_len = 0;
1685 u16 mapped_len = 0; 1677 u16 mapped_len = 0;
1686 u16 hdr_len = 0; 1678 u16 hdr_len = 0;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0ebd8208f606..63b9ba0cc67e 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1830,8 +1830,6 @@ static void atl1_rx_checksum(struct atl1_adapter *adapter,
1830 adapter->hw_csum_good++; 1830 adapter->hw_csum_good++;
1831 return; 1831 return;
1832 } 1832 }
1833
1834 return;
1835} 1833}
1836 1834
1837/* 1835/*
@@ -2347,7 +2345,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
2347{ 2345{
2348 struct atl1_adapter *adapter = netdev_priv(netdev); 2346 struct atl1_adapter *adapter = netdev_priv(netdev);
2349 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; 2347 struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
2350 int len = skb->len; 2348 int len;
2351 int tso; 2349 int tso;
2352 int count = 1; 2350 int count = 1;
2353 int ret_val; 2351 int ret_val;
@@ -2359,7 +2357,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
2359 unsigned int f; 2357 unsigned int f;
2360 unsigned int proto_hdr_len; 2358 unsigned int proto_hdr_len;
2361 2359
2362 len -= skb->data_len; 2360 len = skb_headlen(skb);
2363 2361
2364 if (unlikely(skb->len <= 0)) { 2362 if (unlikely(skb->len <= 0)) {
2365 dev_kfree_skb_any(skb); 2363 dev_kfree_skb_any(skb);
@@ -3390,7 +3388,6 @@ static void atl1_get_wol(struct net_device *netdev,
3390 wol->wolopts = 0; 3388 wol->wolopts = 0;
3391 if (adapter->wol & ATLX_WUFC_MAG) 3389 if (adapter->wol & ATLX_WUFC_MAG)
3392 wol->wolopts |= WAKE_MAGIC; 3390 wol->wolopts |= WAKE_MAGIC;
3393 return;
3394} 3391}
3395 3392
3396static int atl1_set_wol(struct net_device *netdev, 3393static int atl1_set_wol(struct net_device *netdev,
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 54662f24f9bb..8da87383fb39 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -136,7 +136,7 @@ static void atl2_set_multi(struct net_device *netdev)
136{ 136{
137 struct atl2_adapter *adapter = netdev_priv(netdev); 137 struct atl2_adapter *adapter = netdev_priv(netdev);
138 struct atl2_hw *hw = &adapter->hw; 138 struct atl2_hw *hw = &adapter->hw;
139 struct dev_mc_list *mc_ptr; 139 struct netdev_hw_addr *ha;
140 u32 rctl; 140 u32 rctl;
141 u32 hash_value; 141 u32 hash_value;
142 142
@@ -158,8 +158,8 @@ static void atl2_set_multi(struct net_device *netdev)
158 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); 158 ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
159 159
160 /* comoute mc addresses' hash value ,and put it into hash table */ 160 /* comoute mc addresses' hash value ,and put it into hash table */
161 netdev_for_each_mc_addr(mc_ptr, netdev) { 161 netdev_for_each_mc_addr(ha, netdev) {
162 hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr); 162 hash_value = atl2_hash_mc_addr(hw, ha->addr);
163 atl2_hash_set(hw, hash_value); 163 atl2_hash_set(hw, hash_value);
164 } 164 }
165} 165}
@@ -422,7 +422,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
422 netdev->stats.rx_dropped++; 422 netdev->stats.rx_dropped++;
423 break; 423 break;
424 } 424 }
425 skb->dev = netdev;
426 memcpy(skb->data, rxd->packet, rx_size); 425 memcpy(skb->data, rxd->packet, rx_size);
427 skb_put(skb, rx_size); 426 skb_put(skb, rx_size);
428 skb->protocol = eth_type_trans(skb, netdev); 427 skb->protocol = eth_type_trans(skb, netdev);
@@ -893,7 +892,6 @@ static netdev_tx_t atl2_xmit_frame(struct sk_buff *skb,
893 (adapter->txd_write_ptr >> 2)); 892 (adapter->txd_write_ptr >> 2));
894 893
895 mmiowb(); 894 mmiowb();
896 netdev->trans_start = jiffies;
897 dev_kfree_skb_any(skb); 895 dev_kfree_skb_any(skb);
898 return NETDEV_TX_OK; 896 return NETDEV_TX_OK;
899} 897}
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index 72f3306352e2..f979ea2d6d3c 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -123,7 +123,7 @@ static void atlx_set_multi(struct net_device *netdev)
123{ 123{
124 struct atlx_adapter *adapter = netdev_priv(netdev); 124 struct atlx_adapter *adapter = netdev_priv(netdev);
125 struct atlx_hw *hw = &adapter->hw; 125 struct atlx_hw *hw = &adapter->hw;
126 struct dev_mc_list *mc_ptr; 126 struct netdev_hw_addr *ha;
127 u32 rctl; 127 u32 rctl;
128 u32 hash_value; 128 u32 hash_value;
129 129
@@ -144,8 +144,8 @@ static void atlx_set_multi(struct net_device *netdev)
144 iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); 144 iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
145 145
146 /* compute mc addresses' hash value ,and put it into hash table */ 146 /* compute mc addresses' hash value ,and put it into hash table */
147 netdev_for_each_mc_addr(mc_ptr, netdev) { 147 netdev_for_each_mc_addr(ha, netdev) {
148 hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr); 148 hash_value = atlx_hash_mc_addr(hw, ha->addr);
149 atlx_hash_set(hw, hash_value); 149 atlx_hash_set(hw, hash_value);
150 } 150 }
151} 151}
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 55039d44dc47..bd2f9d331dac 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -547,7 +547,7 @@ static void tx_timeout(struct net_device *dev)
547 dev->stats.tx_errors++; 547 dev->stats.tx_errors++;
548 /* Try to restart the adapter. */ 548 /* Try to restart the adapter. */
549 hardware_init(dev); 549 hardware_init(dev);
550 dev->trans_start = jiffies; 550 dev->trans_start = jiffies; /* prevent tx timeout */
551 netif_wake_queue(dev); 551 netif_wake_queue(dev);
552 dev->stats.tx_errors++; 552 dev->stats.tx_errors++;
553} 553}
@@ -586,7 +586,6 @@ static netdev_tx_t atp_send_packet(struct sk_buff *skb,
586 write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK); 586 write_reg(ioaddr, IMR, ISR_RxOK | ISR_TxErr | ISR_TxOK);
587 write_reg_high(ioaddr, IMR, ISRh_RxErr); 587 write_reg_high(ioaddr, IMR, ISRh_RxErr);
588 588
589 dev->trans_start = jiffies;
590 dev_kfree_skb (skb); 589 dev_kfree_skb (skb);
591 return NETDEV_TX_OK; 590 return NETDEV_TX_OK;
592} 591}
@@ -803,7 +802,6 @@ static void net_rx(struct net_device *dev)
803 done: 802 done:
804 write_reg(ioaddr, CMR1, CMR1_NextPkt); 803 write_reg(ioaddr, CMR1, CMR1_NextPkt);
805 lp->last_rx_time = jiffies; 804 lp->last_rx_time = jiffies;
806 return;
807} 805}
808 806
809static void read_block(long ioaddr, int length, unsigned char *p, int data_mode) 807static void read_block(long ioaddr, int length, unsigned char *p, int data_mode)
@@ -882,11 +880,11 @@ static void set_rx_mode_8012(struct net_device *dev)
882 memset(mc_filter, 0xff, sizeof(mc_filter)); 880 memset(mc_filter, 0xff, sizeof(mc_filter));
883 new_mode = CMR2h_Normal; 881 new_mode = CMR2h_Normal;
884 } else { 882 } else {
885 struct dev_mc_list *mclist; 883 struct netdev_hw_addr *ha;
886 884
887 memset(mc_filter, 0, sizeof(mc_filter)); 885 memset(mc_filter, 0, sizeof(mc_filter));
888 netdev_for_each_mc_addr(mclist, dev) { 886 netdev_for_each_mc_addr(ha, dev) {
889 int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f; 887 int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
890 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); 888 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
891 } 889 }
892 new_mode = CMR2h_Normal; 890 new_mode = CMR2h_Normal;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 4da191b87b0d..ece6128bef14 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -75,14 +75,19 @@ static int au1000_debug = 5;
75static int au1000_debug = 3; 75static int au1000_debug = 3;
76#endif 76#endif
77 77
78#define AU1000_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
79 NETIF_MSG_PROBE | \
80 NETIF_MSG_LINK)
81
78#define DRV_NAME "au1000_eth" 82#define DRV_NAME "au1000_eth"
79#define DRV_VERSION "1.6" 83#define DRV_VERSION "1.7"
80#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>" 84#define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
81#define DRV_DESC "Au1xxx on-chip Ethernet driver" 85#define DRV_DESC "Au1xxx on-chip Ethernet driver"
82 86
83MODULE_AUTHOR(DRV_AUTHOR); 87MODULE_AUTHOR(DRV_AUTHOR);
84MODULE_DESCRIPTION(DRV_DESC); 88MODULE_DESCRIPTION(DRV_DESC);
85MODULE_LICENSE("GPL"); 89MODULE_LICENSE("GPL");
90MODULE_VERSION(DRV_VERSION);
86 91
87/* 92/*
88 * Theory of operation 93 * Theory of operation
@@ -148,7 +153,7 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
148 * specific irq-map 153 * specific irq-map
149 */ 154 */
150 155
151static void enable_mac(struct net_device *dev, int force_reset) 156static void au1000_enable_mac(struct net_device *dev, int force_reset)
152{ 157{
153 unsigned long flags; 158 unsigned long flags;
154 struct au1000_private *aup = netdev_priv(dev); 159 struct au1000_private *aup = netdev_priv(dev);
@@ -182,8 +187,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
182 while (*mii_control_reg & MAC_MII_BUSY) { 187 while (*mii_control_reg & MAC_MII_BUSY) {
183 mdelay(1); 188 mdelay(1);
184 if (--timedout == 0) { 189 if (--timedout == 0) {
185 printk(KERN_ERR "%s: read_MII busy timeout!!\n", 190 netdev_err(dev, "read_MII busy timeout!!\n");
186 dev->name);
187 return -1; 191 return -1;
188 } 192 }
189 } 193 }
@@ -197,8 +201,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
197 while (*mii_control_reg & MAC_MII_BUSY) { 201 while (*mii_control_reg & MAC_MII_BUSY) {
198 mdelay(1); 202 mdelay(1);
199 if (--timedout == 0) { 203 if (--timedout == 0) {
200 printk(KERN_ERR "%s: mdio_read busy timeout!!\n", 204 netdev_err(dev, "mdio_read busy timeout!!\n");
201 dev->name);
202 return -1; 205 return -1;
203 } 206 }
204 } 207 }
@@ -217,8 +220,7 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr,
217 while (*mii_control_reg & MAC_MII_BUSY) { 220 while (*mii_control_reg & MAC_MII_BUSY) {
218 mdelay(1); 221 mdelay(1);
219 if (--timedout == 0) { 222 if (--timedout == 0) {
220 printk(KERN_ERR "%s: mdio_write busy timeout!!\n", 223 netdev_err(dev, "mdio_write busy timeout!!\n");
221 dev->name);
222 return; 224 return;
223 } 225 }
224 } 226 }
@@ -236,7 +238,7 @@ static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
236 * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */ 238 * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */
237 struct net_device *const dev = bus->priv; 239 struct net_device *const dev = bus->priv;
238 240
239 enable_mac(dev, 0); /* make sure the MAC associated with this 241 au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
240 * mii_bus is enabled */ 242 * mii_bus is enabled */
241 return au1000_mdio_read(dev, phy_addr, regnum); 243 return au1000_mdio_read(dev, phy_addr, regnum);
242} 244}
@@ -246,7 +248,7 @@ static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
246{ 248{
247 struct net_device *const dev = bus->priv; 249 struct net_device *const dev = bus->priv;
248 250
249 enable_mac(dev, 0); /* make sure the MAC associated with this 251 au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
250 * mii_bus is enabled */ 252 * mii_bus is enabled */
251 au1000_mdio_write(dev, phy_addr, regnum, value); 253 au1000_mdio_write(dev, phy_addr, regnum, value);
252 return 0; 254 return 0;
@@ -256,28 +258,26 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
256{ 258{
257 struct net_device *const dev = bus->priv; 259 struct net_device *const dev = bus->priv;
258 260
259 enable_mac(dev, 0); /* make sure the MAC associated with this 261 au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
260 * mii_bus is enabled */ 262 * mii_bus is enabled */
261 return 0; 263 return 0;
262} 264}
263 265
264static void hard_stop(struct net_device *dev) 266static void au1000_hard_stop(struct net_device *dev)
265{ 267{
266 struct au1000_private *aup = netdev_priv(dev); 268 struct au1000_private *aup = netdev_priv(dev);
267 269
268 if (au1000_debug > 4) 270 netif_dbg(aup, drv, dev, "hard stop\n");
269 printk(KERN_INFO "%s: hard stop\n", dev->name);
270 271
271 aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE); 272 aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
272 au_sync_delay(10); 273 au_sync_delay(10);
273} 274}
274 275
275static void enable_rx_tx(struct net_device *dev) 276static void au1000_enable_rx_tx(struct net_device *dev)
276{ 277{
277 struct au1000_private *aup = netdev_priv(dev); 278 struct au1000_private *aup = netdev_priv(dev);
278 279
279 if (au1000_debug > 4) 280 netif_dbg(aup, hw, dev, "enable_rx_tx\n");
280 printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
281 281
282 aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE); 282 aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
283 au_sync_delay(10); 283 au_sync_delay(10);
@@ -297,16 +297,15 @@ au1000_adjust_link(struct net_device *dev)
297 spin_lock_irqsave(&aup->lock, flags); 297 spin_lock_irqsave(&aup->lock, flags);
298 298
299 if (phydev->link && (aup->old_speed != phydev->speed)) { 299 if (phydev->link && (aup->old_speed != phydev->speed)) {
300 // speed changed 300 /* speed changed */
301 301
302 switch(phydev->speed) { 302 switch (phydev->speed) {
303 case SPEED_10: 303 case SPEED_10:
304 case SPEED_100: 304 case SPEED_100:
305 break; 305 break;
306 default: 306 default:
307 printk(KERN_WARNING 307 netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
308 "%s: Speed (%d) is not 10/100 ???\n", 308 phydev->speed);
309 dev->name, phydev->speed);
310 break; 309 break;
311 } 310 }
312 311
@@ -316,10 +315,10 @@ au1000_adjust_link(struct net_device *dev)
316 } 315 }
317 316
318 if (phydev->link && (aup->old_duplex != phydev->duplex)) { 317 if (phydev->link && (aup->old_duplex != phydev->duplex)) {
319 // duplex mode changed 318 /* duplex mode changed */
320 319
321 /* switching duplex mode requires to disable rx and tx! */ 320 /* switching duplex mode requires to disable rx and tx! */
322 hard_stop(dev); 321 au1000_hard_stop(dev);
323 322
324 if (DUPLEX_FULL == phydev->duplex) 323 if (DUPLEX_FULL == phydev->duplex)
325 aup->mac->control = ((aup->mac->control 324 aup->mac->control = ((aup->mac->control
@@ -331,14 +330,14 @@ au1000_adjust_link(struct net_device *dev)
331 | MAC_DISABLE_RX_OWN); 330 | MAC_DISABLE_RX_OWN);
332 au_sync_delay(1); 331 au_sync_delay(1);
333 332
334 enable_rx_tx(dev); 333 au1000_enable_rx_tx(dev);
335 aup->old_duplex = phydev->duplex; 334 aup->old_duplex = phydev->duplex;
336 335
337 status_change = 1; 336 status_change = 1;
338 } 337 }
339 338
340 if(phydev->link != aup->old_link) { 339 if (phydev->link != aup->old_link) {
341 // link state changed 340 /* link state changed */
342 341
343 if (!phydev->link) { 342 if (!phydev->link) {
344 /* link went down */ 343 /* link went down */
@@ -354,15 +353,15 @@ au1000_adjust_link(struct net_device *dev)
354 353
355 if (status_change) { 354 if (status_change) {
356 if (phydev->link) 355 if (phydev->link)
357 printk(KERN_INFO "%s: link up (%d/%s)\n", 356 netdev_info(dev, "link up (%d/%s)\n",
358 dev->name, phydev->speed, 357 phydev->speed,
359 DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); 358 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
360 else 359 else
361 printk(KERN_INFO "%s: link down\n", dev->name); 360 netdev_info(dev, "link down\n");
362 } 361 }
363} 362}
364 363
365static int mii_probe (struct net_device *dev) 364static int au1000_mii_probe (struct net_device *dev)
366{ 365{
367 struct au1000_private *const aup = netdev_priv(dev); 366 struct au1000_private *const aup = netdev_priv(dev);
368 struct phy_device *phydev = NULL; 367 struct phy_device *phydev = NULL;
@@ -373,8 +372,7 @@ static int mii_probe (struct net_device *dev)
373 if (aup->phy_addr) 372 if (aup->phy_addr)
374 phydev = aup->mii_bus->phy_map[aup->phy_addr]; 373 phydev = aup->mii_bus->phy_map[aup->phy_addr];
375 else 374 else
376 printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n", 375 netdev_info(dev, "using PHY-less setup\n");
377 dev->name);
378 return 0; 376 return 0;
379 } else { 377 } else {
380 int phy_addr; 378 int phy_addr;
@@ -391,7 +389,7 @@ static int mii_probe (struct net_device *dev)
391 /* try harder to find a PHY */ 389 /* try harder to find a PHY */
392 if (!phydev && (aup->mac_id == 1)) { 390 if (!phydev && (aup->mac_id == 1)) {
393 /* no PHY found, maybe we have a dual PHY? */ 391 /* no PHY found, maybe we have a dual PHY? */
394 printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, " 392 dev_info(&dev->dev, ": no PHY found on MAC1, "
395 "let's see if it's attached to MAC0...\n"); 393 "let's see if it's attached to MAC0...\n");
396 394
397 /* find the first (lowest address) non-attached PHY on 395 /* find the first (lowest address) non-attached PHY on
@@ -417,7 +415,7 @@ static int mii_probe (struct net_device *dev)
417 } 415 }
418 416
419 if (!phydev) { 417 if (!phydev) {
420 printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name); 418 netdev_err(dev, "no PHY found\n");
421 return -1; 419 return -1;
422 } 420 }
423 421
@@ -428,7 +426,7 @@ static int mii_probe (struct net_device *dev)
428 0, PHY_INTERFACE_MODE_MII); 426 0, PHY_INTERFACE_MODE_MII);
429 427
430 if (IS_ERR(phydev)) { 428 if (IS_ERR(phydev)) {
431 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 429 netdev_err(dev, "Could not attach to PHY\n");
432 return PTR_ERR(phydev); 430 return PTR_ERR(phydev);
433 } 431 }
434 432
@@ -449,8 +447,8 @@ static int mii_probe (struct net_device *dev)
449 aup->old_duplex = -1; 447 aup->old_duplex = -1;
450 aup->phy_dev = phydev; 448 aup->phy_dev = phydev;
451 449
452 printk(KERN_INFO "%s: attached PHY driver [%s] " 450 netdev_info(dev, "attached PHY driver [%s] "
453 "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name, 451 "(mii_bus:phy_addr=%s, irq=%d)\n",
454 phydev->drv->name, dev_name(&phydev->dev), phydev->irq); 452 phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
455 453
456 return 0; 454 return 0;
@@ -462,7 +460,7 @@ static int mii_probe (struct net_device *dev)
462 * has the virtual and dma address of a buffer suitable for 460 * has the virtual and dma address of a buffer suitable for
463 * both, receive and transmit operations. 461 * both, receive and transmit operations.
464 */ 462 */
465static db_dest_t *GetFreeDB(struct au1000_private *aup) 463static db_dest_t *au1000_GetFreeDB(struct au1000_private *aup)
466{ 464{
467 db_dest_t *pDB; 465 db_dest_t *pDB;
468 pDB = aup->pDBfree; 466 pDB = aup->pDBfree;
@@ -473,7 +471,7 @@ static db_dest_t *GetFreeDB(struct au1000_private *aup)
473 return pDB; 471 return pDB;
474} 472}
475 473
476void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB) 474void au1000_ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
477{ 475{
478 db_dest_t *pDBfree = aup->pDBfree; 476 db_dest_t *pDBfree = aup->pDBfree;
479 if (pDBfree) 477 if (pDBfree)
@@ -481,12 +479,12 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
481 aup->pDBfree = pDB; 479 aup->pDBfree = pDB;
482} 480}
483 481
484static void reset_mac_unlocked(struct net_device *dev) 482static void au1000_reset_mac_unlocked(struct net_device *dev)
485{ 483{
486 struct au1000_private *const aup = netdev_priv(dev); 484 struct au1000_private *const aup = netdev_priv(dev);
487 int i; 485 int i;
488 486
489 hard_stop(dev); 487 au1000_hard_stop(dev);
490 488
491 *aup->enable = MAC_EN_CLOCK_ENABLE; 489 *aup->enable = MAC_EN_CLOCK_ENABLE;
492 au_sync_delay(2); 490 au_sync_delay(2);
@@ -507,18 +505,17 @@ static void reset_mac_unlocked(struct net_device *dev)
507 505
508} 506}
509 507
510static void reset_mac(struct net_device *dev) 508static void au1000_reset_mac(struct net_device *dev)
511{ 509{
512 struct au1000_private *const aup = netdev_priv(dev); 510 struct au1000_private *const aup = netdev_priv(dev);
513 unsigned long flags; 511 unsigned long flags;
514 512
515 if (au1000_debug > 4) 513 netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
516 printk(KERN_INFO "%s: reset mac, aup %x\n", 514 (unsigned)aup);
517 dev->name, (unsigned)aup);
518 515
519 spin_lock_irqsave(&aup->lock, flags); 516 spin_lock_irqsave(&aup->lock, flags);
520 517
521 reset_mac_unlocked (dev); 518 au1000_reset_mac_unlocked (dev);
522 519
523 spin_unlock_irqrestore(&aup->lock, flags); 520 spin_unlock_irqrestore(&aup->lock, flags);
524} 521}
@@ -529,7 +526,7 @@ static void reset_mac(struct net_device *dev)
529 * these are not descriptors sitting in memory. 526 * these are not descriptors sitting in memory.
530 */ 527 */
531static void 528static void
532setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base) 529au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
533{ 530{
534 int i; 531 int i;
535 532
@@ -582,11 +579,25 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
582 info->regdump_len = 0; 579 info->regdump_len = 0;
583} 580}
584 581
582static void au1000_set_msglevel(struct net_device *dev, u32 value)
583{
584 struct au1000_private *aup = netdev_priv(dev);
585 aup->msg_enable = value;
586}
587
588static u32 au1000_get_msglevel(struct net_device *dev)
589{
590 struct au1000_private *aup = netdev_priv(dev);
591 return aup->msg_enable;
592}
593
585static const struct ethtool_ops au1000_ethtool_ops = { 594static const struct ethtool_ops au1000_ethtool_ops = {
586 .get_settings = au1000_get_settings, 595 .get_settings = au1000_get_settings,
587 .set_settings = au1000_set_settings, 596 .set_settings = au1000_set_settings,
588 .get_drvinfo = au1000_get_drvinfo, 597 .get_drvinfo = au1000_get_drvinfo,
589 .get_link = ethtool_op_get_link, 598 .get_link = ethtool_op_get_link,
599 .get_msglevel = au1000_get_msglevel,
600 .set_msglevel = au1000_set_msglevel,
590}; 601};
591 602
592 603
@@ -606,11 +617,10 @@ static int au1000_init(struct net_device *dev)
606 int i; 617 int i;
607 u32 control; 618 u32 control;
608 619
609 if (au1000_debug > 4) 620 netif_dbg(aup, hw, dev, "au1000_init\n");
610 printk("%s: au1000_init\n", dev->name);
611 621
612 /* bring the device out of reset */ 622 /* bring the device out of reset */
613 enable_mac(dev, 1); 623 au1000_enable_mac(dev, 1);
614 624
615 spin_lock_irqsave(&aup->lock, flags); 625 spin_lock_irqsave(&aup->lock, flags);
616 626
@@ -649,7 +659,7 @@ static int au1000_init(struct net_device *dev)
649 return 0; 659 return 0;
650} 660}
651 661
652static inline void update_rx_stats(struct net_device *dev, u32 status) 662static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
653{ 663{
654 struct net_device_stats *ps = &dev->stats; 664 struct net_device_stats *ps = &dev->stats;
655 665
@@ -667,8 +677,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
667 ps->rx_crc_errors++; 677 ps->rx_crc_errors++;
668 if (status & RX_COLL) 678 if (status & RX_COLL)
669 ps->collisions++; 679 ps->collisions++;
670 } 680 } else
671 else
672 ps->rx_bytes += status & RX_FRAME_LEN_MASK; 681 ps->rx_bytes += status & RX_FRAME_LEN_MASK;
673 682
674} 683}
@@ -685,15 +694,14 @@ static int au1000_rx(struct net_device *dev)
685 db_dest_t *pDB; 694 db_dest_t *pDB;
686 u32 frmlen; 695 u32 frmlen;
687 696
688 if (au1000_debug > 5) 697 netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
689 printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
690 698
691 prxd = aup->rx_dma_ring[aup->rx_head]; 699 prxd = aup->rx_dma_ring[aup->rx_head];
692 buff_stat = prxd->buff_stat; 700 buff_stat = prxd->buff_stat;
693 while (buff_stat & RX_T_DONE) { 701 while (buff_stat & RX_T_DONE) {
694 status = prxd->status; 702 status = prxd->status;
695 pDB = aup->rx_db_inuse[aup->rx_head]; 703 pDB = aup->rx_db_inuse[aup->rx_head];
696 update_rx_stats(dev, status); 704 au1000_update_rx_stats(dev, status);
697 if (!(status & RX_ERROR)) { 705 if (!(status & RX_ERROR)) {
698 706
699 /* good frame */ 707 /* good frame */
@@ -701,9 +709,7 @@ static int au1000_rx(struct net_device *dev)
701 frmlen -= 4; /* Remove FCS */ 709 frmlen -= 4; /* Remove FCS */
702 skb = dev_alloc_skb(frmlen + 2); 710 skb = dev_alloc_skb(frmlen + 2);
703 if (skb == NULL) { 711 if (skb == NULL) {
704 printk(KERN_ERR 712 netdev_err(dev, "Memory squeeze, dropping packet.\n");
705 "%s: Memory squeeze, dropping packet.\n",
706 dev->name);
707 dev->stats.rx_dropped++; 713 dev->stats.rx_dropped++;
708 continue; 714 continue;
709 } 715 }
@@ -713,8 +719,7 @@ static int au1000_rx(struct net_device *dev)
713 skb_put(skb, frmlen); 719 skb_put(skb, frmlen);
714 skb->protocol = eth_type_trans(skb, dev); 720 skb->protocol = eth_type_trans(skb, dev);
715 netif_rx(skb); /* pass the packet to upper layers */ 721 netif_rx(skb); /* pass the packet to upper layers */
716 } 722 } else {
717 else {
718 if (au1000_debug > 4) { 723 if (au1000_debug > 4) {
719 if (status & RX_MISSED_FRAME) 724 if (status & RX_MISSED_FRAME)
720 printk("rx miss\n"); 725 printk("rx miss\n");
@@ -747,7 +752,7 @@ static int au1000_rx(struct net_device *dev)
747 return 0; 752 return 0;
748} 753}
749 754
750static void update_tx_stats(struct net_device *dev, u32 status) 755static void au1000_update_tx_stats(struct net_device *dev, u32 status)
751{ 756{
752 struct au1000_private *aup = netdev_priv(dev); 757 struct au1000_private *aup = netdev_priv(dev);
753 struct net_device_stats *ps = &dev->stats; 758 struct net_device_stats *ps = &dev->stats;
@@ -760,8 +765,7 @@ static void update_tx_stats(struct net_device *dev, u32 status)
760 ps->tx_errors++; 765 ps->tx_errors++;
761 ps->tx_aborted_errors++; 766 ps->tx_aborted_errors++;
762 } 767 }
763 } 768 } else {
764 else {
765 ps->tx_errors++; 769 ps->tx_errors++;
766 ps->tx_aborted_errors++; 770 ps->tx_aborted_errors++;
767 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER)) 771 if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
@@ -783,7 +787,7 @@ static void au1000_tx_ack(struct net_device *dev)
783 ptxd = aup->tx_dma_ring[aup->tx_tail]; 787 ptxd = aup->tx_dma_ring[aup->tx_tail];
784 788
785 while (ptxd->buff_stat & TX_T_DONE) { 789 while (ptxd->buff_stat & TX_T_DONE) {
786 update_tx_stats(dev, ptxd->status); 790 au1000_update_tx_stats(dev, ptxd->status);
787 ptxd->buff_stat &= ~TX_T_DONE; 791 ptxd->buff_stat &= ~TX_T_DONE;
788 ptxd->len = 0; 792 ptxd->len = 0;
789 au_sync(); 793 au_sync();
@@ -817,18 +821,18 @@ static int au1000_open(struct net_device *dev)
817 int retval; 821 int retval;
818 struct au1000_private *aup = netdev_priv(dev); 822 struct au1000_private *aup = netdev_priv(dev);
819 823
820 if (au1000_debug > 4) 824 netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
821 printk("%s: open: dev=%p\n", dev->name, dev);
822 825
823 if ((retval = request_irq(dev->irq, au1000_interrupt, 0, 826 retval = request_irq(dev->irq, au1000_interrupt, 0,
824 dev->name, dev))) { 827 dev->name, dev);
825 printk(KERN_ERR "%s: unable to get IRQ %d\n", 828 if (retval) {
826 dev->name, dev->irq); 829 netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
827 return retval; 830 return retval;
828 } 831 }
829 832
830 if ((retval = au1000_init(dev))) { 833 retval = au1000_init(dev);
831 printk(KERN_ERR "%s: error in au1000_init\n", dev->name); 834 if (retval) {
835 netdev_err(dev, "error in au1000_init\n");
832 free_irq(dev->irq, dev); 836 free_irq(dev->irq, dev);
833 return retval; 837 return retval;
834 } 838 }
@@ -841,8 +845,7 @@ static int au1000_open(struct net_device *dev)
841 845
842 netif_start_queue(dev); 846 netif_start_queue(dev);
843 847
844 if (au1000_debug > 4) 848 netif_dbg(aup, drv, dev, "open: Initialization done.\n");
845 printk("%s: open: Initialization done.\n", dev->name);
846 849
847 return 0; 850 return 0;
848} 851}
@@ -852,15 +855,14 @@ static int au1000_close(struct net_device *dev)
852 unsigned long flags; 855 unsigned long flags;
853 struct au1000_private *const aup = netdev_priv(dev); 856 struct au1000_private *const aup = netdev_priv(dev);
854 857
855 if (au1000_debug > 4) 858 netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
856 printk("%s: close: dev=%p\n", dev->name, dev);
857 859
858 if (aup->phy_dev) 860 if (aup->phy_dev)
859 phy_stop(aup->phy_dev); 861 phy_stop(aup->phy_dev);
860 862
861 spin_lock_irqsave(&aup->lock, flags); 863 spin_lock_irqsave(&aup->lock, flags);
862 864
863 reset_mac_unlocked (dev); 865 au1000_reset_mac_unlocked (dev);
864 866
865 /* stop the device */ 867 /* stop the device */
866 netif_stop_queue(dev); 868 netif_stop_queue(dev);
@@ -884,9 +886,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
884 db_dest_t *pDB; 886 db_dest_t *pDB;
885 int i; 887 int i;
886 888
887 if (au1000_debug > 5) 889 netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
888 printk("%s: tx: aup %x len=%d, data=%p, head %d\n", 890 (unsigned)aup, skb->len,
889 dev->name, (unsigned)aup, skb->len,
890 skb->data, aup->tx_head); 891 skb->data, aup->tx_head);
891 892
892 ptxd = aup->tx_dma_ring[aup->tx_head]; 893 ptxd = aup->tx_dma_ring[aup->tx_head];
@@ -896,9 +897,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
896 netif_stop_queue(dev); 897 netif_stop_queue(dev);
897 aup->tx_full = 1; 898 aup->tx_full = 1;
898 return NETDEV_TX_BUSY; 899 return NETDEV_TX_BUSY;
899 } 900 } else if (buff_stat & TX_T_DONE) {
900 else if (buff_stat & TX_T_DONE) { 901 au1000_update_tx_stats(dev, ptxd->status);
901 update_tx_stats(dev, ptxd->status);
902 ptxd->len = 0; 902 ptxd->len = 0;
903 } 903 }
904 904
@@ -910,12 +910,11 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
910 pDB = aup->tx_db_inuse[aup->tx_head]; 910 pDB = aup->tx_db_inuse[aup->tx_head];
911 skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len); 911 skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
912 if (skb->len < ETH_ZLEN) { 912 if (skb->len < ETH_ZLEN) {
913 for (i=skb->len; i<ETH_ZLEN; i++) { 913 for (i = skb->len; i < ETH_ZLEN; i++) {
914 ((char *)pDB->vaddr)[i] = 0; 914 ((char *)pDB->vaddr)[i] = 0;
915 } 915 }
916 ptxd->len = ETH_ZLEN; 916 ptxd->len = ETH_ZLEN;
917 } 917 } else
918 else
919 ptxd->len = skb->len; 918 ptxd->len = skb->len;
920 919
921 ps->tx_packets++; 920 ps->tx_packets++;
@@ -925,7 +924,6 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
925 au_sync(); 924 au_sync();
926 dev_kfree_skb(skb); 925 dev_kfree_skb(skb);
927 aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); 926 aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1);
928 dev->trans_start = jiffies;
929 return NETDEV_TX_OK; 927 return NETDEV_TX_OK;
930} 928}
931 929
@@ -935,10 +933,10 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
935 */ 933 */
936static void au1000_tx_timeout(struct net_device *dev) 934static void au1000_tx_timeout(struct net_device *dev)
937{ 935{
938 printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev); 936 netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
939 reset_mac(dev); 937 au1000_reset_mac(dev);
940 au1000_init(dev); 938 au1000_init(dev);
941 dev->trans_start = jiffies; 939 dev->trans_start = jiffies; /* prevent tx timeout */
942 netif_wake_queue(dev); 940 netif_wake_queue(dev);
943} 941}
944 942
@@ -946,8 +944,7 @@ static void au1000_multicast_list(struct net_device *dev)
946{ 944{
947 struct au1000_private *aup = netdev_priv(dev); 945 struct au1000_private *aup = netdev_priv(dev);
948 946
949 if (au1000_debug > 4) 947 netif_dbg(aup, drv, dev, "au1000_multicast_list: flags=%x\n", dev->flags);
950 printk("%s: au1000_multicast_list: flags=%x\n", dev->name, dev->flags);
951 948
952 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 949 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
953 aup->mac->control |= MAC_PROMISCUOUS; 950 aup->mac->control |= MAC_PROMISCUOUS;
@@ -955,14 +952,14 @@ static void au1000_multicast_list(struct net_device *dev)
955 netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) { 952 netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
956 aup->mac->control |= MAC_PASS_ALL_MULTI; 953 aup->mac->control |= MAC_PASS_ALL_MULTI;
957 aup->mac->control &= ~MAC_PROMISCUOUS; 954 aup->mac->control &= ~MAC_PROMISCUOUS;
958 printk(KERN_INFO "%s: Pass all multicast\n", dev->name); 955 netdev_info(dev, "Pass all multicast\n");
959 } else { 956 } else {
960 struct dev_mc_list *mclist; 957 struct netdev_hw_addr *ha;
961 u32 mc_filter[2]; /* Multicast hash filter */ 958 u32 mc_filter[2]; /* Multicast hash filter */
962 959
963 mc_filter[1] = mc_filter[0] = 0; 960 mc_filter[1] = mc_filter[0] = 0;
964 netdev_for_each_mc_addr(mclist, dev) 961 netdev_for_each_mc_addr(ha, dev)
965 set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26, 962 set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
966 (long *)mc_filter); 963 (long *)mc_filter);
967 aup->mac->multi_hash_high = mc_filter[1]; 964 aup->mac->multi_hash_high = mc_filter[1];
968 aup->mac->multi_hash_low = mc_filter[0]; 965 aup->mac->multi_hash_low = mc_filter[0];
@@ -975,9 +972,11 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
975{ 972{
976 struct au1000_private *aup = netdev_priv(dev); 973 struct au1000_private *aup = netdev_priv(dev);
977 974
978 if (!netif_running(dev)) return -EINVAL; 975 if (!netif_running(dev))
976 return -EINVAL;
979 977
980 if (!aup->phy_dev) return -EINVAL; // PHY not controllable 978 if (!aup->phy_dev)
979 return -EINVAL; /* PHY not controllable */
981 980
982 return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd); 981 return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
983} 982}
@@ -996,7 +995,7 @@ static const struct net_device_ops au1000_netdev_ops = {
996 995
997static int __devinit au1000_probe(struct platform_device *pdev) 996static int __devinit au1000_probe(struct platform_device *pdev)
998{ 997{
999 static unsigned version_printed = 0; 998 static unsigned version_printed;
1000 struct au1000_private *aup = NULL; 999 struct au1000_private *aup = NULL;
1001 struct au1000_eth_platform_data *pd; 1000 struct au1000_eth_platform_data *pd;
1002 struct net_device *dev = NULL; 1001 struct net_device *dev = NULL;
@@ -1007,40 +1006,40 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1007 1006
1008 base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1007 base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1009 if (!base) { 1008 if (!base) {
1010 printk(KERN_ERR DRV_NAME ": failed to retrieve base register\n"); 1009 dev_err(&pdev->dev, "failed to retrieve base register\n");
1011 err = -ENODEV; 1010 err = -ENODEV;
1012 goto out; 1011 goto out;
1013 } 1012 }
1014 1013
1015 macen = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1014 macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1016 if (!macen) { 1015 if (!macen) {
1017 printk(KERN_ERR DRV_NAME ": failed to retrieve MAC Enable register\n"); 1016 dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
1018 err = -ENODEV; 1017 err = -ENODEV;
1019 goto out; 1018 goto out;
1020 } 1019 }
1021 1020
1022 irq = platform_get_irq(pdev, 0); 1021 irq = platform_get_irq(pdev, 0);
1023 if (irq < 0) { 1022 if (irq < 0) {
1024 printk(KERN_ERR DRV_NAME ": failed to retrieve IRQ\n"); 1023 dev_err(&pdev->dev, "failed to retrieve IRQ\n");
1025 err = -ENODEV; 1024 err = -ENODEV;
1026 goto out; 1025 goto out;
1027 } 1026 }
1028 1027
1029 if (!request_mem_region(base->start, resource_size(base), pdev->name)) { 1028 if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
1030 printk(KERN_ERR DRV_NAME ": failed to request memory region for base registers\n"); 1029 dev_err(&pdev->dev, "failed to request memory region for base registers\n");
1031 err = -ENXIO; 1030 err = -ENXIO;
1032 goto out; 1031 goto out;
1033 } 1032 }
1034 1033
1035 if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) { 1034 if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
1036 printk(KERN_ERR DRV_NAME ": failed to request memory region for MAC enable register\n"); 1035 dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
1037 err = -ENXIO; 1036 err = -ENXIO;
1038 goto err_request; 1037 goto err_request;
1039 } 1038 }
1040 1039
1041 dev = alloc_etherdev(sizeof(struct au1000_private)); 1040 dev = alloc_etherdev(sizeof(struct au1000_private));
1042 if (!dev) { 1041 if (!dev) {
1043 printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME); 1042 dev_err(&pdev->dev, "alloc_etherdev failed\n");
1044 err = -ENOMEM; 1043 err = -ENOMEM;
1045 goto err_alloc; 1044 goto err_alloc;
1046 } 1045 }
@@ -1050,6 +1049,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1050 aup = netdev_priv(dev); 1049 aup = netdev_priv(dev);
1051 1050
1052 spin_lock_init(&aup->lock); 1051 spin_lock_init(&aup->lock);
1052 aup->msg_enable = (au1000_debug < 4 ? AU1000_DEF_MSG_ENABLE : au1000_debug);
1053 1053
1054 /* Allocate the data buffers */ 1054 /* Allocate the data buffers */
1055 /* Snooping works fine with eth on all au1xxx */ 1055 /* Snooping works fine with eth on all au1xxx */
@@ -1057,7 +1057,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1057 (NUM_TX_BUFFS + NUM_RX_BUFFS), 1057 (NUM_TX_BUFFS + NUM_RX_BUFFS),
1058 &aup->dma_addr, 0); 1058 &aup->dma_addr, 0);
1059 if (!aup->vaddr) { 1059 if (!aup->vaddr) {
1060 printk(KERN_ERR DRV_NAME ": failed to allocate data buffers\n"); 1060 dev_err(&pdev->dev, "failed to allocate data buffers\n");
1061 err = -ENOMEM; 1061 err = -ENOMEM;
1062 goto err_vaddr; 1062 goto err_vaddr;
1063 } 1063 }
@@ -1065,7 +1065,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1065 /* aup->mac is the base address of the MAC's registers */ 1065 /* aup->mac is the base address of the MAC's registers */
1066 aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base)); 1066 aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
1067 if (!aup->mac) { 1067 if (!aup->mac) {
1068 printk(KERN_ERR DRV_NAME ": failed to ioremap MAC registers\n"); 1068 dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
1069 err = -ENXIO; 1069 err = -ENXIO;
1070 goto err_remap1; 1070 goto err_remap1;
1071 } 1071 }
@@ -1073,7 +1073,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1073 /* Setup some variables for quick register address access */ 1073 /* Setup some variables for quick register address access */
1074 aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen)); 1074 aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
1075 if (!aup->enable) { 1075 if (!aup->enable) {
1076 printk(KERN_ERR DRV_NAME ": failed to ioremap MAC enable register\n"); 1076 dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
1077 err = -ENXIO; 1077 err = -ENXIO;
1078 goto err_remap2; 1078 goto err_remap2;
1079 } 1079 }
@@ -1083,14 +1083,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1083 if (prom_get_ethernet_addr(ethaddr) == 0) 1083 if (prom_get_ethernet_addr(ethaddr) == 0)
1084 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr)); 1084 memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
1085 else { 1085 else {
1086 printk(KERN_INFO "%s: No MAC address found\n", 1086 netdev_info(dev, "No MAC address found\n");
1087 dev->name);
1088 /* Use the hard coded MAC addresses */ 1087 /* Use the hard coded MAC addresses */
1089 } 1088 }
1090 1089
1091 setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); 1090 au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
1092 } else if (pdev->id == 1) 1091 } else if (pdev->id == 1)
1093 setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); 1092 au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
1094 1093
1095 /* 1094 /*
1096 * Assign to the Ethernet ports two consecutive MAC addresses 1095 * Assign to the Ethernet ports two consecutive MAC addresses
@@ -1104,7 +1103,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1104 1103
1105 pd = pdev->dev.platform_data; 1104 pd = pdev->dev.platform_data;
1106 if (!pd) { 1105 if (!pd) {
1107 printk(KERN_INFO DRV_NAME ": no platform_data passed, PHY search on MAC0\n"); 1106 dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n");
1108 aup->phy1_search_mac0 = 1; 1107 aup->phy1_search_mac0 = 1;
1109 } else { 1108 } else {
1110 aup->phy_static_config = pd->phy_static_config; 1109 aup->phy_static_config = pd->phy_static_config;
@@ -1116,7 +1115,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1116 } 1115 }
1117 1116
1118 if (aup->phy_busid && aup->phy_busid > 0) { 1117 if (aup->phy_busid && aup->phy_busid > 0) {
1119 printk(KERN_ERR DRV_NAME ": MAC0-associated PHY attached 2nd MACs MII" 1118 dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII"
1120 "bus not supported yet\n"); 1119 "bus not supported yet\n");
1121 err = -ENODEV; 1120 err = -ENODEV;
1122 goto err_mdiobus_alloc; 1121 goto err_mdiobus_alloc;
@@ -1124,7 +1123,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1124 1123
1125 aup->mii_bus = mdiobus_alloc(); 1124 aup->mii_bus = mdiobus_alloc();
1126 if (aup->mii_bus == NULL) { 1125 if (aup->mii_bus == NULL) {
1127 printk(KERN_ERR DRV_NAME ": failed to allocate mdiobus structure\n"); 1126 dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
1128 err = -ENOMEM; 1127 err = -ENOMEM;
1129 goto err_mdiobus_alloc; 1128 goto err_mdiobus_alloc;
1130 } 1129 }
@@ -1139,7 +1138,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1139 if (aup->mii_bus->irq == NULL) 1138 if (aup->mii_bus->irq == NULL)
1140 goto err_out; 1139 goto err_out;
1141 1140
1142 for(i = 0; i < PHY_MAX_ADDR; ++i) 1141 for (i = 0; i < PHY_MAX_ADDR; ++i)
1143 aup->mii_bus->irq[i] = PHY_POLL; 1142 aup->mii_bus->irq[i] = PHY_POLL;
1144 /* if known, set corresponding PHY IRQs */ 1143 /* if known, set corresponding PHY IRQs */
1145 if (aup->phy_static_config) 1144 if (aup->phy_static_config)
@@ -1148,11 +1147,11 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1148 1147
1149 err = mdiobus_register(aup->mii_bus); 1148 err = mdiobus_register(aup->mii_bus);
1150 if (err) { 1149 if (err) {
1151 printk(KERN_ERR DRV_NAME " failed to register MDIO bus\n"); 1150 dev_err(&pdev->dev, "failed to register MDIO bus\n");
1152 goto err_mdiobus_reg; 1151 goto err_mdiobus_reg;
1153 } 1152 }
1154 1153
1155 if (mii_probe(dev) != 0) 1154 if (au1000_mii_probe(dev) != 0)
1156 goto err_out; 1155 goto err_out;
1157 1156
1158 pDBfree = NULL; 1157 pDBfree = NULL;
@@ -1168,7 +1167,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1168 aup->pDBfree = pDBfree; 1167 aup->pDBfree = pDBfree;
1169 1168
1170 for (i = 0; i < NUM_RX_DMA; i++) { 1169 for (i = 0; i < NUM_RX_DMA; i++) {
1171 pDB = GetFreeDB(aup); 1170 pDB = au1000_GetFreeDB(aup);
1172 if (!pDB) { 1171 if (!pDB) {
1173 goto err_out; 1172 goto err_out;
1174 } 1173 }
@@ -1176,7 +1175,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1176 aup->rx_db_inuse[i] = pDB; 1175 aup->rx_db_inuse[i] = pDB;
1177 } 1176 }
1178 for (i = 0; i < NUM_TX_DMA; i++) { 1177 for (i = 0; i < NUM_TX_DMA; i++) {
1179 pDB = GetFreeDB(aup); 1178 pDB = au1000_GetFreeDB(aup);
1180 if (!pDB) { 1179 if (!pDB) {
1181 goto err_out; 1180 goto err_out;
1182 } 1181 }
@@ -1195,17 +1194,16 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1195 * The boot code uses the ethernet controller, so reset it to start 1194 * The boot code uses the ethernet controller, so reset it to start
1196 * fresh. au1000_init() expects that the device is in reset state. 1195 * fresh. au1000_init() expects that the device is in reset state.
1197 */ 1196 */
1198 reset_mac(dev); 1197 au1000_reset_mac(dev);
1199 1198
1200 err = register_netdev(dev); 1199 err = register_netdev(dev);
1201 if (err) { 1200 if (err) {
1202 printk(KERN_ERR DRV_NAME "%s: Cannot register net device, aborting.\n", 1201 netdev_err(dev, "Cannot register net device, aborting.\n");
1203 dev->name);
1204 goto err_out; 1202 goto err_out;
1205 } 1203 }
1206 1204
1207 printk("%s: Au1xx0 Ethernet found at 0x%lx, irq %d\n", 1205 netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
1208 dev->name, (unsigned long)base->start, irq); 1206 (unsigned long)base->start, irq);
1209 if (version_printed++ == 0) 1207 if (version_printed++ == 0)
1210 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR); 1208 printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
1211 1209
@@ -1217,15 +1215,15 @@ err_out:
1217 1215
1218 /* here we should have a valid dev plus aup-> register addresses 1216 /* here we should have a valid dev plus aup-> register addresses
1219 * so we can reset the mac properly.*/ 1217 * so we can reset the mac properly.*/
1220 reset_mac(dev); 1218 au1000_reset_mac(dev);
1221 1219
1222 for (i = 0; i < NUM_RX_DMA; i++) { 1220 for (i = 0; i < NUM_RX_DMA; i++) {
1223 if (aup->rx_db_inuse[i]) 1221 if (aup->rx_db_inuse[i])
1224 ReleaseDB(aup, aup->rx_db_inuse[i]); 1222 au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1225 } 1223 }
1226 for (i = 0; i < NUM_TX_DMA; i++) { 1224 for (i = 0; i < NUM_TX_DMA; i++) {
1227 if (aup->tx_db_inuse[i]) 1225 if (aup->tx_db_inuse[i])
1228 ReleaseDB(aup, aup->tx_db_inuse[i]); 1226 au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1229 } 1227 }
1230err_mdiobus_reg: 1228err_mdiobus_reg:
1231 mdiobus_free(aup->mii_bus); 1229 mdiobus_free(aup->mii_bus);
@@ -1261,11 +1259,11 @@ static int __devexit au1000_remove(struct platform_device *pdev)
1261 1259
1262 for (i = 0; i < NUM_RX_DMA; i++) 1260 for (i = 0; i < NUM_RX_DMA; i++)
1263 if (aup->rx_db_inuse[i]) 1261 if (aup->rx_db_inuse[i])
1264 ReleaseDB(aup, aup->rx_db_inuse[i]); 1262 au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
1265 1263
1266 for (i = 0; i < NUM_TX_DMA; i++) 1264 for (i = 0; i < NUM_TX_DMA; i++)
1267 if (aup->tx_db_inuse[i]) 1265 if (aup->tx_db_inuse[i])
1268 ReleaseDB(aup, aup->tx_db_inuse[i]); 1266 au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
1269 1267
1270 dma_free_noncoherent(NULL, MAX_BUF_SIZE * 1268 dma_free_noncoherent(NULL, MAX_BUF_SIZE *
1271 (NUM_TX_BUFFS + NUM_RX_BUFFS), 1269 (NUM_TX_BUFFS + NUM_RX_BUFFS),
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index f9d29a29b8fd..d06ec008fbf1 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -35,7 +35,7 @@
35#define NUM_TX_BUFFS 4 35#define NUM_TX_BUFFS 4
36#define MAX_BUF_SIZE 2048 36#define MAX_BUF_SIZE 2048
37 37
38#define ETH_TX_TIMEOUT HZ/4 38#define ETH_TX_TIMEOUT (HZ/4)
39#define MAC_MIN_PKT_SIZE 64 39#define MAC_MIN_PKT_SIZE 64
40 40
41#define MULTICAST_FILTER_LIMIT 64 41#define MULTICAST_FILTER_LIMIT 64
@@ -125,4 +125,6 @@ struct au1000_private {
125 dma_addr_t dma_addr; /* dma address of rx/tx buffers */ 125 dma_addr_t dma_addr; /* dma address of rx/tx buffers */
126 126
127 spinlock_t lock; /* Serialise access to device */ 127 spinlock_t lock; /* Serialise access to device */
128
129 u32 msg_enable;
128}; 130};
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index b718dc60afc4..55c9958043c4 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -303,7 +303,6 @@ static void ax_block_output(struct net_device *dev, int count,
303 303
304 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 304 ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
305 ei_status.dmaing &= ~0x01; 305 ei_status.dmaing &= ~0x01;
306 return;
307} 306}
308 307
309/* definitions for accessing MII/EEPROM interface */ 308/* definitions for accessing MII/EEPROM interface */
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 69d9f3d368ae..293f9c16e786 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1014,8 +1014,6 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
1014 if (TX_BUFFS_AVAIL(bp) < 1) 1014 if (TX_BUFFS_AVAIL(bp) < 1)
1015 netif_stop_queue(dev); 1015 netif_stop_queue(dev);
1016 1016
1017 dev->trans_start = jiffies;
1018
1019out_unlock: 1017out_unlock:
1020 spin_unlock_irqrestore(&bp->lock, flags); 1018 spin_unlock_irqrestore(&bp->lock, flags);
1021 1019
@@ -1681,15 +1679,15 @@ static struct net_device_stats *b44_get_stats(struct net_device *dev)
1681 1679
1682static int __b44_load_mcast(struct b44 *bp, struct net_device *dev) 1680static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
1683{ 1681{
1684 struct dev_mc_list *mclist; 1682 struct netdev_hw_addr *ha;
1685 int i, num_ents; 1683 int i, num_ents;
1686 1684
1687 num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE); 1685 num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
1688 i = 0; 1686 i = 0;
1689 netdev_for_each_mc_addr(mclist, dev) { 1687 netdev_for_each_mc_addr(ha, dev) {
1690 if (i == num_ents) 1688 if (i == num_ents)
1691 break; 1689 break;
1692 __b44_cam_write(bp, mclist->dmi_addr, i++ + 1); 1690 __b44_cam_write(bp, ha->addr, i++ + 1);
1693 } 1691 }
1694 return i+1; 1692 return i+1;
1695} 1693}
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 17460aba3bae..faf5add894d7 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -341,11 +341,9 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
341 } 341 }
342 342
343 skb_put(skb, len); 343 skb_put(skb, len);
344 skb->dev = dev;
345 skb->protocol = eth_type_trans(skb, dev); 344 skb->protocol = eth_type_trans(skb, dev);
346 priv->stats.rx_packets++; 345 priv->stats.rx_packets++;
347 priv->stats.rx_bytes += len; 346 priv->stats.rx_bytes += len;
348 dev->last_rx = jiffies;
349 netif_receive_skb(skb); 347 netif_receive_skb(skb);
350 348
351 } while (--budget > 0); 349 } while (--budget > 0);
@@ -567,7 +565,6 @@ static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
567 565
568 priv->stats.tx_bytes += skb->len; 566 priv->stats.tx_bytes += skb->len;
569 priv->stats.tx_packets++; 567 priv->stats.tx_packets++;
570 dev->trans_start = jiffies;
571 ret = NETDEV_TX_OK; 568 ret = NETDEV_TX_OK;
572 569
573out_unlock: 570out_unlock:
@@ -605,7 +602,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
605static void bcm_enet_set_multicast_list(struct net_device *dev) 602static void bcm_enet_set_multicast_list(struct net_device *dev)
606{ 603{
607 struct bcm_enet_priv *priv; 604 struct bcm_enet_priv *priv;
608 struct dev_mc_list *mc_list; 605 struct netdev_hw_addr *ha;
609 u32 val; 606 u32 val;
610 int i; 607 int i;
611 608
@@ -633,14 +630,14 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
633 } 630 }
634 631
635 i = 0; 632 i = 0;
636 netdev_for_each_mc_addr(mc_list, dev) { 633 netdev_for_each_mc_addr(ha, dev) {
637 u8 *dmi_addr; 634 u8 *dmi_addr;
638 u32 tmp; 635 u32 tmp;
639 636
640 if (i == 3) 637 if (i == 3)
641 break; 638 break;
642 /* update perfect match registers */ 639 /* update perfect match registers */
643 dmi_addr = mc_list->dmi_addr; 640 dmi_addr = ha->addr;
644 tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | 641 tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
645 (dmi_addr[4] << 8) | dmi_addr[5]; 642 (dmi_addr[4] << 8) | dmi_addr[5];
646 enet_writel(priv, tmp, ENET_PML_REG(i + 1)); 643 enet_writel(priv, tmp, ENET_PML_REG(i + 1));
@@ -960,7 +957,9 @@ static int bcm_enet_open(struct net_device *dev)
960 /* all set, enable mac and interrupts, start dma engine and 957 /* all set, enable mac and interrupts, start dma engine and
961 * kick rx dma channel */ 958 * kick rx dma channel */
962 wmb(); 959 wmb();
963 enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG); 960 val = enet_readl(priv, ENET_CTL_REG);
961 val |= ENET_CTL_ENABLE_MASK;
962 enet_writel(priv, val, ENET_CTL_REG);
964 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); 963 enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
965 enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, 964 enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
966 ENETDMA_CHANCFG_REG(priv->rx_chan)); 965 ENETDMA_CHANCFG_REG(priv->rx_chan));
@@ -1647,7 +1646,6 @@ static int __devinit bcm_enet_probe(struct platform_device *pdev)
1647 if (!dev) 1646 if (!dev)
1648 return -ENOMEM; 1647 return -ENOMEM;
1649 priv = netdev_priv(dev); 1648 priv = netdev_priv(dev);
1650 memset(priv, 0, sizeof(*priv));
1651 1649
1652 ret = compute_hw_mtu(priv, dev->mtu); 1650 ret = compute_hw_mtu(priv, dev->mtu);
1653 if (ret) 1651 if (ret)
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 56387b191c96..373c1a563474 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -84,6 +84,8 @@ static inline char *nic_name(struct pci_dev *pdev)
84 84
85#define FW_VER_LEN 32 85#define FW_VER_LEN 32
86 86
87#define BE_MAX_VF 32
88
87struct be_dma_mem { 89struct be_dma_mem {
88 void *va; 90 void *va;
89 dma_addr_t dma; 91 dma_addr_t dma;
@@ -207,7 +209,7 @@ struct be_tx_obj {
207/* Struct to remember the pages posted for rx frags */ 209/* Struct to remember the pages posted for rx frags */
208struct be_rx_page_info { 210struct be_rx_page_info {
209 struct page *page; 211 struct page *page;
210 dma_addr_t bus; 212 DEFINE_DMA_UNMAP_ADDR(bus);
211 u16 page_offset; 213 u16 page_offset;
212 bool last_page_user; 214 bool last_page_user;
213}; 215};
@@ -281,8 +283,15 @@ struct be_adapter {
281 u8 port_type; 283 u8 port_type;
282 u8 transceiver; 284 u8 transceiver;
283 u8 generation; /* BladeEngine ASIC generation */ 285 u8 generation; /* BladeEngine ASIC generation */
286
287 bool sriov_enabled;
288 u32 vf_if_handle[BE_MAX_VF];
289 u32 vf_pmac_id[BE_MAX_VF];
290 u8 base_eq_id;
284}; 291};
285 292
293#define be_physfn(adapter) (!adapter->pdev->is_virtfn)
294
286/* BladeEngine Generation numbers */ 295/* BladeEngine Generation numbers */
287#define BE_GEN2 2 296#define BE_GEN2 2
288#define BE_GEN3 3 297#define BE_GEN3 3
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d0ef4ac987cd..e79bf8b9af3b 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -843,7 +843,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
843 * Uses mbox 843 * Uses mbox
844 */ 844 */
845int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, 845int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
846 u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id) 846 u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
847 u32 domain)
847{ 848{
848 struct be_mcc_wrb *wrb; 849 struct be_mcc_wrb *wrb;
849 struct be_cmd_req_if_create *req; 850 struct be_cmd_req_if_create *req;
@@ -860,6 +861,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
860 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 861 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
861 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req)); 862 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
862 863
864 req->hdr.domain = domain;
863 req->capability_flags = cpu_to_le32(cap_flags); 865 req->capability_flags = cpu_to_le32(cap_flags);
864 req->enable_flags = cpu_to_le32(en_flags); 866 req->enable_flags = cpu_to_le32(en_flags);
865 req->pmac_invalid = pmac_invalid; 867 req->pmac_invalid = pmac_invalid;
@@ -1111,6 +1113,10 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
1111 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1113 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1112 OPCODE_ETH_PROMISCUOUS, sizeof(*req)); 1114 OPCODE_ETH_PROMISCUOUS, sizeof(*req));
1113 1115
1116 /* In FW versions X.102.149/X.101.487 and later,
1117 * the port setting associated only with the
1118 * issuing pci function will take effect
1119 */
1114 if (port_num) 1120 if (port_num)
1115 req->port1_promiscuous = en; 1121 req->port1_promiscuous = en;
1116 else 1122 else
@@ -1157,13 +1163,13 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1157 req->interface_id = if_id; 1163 req->interface_id = if_id;
1158 if (netdev) { 1164 if (netdev) {
1159 int i; 1165 int i;
1160 struct dev_mc_list *mc; 1166 struct netdev_hw_addr *ha;
1161 1167
1162 req->num_mac = cpu_to_le16(netdev_mc_count(netdev)); 1168 req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
1163 1169
1164 i = 0; 1170 i = 0;
1165 netdev_for_each_mc_addr(mc, netdev) 1171 netdev_for_each_mc_addr(ha, netdev)
1166 memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN); 1172 memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
1167 } else { 1173 } else {
1168 req->promiscuous = 1; 1174 req->promiscuous = 1;
1169 } 1175 }
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index cce61f9a3714..763dc199e337 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -878,7 +878,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
878extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id); 878extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
879extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, 879extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
880 u32 en_flags, u8 *mac, bool pmac_invalid, 880 u32 en_flags, u8 *mac, bool pmac_invalid,
881 u32 *if_handle, u32 *pmac_id); 881 u32 *if_handle, u32 *pmac_id, u32 domain);
882extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle); 882extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
883extern int be_cmd_eq_create(struct be_adapter *adapter, 883extern int be_cmd_eq_create(struct be_adapter *adapter,
884 struct be_queue_info *eq, int eq_delay); 884 struct be_queue_info *eq, int eq_delay);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 51e1065e7897..200e98515909 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -276,8 +276,6 @@ be_get_ethtool_stats(struct net_device *netdev,
276 data[i] = (et_stats[i].size == sizeof(u64)) ? 276 data[i] = (et_stats[i].size == sizeof(u64)) ?
277 *(u64 *)p: *(u32 *)p; 277 *(u64 *)p: *(u32 *)p;
278 } 278 }
279
280 return;
281} 279}
282 280
283static void 281static void
@@ -466,7 +464,6 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
466 else 464 else
467 wol->wolopts = 0; 465 wol->wolopts = 0;
468 memset(&wol->sopass, 0, sizeof(wol->sopass)); 466 memset(&wol->sopass, 0, sizeof(wol->sopass));
469 return;
470} 467}
471 468
472static int 469static int
@@ -496,7 +493,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
496 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, 493 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
497 &ddrdma_cmd.dma); 494 &ddrdma_cmd.dma);
498 if (!ddrdma_cmd.va) { 495 if (!ddrdma_cmd.va) {
499 dev_err(&adapter->pdev->dev, "Memory allocation failure \n"); 496 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
500 return -ENOMEM; 497 return -ENOMEM;
501 } 498 }
502 499
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 2d4a4b827637..063026de4957 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -99,6 +99,9 @@
99/* Number of entries posted */ 99/* Number of entries posted */
100#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */ 100#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
101 101
102/********** SRIOV VF PCICFG OFFSET ********/
103#define SRIOV_VF_PCICFG_OFFSET (4096)
104
102/* Flashrom related descriptors */ 105/* Flashrom related descriptors */
103#define IMAGE_TYPE_FIRMWARE 160 106#define IMAGE_TYPE_FIRMWARE 160
104#define IMAGE_TYPE_BOOTCODE 224 107#define IMAGE_TYPE_BOOTCODE 224
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index ec6ace802256..058d7f95f5ae 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -26,8 +26,11 @@ MODULE_AUTHOR("ServerEngines Corporation");
26MODULE_LICENSE("GPL"); 26MODULE_LICENSE("GPL");
27 27
28static unsigned int rx_frag_size = 2048; 28static unsigned int rx_frag_size = 2048;
29static unsigned int num_vfs;
29module_param(rx_frag_size, uint, S_IRUGO); 30module_param(rx_frag_size, uint, S_IRUGO);
31module_param(num_vfs, uint, S_IRUGO);
30MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); 32MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
31 34
32static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { 35static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
33 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 36 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -138,12 +141,19 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
138 if (!is_valid_ether_addr(addr->sa_data)) 141 if (!is_valid_ether_addr(addr->sa_data))
139 return -EADDRNOTAVAIL; 142 return -EADDRNOTAVAIL;
140 143
144 /* MAC addr configuration will be done in hardware for VFs
145 * by their corresponding PFs. Just copy to netdev addr here
146 */
147 if (!be_physfn(adapter))
148 goto netdev_addr;
149
141 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id); 150 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
142 if (status) 151 if (status)
143 return status; 152 return status;
144 153
145 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, 154 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
146 adapter->if_handle, &adapter->pmac_id); 155 adapter->if_handle, &adapter->pmac_id);
156netdev_addr:
147 if (!status) 157 if (!status)
148 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 158 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
149 159
@@ -386,26 +396,48 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
386 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); 396 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
387} 397}
388 398
399static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
400 bool unmap_single)
401{
402 dma_addr_t dma;
403
404 be_dws_le_to_cpu(wrb, sizeof(*wrb));
405
406 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
407 if (wrb->frag_len) {
408 if (unmap_single)
409 pci_unmap_single(pdev, dma, wrb->frag_len,
410 PCI_DMA_TODEVICE);
411 else
412 pci_unmap_page(pdev, dma, wrb->frag_len,
413 PCI_DMA_TODEVICE);
414 }
415}
389 416
390static int make_tx_wrbs(struct be_adapter *adapter, 417static int make_tx_wrbs(struct be_adapter *adapter,
391 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb) 418 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
392{ 419{
393 u64 busaddr; 420 dma_addr_t busaddr;
394 u32 i, copied = 0; 421 int i, copied = 0;
395 struct pci_dev *pdev = adapter->pdev; 422 struct pci_dev *pdev = adapter->pdev;
396 struct sk_buff *first_skb = skb; 423 struct sk_buff *first_skb = skb;
397 struct be_queue_info *txq = &adapter->tx_obj.q; 424 struct be_queue_info *txq = &adapter->tx_obj.q;
398 struct be_eth_wrb *wrb; 425 struct be_eth_wrb *wrb;
399 struct be_eth_hdr_wrb *hdr; 426 struct be_eth_hdr_wrb *hdr;
427 bool map_single = false;
428 u16 map_head;
400 429
401 hdr = queue_head_node(txq); 430 hdr = queue_head_node(txq);
402 atomic_add(wrb_cnt, &txq->used);
403 queue_head_inc(txq); 431 queue_head_inc(txq);
432 map_head = txq->head;
404 433
405 if (skb->len > skb->data_len) { 434 if (skb->len > skb->data_len) {
406 int len = skb->len - skb->data_len; 435 int len = skb_headlen(skb);
407 busaddr = pci_map_single(pdev, skb->data, len, 436 busaddr = pci_map_single(pdev, skb->data, len,
408 PCI_DMA_TODEVICE); 437 PCI_DMA_TODEVICE);
438 if (pci_dma_mapping_error(pdev, busaddr))
439 goto dma_err;
440 map_single = true;
409 wrb = queue_head_node(txq); 441 wrb = queue_head_node(txq);
410 wrb_fill(wrb, busaddr, len); 442 wrb_fill(wrb, busaddr, len);
411 be_dws_cpu_to_le(wrb, sizeof(*wrb)); 443 be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -419,6 +451,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
419 busaddr = pci_map_page(pdev, frag->page, 451 busaddr = pci_map_page(pdev, frag->page,
420 frag->page_offset, 452 frag->page_offset,
421 frag->size, PCI_DMA_TODEVICE); 453 frag->size, PCI_DMA_TODEVICE);
454 if (pci_dma_mapping_error(pdev, busaddr))
455 goto dma_err;
422 wrb = queue_head_node(txq); 456 wrb = queue_head_node(txq);
423 wrb_fill(wrb, busaddr, frag->size); 457 wrb_fill(wrb, busaddr, frag->size);
424 be_dws_cpu_to_le(wrb, sizeof(*wrb)); 458 be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -438,6 +472,16 @@ static int make_tx_wrbs(struct be_adapter *adapter,
438 be_dws_cpu_to_le(hdr, sizeof(*hdr)); 472 be_dws_cpu_to_le(hdr, sizeof(*hdr));
439 473
440 return copied; 474 return copied;
475dma_err:
476 txq->head = map_head;
477 while (copied) {
478 wrb = queue_head_node(txq);
479 unmap_tx_frag(pdev, wrb, map_single);
480 map_single = false;
481 copied -= wrb->frag_len;
482 queue_head_inc(txq);
483 }
484 return 0;
441} 485}
442 486
443static netdev_tx_t be_xmit(struct sk_buff *skb, 487static netdev_tx_t be_xmit(struct sk_buff *skb,
@@ -462,6 +506,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
462 * *BEFORE* ringing the tx doorbell, so that we serialze the 506 * *BEFORE* ringing the tx doorbell, so that we serialze the
463 * tx compls of the current transmit which'll wake up the queue 507 * tx compls of the current transmit which'll wake up the queue
464 */ 508 */
509 atomic_add(wrb_cnt, &txq->used);
465 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= 510 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
466 txq->len) { 511 txq->len) {
467 netif_stop_queue(netdev); 512 netif_stop_queue(netdev);
@@ -541,6 +586,9 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
541{ 586{
542 struct be_adapter *adapter = netdev_priv(netdev); 587 struct be_adapter *adapter = netdev_priv(netdev);
543 588
589 if (!be_physfn(adapter))
590 return;
591
544 adapter->vlan_tag[vid] = 1; 592 adapter->vlan_tag[vid] = 1;
545 adapter->vlans_added++; 593 adapter->vlans_added++;
546 if (adapter->vlans_added <= (adapter->max_vlans + 1)) 594 if (adapter->vlans_added <= (adapter->max_vlans + 1))
@@ -551,6 +599,9 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
551{ 599{
552 struct be_adapter *adapter = netdev_priv(netdev); 600 struct be_adapter *adapter = netdev_priv(netdev);
553 601
602 if (!be_physfn(adapter))
603 return;
604
554 adapter->vlan_tag[vid] = 0; 605 adapter->vlan_tag[vid] = 0;
555 vlan_group_set_device(adapter->vlan_grp, vid, NULL); 606 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
556 adapter->vlans_added--; 607 adapter->vlans_added--;
@@ -588,6 +639,28 @@ done:
588 return; 639 return;
589} 640}
590 641
642static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
643{
644 struct be_adapter *adapter = netdev_priv(netdev);
645 int status;
646
647 if (!adapter->sriov_enabled)
648 return -EPERM;
649
650 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
651 return -EINVAL;
652
653 status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf],
654 adapter->vf_pmac_id[vf]);
655
656 status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf],
657 &adapter->vf_pmac_id[vf]);
658 if (!status)
659 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
660 mac, vf);
661 return status;
662}
663
591static void be_rx_rate_update(struct be_adapter *adapter) 664static void be_rx_rate_update(struct be_adapter *adapter)
592{ 665{
593 struct be_drvr_stats *stats = drvr_stats(adapter); 666 struct be_drvr_stats *stats = drvr_stats(adapter);
@@ -647,7 +720,7 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
647 BUG_ON(!rx_page_info->page); 720 BUG_ON(!rx_page_info->page);
648 721
649 if (rx_page_info->last_page_user) { 722 if (rx_page_info->last_page_user) {
650 pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus), 723 pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
651 adapter->big_page_size, PCI_DMA_FROMDEVICE); 724 adapter->big_page_size, PCI_DMA_FROMDEVICE);
652 rx_page_info->last_page_user = false; 725 rx_page_info->last_page_user = false;
653 } 726 }
@@ -757,7 +830,6 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
757 830
758done: 831done:
759 be_rx_stats_update(adapter, pktsize, num_rcvd); 832 be_rx_stats_update(adapter, pktsize, num_rcvd);
760 return;
761} 833}
762 834
763/* Process the RX completion indicated by rxcp when GRO is disabled */ 835/* Process the RX completion indicated by rxcp when GRO is disabled */
@@ -791,7 +863,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
791 863
792 skb->truesize = skb->len + sizeof(struct sk_buff); 864 skb->truesize = skb->len + sizeof(struct sk_buff);
793 skb->protocol = eth_type_trans(skb, adapter->netdev); 865 skb->protocol = eth_type_trans(skb, adapter->netdev);
794 skb->dev = adapter->netdev;
795 866
796 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 867 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
797 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); 868 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
@@ -812,8 +883,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
812 } else { 883 } else {
813 netif_receive_skb(skb); 884 netif_receive_skb(skb);
814 } 885 }
815
816 return;
817} 886}
818 887
819/* Process the RX completion indicated by rxcp when GRO is enabled */ 888/* Process the RX completion indicated by rxcp when GRO is enabled */
@@ -893,7 +962,6 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
893 } 962 }
894 963
895 be_rx_stats_update(adapter, pkt_size, num_rcvd); 964 be_rx_stats_update(adapter, pkt_size, num_rcvd);
896 return;
897} 965}
898 966
899static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter) 967static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
@@ -959,7 +1027,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
959 } 1027 }
960 page_offset = page_info->page_offset; 1028 page_offset = page_info->page_offset;
961 page_info->page = pagep; 1029 page_info->page = pagep;
962 pci_unmap_addr_set(page_info, bus, page_dmaaddr); 1030 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
963 frag_dmaaddr = page_dmaaddr + page_info->page_offset; 1031 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
964 1032
965 rxd = queue_head_node(rxq); 1033 rxd = queue_head_node(rxq);
@@ -987,8 +1055,6 @@ static void be_post_rx_frags(struct be_adapter *adapter)
987 /* Let be_worker replenish when memory is available */ 1055 /* Let be_worker replenish when memory is available */
988 adapter->rx_post_starved = true; 1056 adapter->rx_post_starved = true;
989 } 1057 }
990
991 return;
992} 1058}
993 1059
994static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq) 1060static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
@@ -1012,35 +1078,26 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1012 struct be_eth_wrb *wrb; 1078 struct be_eth_wrb *wrb;
1013 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; 1079 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1014 struct sk_buff *sent_skb; 1080 struct sk_buff *sent_skb;
1015 u64 busaddr; 1081 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1016 u16 cur_index, num_wrbs = 0; 1082 bool unmap_skb_hdr = true;
1017 1083
1018 cur_index = txq->tail; 1084 sent_skb = sent_skbs[txq->tail];
1019 sent_skb = sent_skbs[cur_index];
1020 BUG_ON(!sent_skb); 1085 BUG_ON(!sent_skb);
1021 sent_skbs[cur_index] = NULL; 1086 sent_skbs[txq->tail] = NULL;
1022 wrb = queue_tail_node(txq); 1087
1023 be_dws_le_to_cpu(wrb, sizeof(*wrb)); 1088 /* skip header wrb */
1024 busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
1025 if (busaddr != 0) {
1026 pci_unmap_single(adapter->pdev, busaddr,
1027 wrb->frag_len, PCI_DMA_TODEVICE);
1028 }
1029 num_wrbs++;
1030 queue_tail_inc(txq); 1089 queue_tail_inc(txq);
1031 1090
1032 while (cur_index != last_index) { 1091 do {
1033 cur_index = txq->tail; 1092 cur_index = txq->tail;
1034 wrb = queue_tail_node(txq); 1093 wrb = queue_tail_node(txq);
1035 be_dws_le_to_cpu(wrb, sizeof(*wrb)); 1094 unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
1036 busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo; 1095 skb_headlen(sent_skb)));
1037 if (busaddr != 0) { 1096 unmap_skb_hdr = false;
1038 pci_unmap_page(adapter->pdev, busaddr, 1097
1039 wrb->frag_len, PCI_DMA_TODEVICE);
1040 }
1041 num_wrbs++; 1098 num_wrbs++;
1042 queue_tail_inc(txq); 1099 queue_tail_inc(txq);
1043 } 1100 } while (cur_index != last_index);
1044 1101
1045 atomic_sub(num_wrbs, &txq->used); 1102 atomic_sub(num_wrbs, &txq->used);
1046 1103
@@ -1255,6 +1312,8 @@ static int be_tx_queues_create(struct be_adapter *adapter)
1255 /* Ask BE to create Tx Event queue */ 1312 /* Ask BE to create Tx Event queue */
1256 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) 1313 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1257 goto tx_eq_free; 1314 goto tx_eq_free;
1315 adapter->base_eq_id = adapter->tx_eq.q.id;
1316
1258 /* Alloc TX eth compl queue */ 1317 /* Alloc TX eth compl queue */
1259 cq = &adapter->tx_obj.cq; 1318 cq = &adapter->tx_obj.cq;
1260 if (be_queue_alloc(adapter, cq, TX_CQ_LEN, 1319 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
@@ -1382,7 +1441,7 @@ rx_eq_free:
1382/* There are 8 evt ids per func. Retruns the evt id's bit number */ 1441/* There are 8 evt ids per func. Retruns the evt id's bit number */
1383static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id) 1442static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1384{ 1443{
1385 return eq_id % 8; 1444 return eq_id - adapter->base_eq_id;
1386} 1445}
1387 1446
1388static irqreturn_t be_intx(int irq, void *dev) 1447static irqreturn_t be_intx(int irq, void *dev)
@@ -1557,7 +1616,27 @@ static void be_msix_enable(struct be_adapter *adapter)
1557 BE_NUM_MSIX_VECTORS); 1616 BE_NUM_MSIX_VECTORS);
1558 if (status == 0) 1617 if (status == 0)
1559 adapter->msix_enabled = true; 1618 adapter->msix_enabled = true;
1560 return; 1619}
1620
1621static void be_sriov_enable(struct be_adapter *adapter)
1622{
1623#ifdef CONFIG_PCI_IOV
1624 int status;
1625 if (be_physfn(adapter) && num_vfs) {
1626 status = pci_enable_sriov(adapter->pdev, num_vfs);
1627 adapter->sriov_enabled = status ? false : true;
1628 }
1629#endif
1630}
1631
1632static void be_sriov_disable(struct be_adapter *adapter)
1633{
1634#ifdef CONFIG_PCI_IOV
1635 if (adapter->sriov_enabled) {
1636 pci_disable_sriov(adapter->pdev);
1637 adapter->sriov_enabled = false;
1638 }
1639#endif
1561} 1640}
1562 1641
1563static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id) 1642static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
@@ -1617,6 +1696,9 @@ static int be_irq_register(struct be_adapter *adapter)
1617 status = be_msix_register(adapter); 1696 status = be_msix_register(adapter);
1618 if (status == 0) 1697 if (status == 0)
1619 goto done; 1698 goto done;
1699 /* INTx is not supported for VF */
1700 if (!be_physfn(adapter))
1701 return status;
1620 } 1702 }
1621 1703
1622 /* INTx */ 1704 /* INTx */
@@ -1651,7 +1733,6 @@ static void be_irq_unregister(struct be_adapter *adapter)
1651 be_free_irq(adapter, &adapter->rx_eq); 1733 be_free_irq(adapter, &adapter->rx_eq);
1652done: 1734done:
1653 adapter->isr_registered = false; 1735 adapter->isr_registered = false;
1654 return;
1655} 1736}
1656 1737
1657static int be_open(struct net_device *netdev) 1738static int be_open(struct net_device *netdev)
@@ -1690,14 +1771,17 @@ static int be_open(struct net_device *netdev)
1690 goto ret_sts; 1771 goto ret_sts;
1691 be_link_status_update(adapter, link_up); 1772 be_link_status_update(adapter, link_up);
1692 1773
1693 status = be_vid_config(adapter); 1774 if (be_physfn(adapter))
1775 status = be_vid_config(adapter);
1694 if (status) 1776 if (status)
1695 goto ret_sts; 1777 goto ret_sts;
1696 1778
1697 status = be_cmd_set_flow_control(adapter, 1779 if (be_physfn(adapter)) {
1698 adapter->tx_fc, adapter->rx_fc); 1780 status = be_cmd_set_flow_control(adapter,
1699 if (status) 1781 adapter->tx_fc, adapter->rx_fc);
1700 goto ret_sts; 1782 if (status)
1783 goto ret_sts;
1784 }
1701 1785
1702 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); 1786 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
1703ret_sts: 1787ret_sts:
@@ -1723,7 +1807,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
1723 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); 1807 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
1724 if (status) { 1808 if (status) {
1725 dev_err(&adapter->pdev->dev, 1809 dev_err(&adapter->pdev->dev,
1726 "Could not enable Wake-on-lan \n"); 1810 "Could not enable Wake-on-lan\n");
1727 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, 1811 pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
1728 cmd.dma); 1812 cmd.dma);
1729 return status; 1813 return status;
@@ -1745,22 +1829,48 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
1745static int be_setup(struct be_adapter *adapter) 1829static int be_setup(struct be_adapter *adapter)
1746{ 1830{
1747 struct net_device *netdev = adapter->netdev; 1831 struct net_device *netdev = adapter->netdev;
1748 u32 cap_flags, en_flags; 1832 u32 cap_flags, en_flags, vf = 0;
1749 int status; 1833 int status;
1834 u8 mac[ETH_ALEN];
1835
1836 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
1750 1837
1751 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 1838 if (be_physfn(adapter)) {
1752 BE_IF_FLAGS_MCAST_PROMISCUOUS | 1839 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
1753 BE_IF_FLAGS_PROMISCUOUS | 1840 BE_IF_FLAGS_PROMISCUOUS |
1754 BE_IF_FLAGS_PASS_L3L4_ERRORS; 1841 BE_IF_FLAGS_PASS_L3L4_ERRORS;
1755 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 1842 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
1756 BE_IF_FLAGS_PASS_L3L4_ERRORS; 1843 }
1757 1844
1758 status = be_cmd_if_create(adapter, cap_flags, en_flags, 1845 status = be_cmd_if_create(adapter, cap_flags, en_flags,
1759 netdev->dev_addr, false/* pmac_invalid */, 1846 netdev->dev_addr, false/* pmac_invalid */,
1760 &adapter->if_handle, &adapter->pmac_id); 1847 &adapter->if_handle, &adapter->pmac_id, 0);
1761 if (status != 0) 1848 if (status != 0)
1762 goto do_none; 1849 goto do_none;
1763 1850
1851 if (be_physfn(adapter)) {
1852 while (vf < num_vfs) {
1853 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
1854 | BE_IF_FLAGS_BROADCAST;
1855 status = be_cmd_if_create(adapter, cap_flags, en_flags,
1856 mac, true, &adapter->vf_if_handle[vf],
1857 NULL, vf+1);
1858 if (status) {
1859 dev_err(&adapter->pdev->dev,
1860 "Interface Create failed for VF %d\n", vf);
1861 goto if_destroy;
1862 }
1863 vf++;
1864 } while (vf < num_vfs);
1865 } else if (!be_physfn(adapter)) {
1866 status = be_cmd_mac_addr_query(adapter, mac,
1867 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
1868 if (!status) {
1869 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
1870 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
1871 }
1872 }
1873
1764 status = be_tx_queues_create(adapter); 1874 status = be_tx_queues_create(adapter);
1765 if (status != 0) 1875 if (status != 0)
1766 goto if_destroy; 1876 goto if_destroy;
@@ -1782,6 +1892,9 @@ rx_qs_destroy:
1782tx_qs_destroy: 1892tx_qs_destroy:
1783 be_tx_queues_destroy(adapter); 1893 be_tx_queues_destroy(adapter);
1784if_destroy: 1894if_destroy:
1895 for (vf = 0; vf < num_vfs; vf++)
1896 if (adapter->vf_if_handle[vf])
1897 be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]);
1785 be_cmd_if_destroy(adapter, adapter->if_handle); 1898 be_cmd_if_destroy(adapter, adapter->if_handle);
1786do_none: 1899do_none:
1787 return status; 1900 return status;
@@ -2061,6 +2174,7 @@ static struct net_device_ops be_netdev_ops = {
2061 .ndo_vlan_rx_register = be_vlan_register, 2174 .ndo_vlan_rx_register = be_vlan_register,
2062 .ndo_vlan_rx_add_vid = be_vlan_add_vid, 2175 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2063 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, 2176 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2177 .ndo_set_vf_mac = be_set_vf_mac
2064}; 2178};
2065 2179
2066static void be_netdev_init(struct net_device *netdev) 2180static void be_netdev_init(struct net_device *netdev)
@@ -2102,37 +2216,48 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
2102 iounmap(adapter->csr); 2216 iounmap(adapter->csr);
2103 if (adapter->db) 2217 if (adapter->db)
2104 iounmap(adapter->db); 2218 iounmap(adapter->db);
2105 if (adapter->pcicfg) 2219 if (adapter->pcicfg && be_physfn(adapter))
2106 iounmap(adapter->pcicfg); 2220 iounmap(adapter->pcicfg);
2107} 2221}
2108 2222
2109static int be_map_pci_bars(struct be_adapter *adapter) 2223static int be_map_pci_bars(struct be_adapter *adapter)
2110{ 2224{
2111 u8 __iomem *addr; 2225 u8 __iomem *addr;
2112 int pcicfg_reg; 2226 int pcicfg_reg, db_reg;
2113 2227
2114 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), 2228 if (be_physfn(adapter)) {
2115 pci_resource_len(adapter->pdev, 2)); 2229 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2116 if (addr == NULL) 2230 pci_resource_len(adapter->pdev, 2));
2117 return -ENOMEM; 2231 if (addr == NULL)
2118 adapter->csr = addr; 2232 return -ENOMEM;
2119 2233 adapter->csr = addr;
2120 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4), 2234 }
2121 128 * 1024);
2122 if (addr == NULL)
2123 goto pci_map_err;
2124 adapter->db = addr;
2125 2235
2126 if (adapter->generation == BE_GEN2) 2236 if (adapter->generation == BE_GEN2) {
2127 pcicfg_reg = 1; 2237 pcicfg_reg = 1;
2128 else 2238 db_reg = 4;
2239 } else {
2129 pcicfg_reg = 0; 2240 pcicfg_reg = 0;
2130 2241 if (be_physfn(adapter))
2131 addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg), 2242 db_reg = 4;
2132 pci_resource_len(adapter->pdev, pcicfg_reg)); 2243 else
2244 db_reg = 0;
2245 }
2246 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2247 pci_resource_len(adapter->pdev, db_reg));
2133 if (addr == NULL) 2248 if (addr == NULL)
2134 goto pci_map_err; 2249 goto pci_map_err;
2135 adapter->pcicfg = addr; 2250 adapter->db = addr;
2251
2252 if (be_physfn(adapter)) {
2253 addr = ioremap_nocache(
2254 pci_resource_start(adapter->pdev, pcicfg_reg),
2255 pci_resource_len(adapter->pdev, pcicfg_reg));
2256 if (addr == NULL)
2257 goto pci_map_err;
2258 adapter->pcicfg = addr;
2259 } else
2260 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2136 2261
2137 return 0; 2262 return 0;
2138pci_map_err: 2263pci_map_err:
@@ -2246,6 +2371,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
2246 2371
2247 be_ctrl_cleanup(adapter); 2372 be_ctrl_cleanup(adapter);
2248 2373
2374 be_sriov_disable(adapter);
2375
2249 be_msix_disable(adapter); 2376 be_msix_disable(adapter);
2250 2377
2251 pci_set_drvdata(pdev, NULL); 2378 pci_set_drvdata(pdev, NULL);
@@ -2270,16 +2397,20 @@ static int be_get_config(struct be_adapter *adapter)
2270 return status; 2397 return status;
2271 2398
2272 memset(mac, 0, ETH_ALEN); 2399 memset(mac, 0, ETH_ALEN);
2273 status = be_cmd_mac_addr_query(adapter, mac, 2400
2401 if (be_physfn(adapter)) {
2402 status = be_cmd_mac_addr_query(adapter, mac,
2274 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0); 2403 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2275 if (status)
2276 return status;
2277 2404
2278 if (!is_valid_ether_addr(mac)) 2405 if (status)
2279 return -EADDRNOTAVAIL; 2406 return status;
2280 2407
2281 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 2408 if (!is_valid_ether_addr(mac))
2282 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 2409 return -EADDRNOTAVAIL;
2410
2411 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2412 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2413 }
2283 2414
2284 if (adapter->cap & 0x400) 2415 if (adapter->cap & 0x400)
2285 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4; 2416 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
@@ -2296,6 +2427,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
2296 struct be_adapter *adapter; 2427 struct be_adapter *adapter;
2297 struct net_device *netdev; 2428 struct net_device *netdev;
2298 2429
2430
2299 status = pci_enable_device(pdev); 2431 status = pci_enable_device(pdev);
2300 if (status) 2432 if (status)
2301 goto do_none; 2433 goto do_none;
@@ -2344,24 +2476,28 @@ static int __devinit be_probe(struct pci_dev *pdev,
2344 } 2476 }
2345 } 2477 }
2346 2478
2479 be_sriov_enable(adapter);
2480
2347 status = be_ctrl_init(adapter); 2481 status = be_ctrl_init(adapter);
2348 if (status) 2482 if (status)
2349 goto free_netdev; 2483 goto free_netdev;
2350 2484
2351 /* sync up with fw's ready state */ 2485 /* sync up with fw's ready state */
2352 status = be_cmd_POST(adapter); 2486 if (be_physfn(adapter)) {
2353 if (status) 2487 status = be_cmd_POST(adapter);
2354 goto ctrl_clean; 2488 if (status)
2489 goto ctrl_clean;
2490
2491 status = be_cmd_reset_function(adapter);
2492 if (status)
2493 goto ctrl_clean;
2494 }
2355 2495
2356 /* tell fw we're ready to fire cmds */ 2496 /* tell fw we're ready to fire cmds */
2357 status = be_cmd_fw_init(adapter); 2497 status = be_cmd_fw_init(adapter);
2358 if (status) 2498 if (status)
2359 goto ctrl_clean; 2499 goto ctrl_clean;
2360 2500
2361 status = be_cmd_reset_function(adapter);
2362 if (status)
2363 goto ctrl_clean;
2364
2365 status = be_stats_init(adapter); 2501 status = be_stats_init(adapter);
2366 if (status) 2502 if (status)
2367 goto ctrl_clean; 2503 goto ctrl_clean;
@@ -2391,6 +2527,7 @@ ctrl_clean:
2391 be_ctrl_cleanup(adapter); 2527 be_ctrl_cleanup(adapter);
2392free_netdev: 2528free_netdev:
2393 be_msix_disable(adapter); 2529 be_msix_disable(adapter);
2530 be_sriov_disable(adapter);
2394 free_netdev(adapter->netdev); 2531 free_netdev(adapter->netdev);
2395 pci_set_drvdata(pdev, NULL); 2532 pci_set_drvdata(pdev, NULL);
2396rel_reg: 2533rel_reg:
@@ -2474,8 +2611,6 @@ static void be_shutdown(struct pci_dev *pdev)
2474 be_setup_wol(adapter, true); 2611 be_setup_wol(adapter, true);
2475 2612
2476 pci_disable_device(pdev); 2613 pci_disable_device(pdev);
2477
2478 return;
2479} 2614}
2480 2615
2481static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev, 2616static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
@@ -2557,7 +2692,6 @@ static void be_eeh_resume(struct pci_dev *pdev)
2557 return; 2692 return;
2558err: 2693err:
2559 dev_err(&adapter->pdev->dev, "EEH resume failed\n"); 2694 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
2560 return;
2561} 2695}
2562 2696
2563static struct pci_error_handlers be_eeh_handlers = { 2697static struct pci_error_handlers be_eeh_handlers = {
@@ -2587,6 +2721,13 @@ static int __init be_init_module(void)
2587 rx_frag_size = 2048; 2721 rx_frag_size = 2048;
2588 } 2722 }
2589 2723
2724 if (num_vfs > 32) {
2725 printk(KERN_WARNING DRV_NAME
2726 " : Module param num_vfs must not be greater than 32."
2727 "Using 32\n");
2728 num_vfs = 32;
2729 }
2730
2590 return pci_register_driver(&be_driver); 2731 return pci_register_driver(&be_driver);
2591} 2732}
2592module_init(be_init_module); 2733module_init(be_init_module);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 587f93cf03f6..39a54bad397f 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -33,6 +33,7 @@
33#include <asm/dma.h> 33#include <asm/dma.h>
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35 35
36#include <asm/div64.h>
36#include <asm/dpmc.h> 37#include <asm/dpmc.h>
37#include <asm/blackfin.h> 38#include <asm/blackfin.h>
38#include <asm/cacheflush.h> 39#include <asm/cacheflush.h>
@@ -80,9 +81,6 @@ static u16 pin_req[] = P_RMII0;
80static u16 pin_req[] = P_MII0; 81static u16 pin_req[] = P_MII0;
81#endif 82#endif
82 83
83static void bfin_mac_disable(void);
84static void bfin_mac_enable(void);
85
86static void desc_list_free(void) 84static void desc_list_free(void)
87{ 85{
88 struct net_dma_desc_rx *r; 86 struct net_dma_desc_rx *r;
@@ -202,6 +200,11 @@ static int desc_list_init(void)
202 goto init_error; 200 goto init_error;
203 } 201 }
204 skb_reserve(new_skb, NET_IP_ALIGN); 202 skb_reserve(new_skb, NET_IP_ALIGN);
203 /* Invidate the data cache of skb->data range when it is write back
204 * cache. It will prevent overwritting the new data from DMA
205 */
206 blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
207 (unsigned long)new_skb->end);
205 r->skb = new_skb; 208 r->skb = new_skb;
206 209
207 /* 210 /*
@@ -254,7 +257,7 @@ init_error:
254 * MII operations 257 * MII operations
255 */ 258 */
256/* Wait until the previous MDC/MDIO transaction has completed */ 259/* Wait until the previous MDC/MDIO transaction has completed */
257static void bfin_mdio_poll(void) 260static int bfin_mdio_poll(void)
258{ 261{
259 int timeout_cnt = MAX_TIMEOUT_CNT; 262 int timeout_cnt = MAX_TIMEOUT_CNT;
260 263
@@ -264,22 +267,30 @@ static void bfin_mdio_poll(void)
264 if (timeout_cnt-- < 0) { 267 if (timeout_cnt-- < 0) {
265 printk(KERN_ERR DRV_NAME 268 printk(KERN_ERR DRV_NAME
266 ": wait MDC/MDIO transaction to complete timeout\n"); 269 ": wait MDC/MDIO transaction to complete timeout\n");
267 break; 270 return -ETIMEDOUT;
268 } 271 }
269 } 272 }
273
274 return 0;
270} 275}
271 276
272/* Read an off-chip register in a PHY through the MDC/MDIO port */ 277/* Read an off-chip register in a PHY through the MDC/MDIO port */
273static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum) 278static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
274{ 279{
275 bfin_mdio_poll(); 280 int ret;
281
282 ret = bfin_mdio_poll();
283 if (ret)
284 return ret;
276 285
277 /* read mode */ 286 /* read mode */
278 bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) | 287 bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
279 SET_REGAD((u16) regnum) | 288 SET_REGAD((u16) regnum) |
280 STABUSY); 289 STABUSY);
281 290
282 bfin_mdio_poll(); 291 ret = bfin_mdio_poll();
292 if (ret)
293 return ret;
283 294
284 return (int) bfin_read_EMAC_STADAT(); 295 return (int) bfin_read_EMAC_STADAT();
285} 296}
@@ -288,7 +299,11 @@ static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
288static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum, 299static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
289 u16 value) 300 u16 value)
290{ 301{
291 bfin_mdio_poll(); 302 int ret;
303
304 ret = bfin_mdio_poll();
305 if (ret)
306 return ret;
292 307
293 bfin_write_EMAC_STADAT((u32) value); 308 bfin_write_EMAC_STADAT((u32) value);
294 309
@@ -298,9 +313,7 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
298 STAOP | 313 STAOP |
299 STABUSY); 314 STABUSY);
300 315
301 bfin_mdio_poll(); 316 return bfin_mdio_poll();
302
303 return 0;
304} 317}
305 318
306static int bfin_mdiobus_reset(struct mii_bus *bus) 319static int bfin_mdiobus_reset(struct mii_bus *bus)
@@ -458,6 +471,14 @@ static int mii_probe(struct net_device *dev)
458 * Ethtool support 471 * Ethtool support
459 */ 472 */
460 473
474/*
475 * interrupt routine for magic packet wakeup
476 */
477static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
478{
479 return IRQ_HANDLED;
480}
481
461static int 482static int
462bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) 483bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
463{ 484{
@@ -492,11 +513,57 @@ static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
492 strcpy(info->bus_info, dev_name(&dev->dev)); 513 strcpy(info->bus_info, dev_name(&dev->dev));
493} 514}
494 515
516static void bfin_mac_ethtool_getwol(struct net_device *dev,
517 struct ethtool_wolinfo *wolinfo)
518{
519 struct bfin_mac_local *lp = netdev_priv(dev);
520
521 wolinfo->supported = WAKE_MAGIC;
522 wolinfo->wolopts = lp->wol;
523}
524
525static int bfin_mac_ethtool_setwol(struct net_device *dev,
526 struct ethtool_wolinfo *wolinfo)
527{
528 struct bfin_mac_local *lp = netdev_priv(dev);
529 int rc;
530
531 if (wolinfo->wolopts & (WAKE_MAGICSECURE |
532 WAKE_UCAST |
533 WAKE_MCAST |
534 WAKE_BCAST |
535 WAKE_ARP))
536 return -EOPNOTSUPP;
537
538 lp->wol = wolinfo->wolopts;
539
540 if (lp->wol && !lp->irq_wake_requested) {
541 /* register wake irq handler */
542 rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
543 IRQF_DISABLED, "EMAC_WAKE", dev);
544 if (rc)
545 return rc;
546 lp->irq_wake_requested = true;
547 }
548
549 if (!lp->wol && lp->irq_wake_requested) {
550 free_irq(IRQ_MAC_WAKEDET, dev);
551 lp->irq_wake_requested = false;
552 }
553
554 /* Make sure the PHY driver doesn't suspend */
555 device_init_wakeup(&dev->dev, lp->wol);
556
557 return 0;
558}
559
495static const struct ethtool_ops bfin_mac_ethtool_ops = { 560static const struct ethtool_ops bfin_mac_ethtool_ops = {
496 .get_settings = bfin_mac_ethtool_getsettings, 561 .get_settings = bfin_mac_ethtool_getsettings,
497 .set_settings = bfin_mac_ethtool_setsettings, 562 .set_settings = bfin_mac_ethtool_setsettings,
498 .get_link = ethtool_op_get_link, 563 .get_link = ethtool_op_get_link,
499 .get_drvinfo = bfin_mac_ethtool_getdrvinfo, 564 .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
565 .get_wol = bfin_mac_ethtool_getwol,
566 .set_wol = bfin_mac_ethtool_setwol,
500}; 567};
501 568
502/**************************************************************************/ 569/**************************************************************************/
@@ -509,10 +576,11 @@ void setup_system_regs(struct net_device *dev)
509 * Configure checksum support and rcve frame word alignment 576 * Configure checksum support and rcve frame word alignment
510 */ 577 */
511 sysctl = bfin_read_EMAC_SYSCTL(); 578 sysctl = bfin_read_EMAC_SYSCTL();
579 sysctl |= RXDWA;
512#if defined(BFIN_MAC_CSUM_OFFLOAD) 580#if defined(BFIN_MAC_CSUM_OFFLOAD)
513 sysctl |= RXDWA | RXCKS; 581 sysctl |= RXCKS;
514#else 582#else
515 sysctl |= RXDWA; 583 sysctl &= ~RXCKS;
516#endif 584#endif
517 bfin_write_EMAC_SYSCTL(sysctl); 585 bfin_write_EMAC_SYSCTL(sysctl);
518 586
@@ -551,6 +619,309 @@ static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
551 return 0; 619 return 0;
552} 620}
553 621
622#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
623#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
624
625static int bfin_mac_hwtstamp_ioctl(struct net_device *netdev,
626 struct ifreq *ifr, int cmd)
627{
628 struct hwtstamp_config config;
629 struct bfin_mac_local *lp = netdev_priv(netdev);
630 u16 ptpctl;
631 u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;
632
633 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
634 return -EFAULT;
635
636 pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
637 __func__, config.flags, config.tx_type, config.rx_filter);
638
639 /* reserved for future extensions */
640 if (config.flags)
641 return -EINVAL;
642
643 if ((config.tx_type != HWTSTAMP_TX_OFF) &&
644 (config.tx_type != HWTSTAMP_TX_ON))
645 return -ERANGE;
646
647 ptpctl = bfin_read_EMAC_PTP_CTL();
648
649 switch (config.rx_filter) {
650 case HWTSTAMP_FILTER_NONE:
651 /*
652 * Dont allow any timestamping
653 */
654 ptpfv3 = 0xFFFFFFFF;
655 bfin_write_EMAC_PTP_FV3(ptpfv3);
656 break;
657 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
659 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
660 /*
661 * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL)
662 * to enable all the field matches.
663 */
664 ptpctl &= ~0x1F00;
665 bfin_write_EMAC_PTP_CTL(ptpctl);
666 /*
667 * Keep the default values of the EMAC_PTP_FOFF register.
668 */
669 ptpfoff = 0x4A24170C;
670 bfin_write_EMAC_PTP_FOFF(ptpfoff);
671 /*
672 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
673 * registers.
674 */
675 ptpfv1 = 0x11040800;
676 bfin_write_EMAC_PTP_FV1(ptpfv1);
677 ptpfv2 = 0x0140013F;
678 bfin_write_EMAC_PTP_FV2(ptpfv2);
679 /*
680 * The default value (0xFFFC) allows the timestamping of both
681 * received Sync messages and Delay_Req messages.
682 */
683 ptpfv3 = 0xFFFFFFFC;
684 bfin_write_EMAC_PTP_FV3(ptpfv3);
685
686 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
687 break;
688 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
689 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
690 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
691 /* Clear all five comparison mask bits (bits[12:8]) in the
692 * EMAC_PTP_CTL register to enable all the field matches.
693 */
694 ptpctl &= ~0x1F00;
695 bfin_write_EMAC_PTP_CTL(ptpctl);
696 /*
697 * Keep the default values of the EMAC_PTP_FOFF register, except set
698 * the PTPCOF field to 0x2A.
699 */
700 ptpfoff = 0x2A24170C;
701 bfin_write_EMAC_PTP_FOFF(ptpfoff);
702 /*
703 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
704 * registers.
705 */
706 ptpfv1 = 0x11040800;
707 bfin_write_EMAC_PTP_FV1(ptpfv1);
708 ptpfv2 = 0x0140013F;
709 bfin_write_EMAC_PTP_FV2(ptpfv2);
710 /*
711 * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
712 * the value to 0xFFF0.
713 */
714 ptpfv3 = 0xFFFFFFF0;
715 bfin_write_EMAC_PTP_FV3(ptpfv3);
716
717 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
718 break;
719 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
720 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
721 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
722 /*
723 * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
724 * EFTM and PTPCM field comparison.
725 */
726 ptpctl &= ~0x1100;
727 bfin_write_EMAC_PTP_CTL(ptpctl);
728 /*
729 * Keep the default values of all the fields of the EMAC_PTP_FOFF
730 * register, except set the PTPCOF field to 0x0E.
731 */
732 ptpfoff = 0x0E24170C;
733 bfin_write_EMAC_PTP_FOFF(ptpfoff);
734 /*
735 * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
736 * corresponds to PTP messages on the MAC layer.
737 */
738 ptpfv1 = 0x110488F7;
739 bfin_write_EMAC_PTP_FV1(ptpfv1);
740 ptpfv2 = 0x0140013F;
741 bfin_write_EMAC_PTP_FV2(ptpfv2);
742 /*
743 * To allow the timestamping of Pdelay_Req and Pdelay_Resp
744 * messages, set the value to 0xFFF0.
745 */
746 ptpfv3 = 0xFFFFFFF0;
747 bfin_write_EMAC_PTP_FV3(ptpfv3);
748
749 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
750 break;
751 default:
752 return -ERANGE;
753 }
754
755 if (config.tx_type == HWTSTAMP_TX_OFF &&
756 bfin_mac_hwtstamp_is_none(config.rx_filter)) {
757 ptpctl &= ~PTP_EN;
758 bfin_write_EMAC_PTP_CTL(ptpctl);
759
760 SSYNC();
761 } else {
762 ptpctl |= PTP_EN;
763 bfin_write_EMAC_PTP_CTL(ptpctl);
764
765 /*
766 * clear any existing timestamp
767 */
768 bfin_read_EMAC_PTP_RXSNAPLO();
769 bfin_read_EMAC_PTP_RXSNAPHI();
770
771 bfin_read_EMAC_PTP_TXSNAPLO();
772 bfin_read_EMAC_PTP_TXSNAPHI();
773
774 /*
775 * Set registers so that rollover occurs soon to test this.
776 */
777 bfin_write_EMAC_PTP_TIMELO(0x00000000);
778 bfin_write_EMAC_PTP_TIMEHI(0xFF800000);
779
780 SSYNC();
781
782 lp->compare.last_update = 0;
783 timecounter_init(&lp->clock,
784 &lp->cycles,
785 ktime_to_ns(ktime_get_real()));
786 timecompare_update(&lp->compare, 0);
787 }
788
789 lp->stamp_cfg = config;
790 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
791 -EFAULT : 0;
792}
793
794static void bfin_dump_hwtamp(char *s, ktime_t *hw, ktime_t *ts, struct timecompare *cmp)
795{
796 ktime_t sys = ktime_get_real();
797
798 pr_debug("%s %s hardware:%d,%d transform system:%d,%d system:%d,%d, cmp:%lld, %lld\n",
799 __func__, s, hw->tv.sec, hw->tv.nsec, ts->tv.sec, ts->tv.nsec, sys.tv.sec,
800 sys.tv.nsec, cmp->offset, cmp->skew);
801}
802
803static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
804{
805 struct bfin_mac_local *lp = netdev_priv(netdev);
806 union skb_shared_tx *shtx = skb_tx(skb);
807
808 if (shtx->hardware) {
809 int timeout_cnt = MAX_TIMEOUT_CNT;
810
811 /* When doing time stamping, keep the connection to the socket
812 * a while longer
813 */
814 shtx->in_progress = 1;
815
816 /*
817 * The timestamping is done at the EMAC module's MII/RMII interface
818 * when the module sees the Start of Frame of an event message packet. This
819 * interface is the closest possible place to the physical Ethernet transmission
820 * medium, providing the best timing accuracy.
821 */
822 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
823 udelay(1);
824 if (timeout_cnt == 0)
825 printk(KERN_ERR DRV_NAME
826 ": fails to timestamp the TX packet\n");
827 else {
828 struct skb_shared_hwtstamps shhwtstamps;
829 u64 ns;
830 u64 regval;
831
832 regval = bfin_read_EMAC_PTP_TXSNAPLO();
833 regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
834 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
835 ns = timecounter_cyc2time(&lp->clock,
836 regval);
837 timecompare_update(&lp->compare, ns);
838 shhwtstamps.hwtstamp = ns_to_ktime(ns);
839 shhwtstamps.syststamp =
840 timecompare_transform(&lp->compare, ns);
841 skb_tstamp_tx(skb, &shhwtstamps);
842
843 bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
844 }
845 }
846}
847
848static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
849{
850 struct bfin_mac_local *lp = netdev_priv(netdev);
851 u32 valid;
852 u64 regval, ns;
853 struct skb_shared_hwtstamps *shhwtstamps;
854
855 if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter))
856 return;
857
858 valid = bfin_read_EMAC_PTP_ISTAT() & RXEL;
859 if (!valid)
860 return;
861
862 shhwtstamps = skb_hwtstamps(skb);
863
864 regval = bfin_read_EMAC_PTP_RXSNAPLO();
865 regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
866 ns = timecounter_cyc2time(&lp->clock, regval);
867 timecompare_update(&lp->compare, ns);
868 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
869 shhwtstamps->hwtstamp = ns_to_ktime(ns);
870 shhwtstamps->syststamp = timecompare_transform(&lp->compare, ns);
871
872 bfin_dump_hwtamp("RX", &shhwtstamps->hwtstamp, &shhwtstamps->syststamp, &lp->compare);
873}
874
875/*
876 * bfin_read_clock - read raw cycle counter (to be used by time counter)
877 */
878static cycle_t bfin_read_clock(const struct cyclecounter *tc)
879{
880 u64 stamp;
881
882 stamp = bfin_read_EMAC_PTP_TIMELO();
883 stamp |= (u64)bfin_read_EMAC_PTP_TIMEHI() << 32ULL;
884
885 return stamp;
886}
887
888#define PTP_CLK 25000000
889
890static void bfin_mac_hwtstamp_init(struct net_device *netdev)
891{
892 struct bfin_mac_local *lp = netdev_priv(netdev);
893 u64 append;
894
895 /* Initialize hardware timer */
896 append = PTP_CLK * (1ULL << 32);
897 do_div(append, get_sclk());
898 bfin_write_EMAC_PTP_ADDEND((u32)append);
899
900 memset(&lp->cycles, 0, sizeof(lp->cycles));
901 lp->cycles.read = bfin_read_clock;
902 lp->cycles.mask = CLOCKSOURCE_MASK(64);
903 lp->cycles.mult = 1000000000 / PTP_CLK;
904 lp->cycles.shift = 0;
905
906 /* Synchronize our NIC clock against system wall clock */
907 memset(&lp->compare, 0, sizeof(lp->compare));
908 lp->compare.source = &lp->clock;
909 lp->compare.target = ktime_get_real;
910 lp->compare.num_samples = 10;
911
912 /* Initialize hwstamp config */
913 lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
914 lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
915}
916
917#else
918# define bfin_mac_hwtstamp_is_none(cfg) 0
919# define bfin_mac_hwtstamp_init(dev)
920# define bfin_mac_hwtstamp_ioctl(dev, ifr, cmd) (-EOPNOTSUPP)
921# define bfin_rx_hwtstamp(dev, skb)
922# define bfin_tx_hwtstamp(dev, skb)
923#endif
924
554static void adjust_tx_list(void) 925static void adjust_tx_list(void)
555{ 926{
556 int timeout_cnt = MAX_TIMEOUT_CNT; 927 int timeout_cnt = MAX_TIMEOUT_CNT;
@@ -608,18 +979,32 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
608{ 979{
609 u16 *data; 980 u16 *data;
610 u32 data_align = (unsigned long)(skb->data) & 0x3; 981 u32 data_align = (unsigned long)(skb->data) & 0x3;
982 union skb_shared_tx *shtx = skb_tx(skb);
983
611 current_tx_ptr->skb = skb; 984 current_tx_ptr->skb = skb;
612 985
613 if (data_align == 0x2) { 986 if (data_align == 0x2) {
614 /* move skb->data to current_tx_ptr payload */ 987 /* move skb->data to current_tx_ptr payload */
615 data = (u16 *)(skb->data) - 1; 988 data = (u16 *)(skb->data) - 1;
616 *data = (u16)(skb->len); 989 *data = (u16)(skb->len);
990 /*
991 * When transmitting an Ethernet packet, the PTP_TSYNC module requires
992 * a DMA_Length_Word field associated with the packet. The lower 12 bits
993 * of this field are the length of the packet payload in bytes and the higher
994 * 4 bits are the timestamping enable field.
995 */
996 if (shtx->hardware)
997 *data |= 0x1000;
998
617 current_tx_ptr->desc_a.start_addr = (u32)data; 999 current_tx_ptr->desc_a.start_addr = (u32)data;
618 /* this is important! */ 1000 /* this is important! */
619 blackfin_dcache_flush_range((u32)data, 1001 blackfin_dcache_flush_range((u32)data,
620 (u32)((u8 *)data + skb->len + 4)); 1002 (u32)((u8 *)data + skb->len + 4));
621 } else { 1003 } else {
622 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len); 1004 *((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
1005 /* enable timestamping for the sent packet */
1006 if (shtx->hardware)
1007 *((u16 *)(current_tx_ptr->packet)) |= 0x1000;
623 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data, 1008 memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
624 skb->len); 1009 skb->len);
625 current_tx_ptr->desc_a.start_addr = 1010 current_tx_ptr->desc_a.start_addr =
@@ -653,20 +1038,42 @@ static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
653 1038
654out: 1039out:
655 adjust_tx_list(); 1040 adjust_tx_list();
1041
1042 bfin_tx_hwtstamp(dev, skb);
1043
656 current_tx_ptr = current_tx_ptr->next; 1044 current_tx_ptr = current_tx_ptr->next;
657 dev->trans_start = jiffies;
658 dev->stats.tx_packets++; 1045 dev->stats.tx_packets++;
659 dev->stats.tx_bytes += (skb->len); 1046 dev->stats.tx_bytes += (skb->len);
660 return NETDEV_TX_OK; 1047 return NETDEV_TX_OK;
661} 1048}
662 1049
1050#define IP_HEADER_OFF 0
1051#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
1052 RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
1053
663static void bfin_mac_rx(struct net_device *dev) 1054static void bfin_mac_rx(struct net_device *dev)
664{ 1055{
665 struct sk_buff *skb, *new_skb; 1056 struct sk_buff *skb, *new_skb;
666 unsigned short len; 1057 unsigned short len;
1058 struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
1059#if defined(BFIN_MAC_CSUM_OFFLOAD)
1060 unsigned int i;
1061 unsigned char fcs[ETH_FCS_LEN + 1];
1062#endif
1063
1064 /* check if frame status word reports an error condition
1065 * we which case we simply drop the packet
1066 */
1067 if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
1068 printk(KERN_NOTICE DRV_NAME
1069 ": rx: receive error - packet dropped\n");
1070 dev->stats.rx_dropped++;
1071 goto out;
1072 }
667 1073
668 /* allocate a new skb for next time receive */ 1074 /* allocate a new skb for next time receive */
669 skb = current_rx_ptr->skb; 1075 skb = current_rx_ptr->skb;
1076
670 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); 1077 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
671 if (!new_skb) { 1078 if (!new_skb) {
672 printk(KERN_NOTICE DRV_NAME 1079 printk(KERN_NOTICE DRV_NAME
@@ -676,34 +1083,59 @@ static void bfin_mac_rx(struct net_device *dev)
676 } 1083 }
677 /* reserve 2 bytes for RXDWA padding */ 1084 /* reserve 2 bytes for RXDWA padding */
678 skb_reserve(new_skb, NET_IP_ALIGN); 1085 skb_reserve(new_skb, NET_IP_ALIGN);
679 current_rx_ptr->skb = new_skb;
680 current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
681
682 /* Invidate the data cache of skb->data range when it is write back 1086 /* Invidate the data cache of skb->data range when it is write back
683 * cache. It will prevent overwritting the new data from DMA 1087 * cache. It will prevent overwritting the new data from DMA
684 */ 1088 */
685 blackfin_dcache_invalidate_range((unsigned long)new_skb->head, 1089 blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
686 (unsigned long)new_skb->end); 1090 (unsigned long)new_skb->end);
687 1091
1092 current_rx_ptr->skb = new_skb;
1093 current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
1094
688 len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN); 1095 len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
1096 /* Deduce Ethernet FCS length from Ethernet payload length */
1097 len -= ETH_FCS_LEN;
689 skb_put(skb, len); 1098 skb_put(skb, len);
690 blackfin_dcache_invalidate_range((unsigned long)skb->head,
691 (unsigned long)skb->tail);
692 1099
693 skb->protocol = eth_type_trans(skb, dev); 1100 skb->protocol = eth_type_trans(skb, dev);
1101
1102 bfin_rx_hwtstamp(dev, skb);
1103
694#if defined(BFIN_MAC_CSUM_OFFLOAD) 1104#if defined(BFIN_MAC_CSUM_OFFLOAD)
695 skb->csum = current_rx_ptr->status.ip_payload_csum; 1105 /* Checksum offloading only works for IPv4 packets with the standard IP header
696 skb->ip_summed = CHECKSUM_COMPLETE; 1106 * length of 20 bytes, because the blackfin MAC checksum calculation is
1107 * based on that assumption. We must NOT use the calculated checksum if our
1108 * IP version or header break that assumption.
1109 */
1110 if (skb->data[IP_HEADER_OFF] == 0x45) {
1111 skb->csum = current_rx_ptr->status.ip_payload_csum;
1112 /*
1113 * Deduce Ethernet FCS from hardware generated IP payload checksum.
1114 * IP checksum is based on 16-bit one's complement algorithm.
1115 * To deduce a value from checksum is equal to add its inversion.
1116 * If the IP payload len is odd, the inversed FCS should also
1117 * begin from odd address and leave first byte zero.
1118 */
1119 if (skb->len % 2) {
1120 fcs[0] = 0;
1121 for (i = 0; i < ETH_FCS_LEN; i++)
1122 fcs[i + 1] = ~skb->data[skb->len + i];
1123 skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
1124 } else {
1125 for (i = 0; i < ETH_FCS_LEN; i++)
1126 fcs[i] = ~skb->data[skb->len + i];
1127 skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
1128 }
1129 skb->ip_summed = CHECKSUM_COMPLETE;
1130 }
697#endif 1131#endif
698 1132
699 netif_rx(skb); 1133 netif_rx(skb);
700 dev->stats.rx_packets++; 1134 dev->stats.rx_packets++;
701 dev->stats.rx_bytes += len; 1135 dev->stats.rx_bytes += len;
1136out:
702 current_rx_ptr->status.status_word = 0x00000000; 1137 current_rx_ptr->status.status_word = 0x00000000;
703 current_rx_ptr = current_rx_ptr->next; 1138 current_rx_ptr = current_rx_ptr->next;
704
705out:
706 return;
707} 1139}
708 1140
709/* interrupt routine to handle rx and error signal */ 1141/* interrupt routine to handle rx and error signal */
@@ -755,8 +1187,9 @@ static void bfin_mac_disable(void)
755/* 1187/*
756 * Enable Interrupts, Receive, and Transmit 1188 * Enable Interrupts, Receive, and Transmit
757 */ 1189 */
758static void bfin_mac_enable(void) 1190static int bfin_mac_enable(void)
759{ 1191{
1192 int ret;
760 u32 opmode; 1193 u32 opmode;
761 1194
762 pr_debug("%s: %s\n", DRV_NAME, __func__); 1195 pr_debug("%s: %s\n", DRV_NAME, __func__);
@@ -766,7 +1199,9 @@ static void bfin_mac_enable(void)
766 bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config); 1199 bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);
767 1200
768 /* Wait MII done */ 1201 /* Wait MII done */
769 bfin_mdio_poll(); 1202 ret = bfin_mdio_poll();
1203 if (ret)
1204 return ret;
770 1205
771 /* We enable only RX here */ 1206 /* We enable only RX here */
772 /* ASTP : Enable Automatic Pad Stripping 1207 /* ASTP : Enable Automatic Pad Stripping
@@ -790,6 +1225,8 @@ static void bfin_mac_enable(void)
790#endif 1225#endif
791 /* Turn on the EMAC rx */ 1226 /* Turn on the EMAC rx */
792 bfin_write_EMAC_OPMODE(opmode); 1227 bfin_write_EMAC_OPMODE(opmode);
1228
1229 return 0;
793} 1230}
794 1231
795/* Our watchdog timed out. Called by the networking layer */ 1232/* Our watchdog timed out. Called by the networking layer */
@@ -805,21 +1242,21 @@ static void bfin_mac_timeout(struct net_device *dev)
805 bfin_mac_enable(); 1242 bfin_mac_enable();
806 1243
807 /* We can accept TX packets again */ 1244 /* We can accept TX packets again */
808 dev->trans_start = jiffies; 1245 dev->trans_start = jiffies; /* prevent tx timeout */
809 netif_wake_queue(dev); 1246 netif_wake_queue(dev);
810} 1247}
811 1248
812static void bfin_mac_multicast_hash(struct net_device *dev) 1249static void bfin_mac_multicast_hash(struct net_device *dev)
813{ 1250{
814 u32 emac_hashhi, emac_hashlo; 1251 u32 emac_hashhi, emac_hashlo;
815 struct dev_mc_list *dmi; 1252 struct netdev_hw_addr *ha;
816 char *addrs; 1253 char *addrs;
817 u32 crc; 1254 u32 crc;
818 1255
819 emac_hashhi = emac_hashlo = 0; 1256 emac_hashhi = emac_hashlo = 0;
820 1257
821 netdev_for_each_mc_addr(dmi, dev) { 1258 netdev_for_each_mc_addr(ha, dev) {
822 addrs = dmi->dmi_addr; 1259 addrs = ha->addr;
823 1260
824 /* skip non-multicast addresses */ 1261 /* skip non-multicast addresses */
825 if (!(*addrs & 1)) 1262 if (!(*addrs & 1))
@@ -836,8 +1273,6 @@ static void bfin_mac_multicast_hash(struct net_device *dev)
836 1273
837 bfin_write_EMAC_HASHHI(emac_hashhi); 1274 bfin_write_EMAC_HASHHI(emac_hashhi);
838 bfin_write_EMAC_HASHLO(emac_hashlo); 1275 bfin_write_EMAC_HASHLO(emac_hashlo);
839
840 return;
841} 1276}
842 1277
843/* 1278/*
@@ -853,7 +1288,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
853 if (dev->flags & IFF_PROMISC) { 1288 if (dev->flags & IFF_PROMISC) {
854 printk(KERN_INFO "%s: set to promisc mode\n", dev->name); 1289 printk(KERN_INFO "%s: set to promisc mode\n", dev->name);
855 sysctl = bfin_read_EMAC_OPMODE(); 1290 sysctl = bfin_read_EMAC_OPMODE();
856 sysctl |= RAF; 1291 sysctl |= PR;
857 bfin_write_EMAC_OPMODE(sysctl); 1292 bfin_write_EMAC_OPMODE(sysctl);
858 } else if (dev->flags & IFF_ALLMULTI) { 1293 } else if (dev->flags & IFF_ALLMULTI) {
859 /* accept all multicast */ 1294 /* accept all multicast */
@@ -874,6 +1309,16 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
874 } 1309 }
875} 1310}
876 1311
1312static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
1313{
1314 switch (cmd) {
1315 case SIOCSHWTSTAMP:
1316 return bfin_mac_hwtstamp_ioctl(netdev, ifr, cmd);
1317 default:
1318 return -EOPNOTSUPP;
1319 }
1320}
1321
877/* 1322/*
878 * this puts the device in an inactive state 1323 * this puts the device in an inactive state
879 */ 1324 */
@@ -894,7 +1339,7 @@ static void bfin_mac_shutdown(struct net_device *dev)
894static int bfin_mac_open(struct net_device *dev) 1339static int bfin_mac_open(struct net_device *dev)
895{ 1340{
896 struct bfin_mac_local *lp = netdev_priv(dev); 1341 struct bfin_mac_local *lp = netdev_priv(dev);
897 int retval; 1342 int ret;
898 pr_debug("%s: %s\n", dev->name, __func__); 1343 pr_debug("%s: %s\n", dev->name, __func__);
899 1344
900 /* 1345 /*
@@ -908,18 +1353,21 @@ static int bfin_mac_open(struct net_device *dev)
908 } 1353 }
909 1354
910 /* initial rx and tx list */ 1355 /* initial rx and tx list */
911 retval = desc_list_init(); 1356 ret = desc_list_init();
912 1357 if (ret)
913 if (retval) 1358 return ret;
914 return retval;
915 1359
916 phy_start(lp->phydev); 1360 phy_start(lp->phydev);
917 phy_write(lp->phydev, MII_BMCR, BMCR_RESET); 1361 phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
918 setup_system_regs(dev); 1362 setup_system_regs(dev);
919 setup_mac_addr(dev->dev_addr); 1363 setup_mac_addr(dev->dev_addr);
1364
920 bfin_mac_disable(); 1365 bfin_mac_disable();
921 bfin_mac_enable(); 1366 ret = bfin_mac_enable();
1367 if (ret)
1368 return ret;
922 pr_debug("hardware init finished\n"); 1369 pr_debug("hardware init finished\n");
1370
923 netif_start_queue(dev); 1371 netif_start_queue(dev);
924 netif_carrier_on(dev); 1372 netif_carrier_on(dev);
925 1373
@@ -958,6 +1406,7 @@ static const struct net_device_ops bfin_mac_netdev_ops = {
958 .ndo_set_mac_address = bfin_mac_set_mac_address, 1406 .ndo_set_mac_address = bfin_mac_set_mac_address,
959 .ndo_tx_timeout = bfin_mac_timeout, 1407 .ndo_tx_timeout = bfin_mac_timeout,
960 .ndo_set_multicast_list = bfin_mac_set_multicast_list, 1408 .ndo_set_multicast_list = bfin_mac_set_multicast_list,
1409 .ndo_do_ioctl = bfin_mac_ioctl,
961 .ndo_validate_addr = eth_validate_addr, 1410 .ndo_validate_addr = eth_validate_addr,
962 .ndo_change_mtu = eth_change_mtu, 1411 .ndo_change_mtu = eth_change_mtu,
963#ifdef CONFIG_NET_POLL_CONTROLLER 1412#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1017,6 +1466,11 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1017 } 1466 }
1018 pd = pdev->dev.platform_data; 1467 pd = pdev->dev.platform_data;
1019 lp->mii_bus = platform_get_drvdata(pd); 1468 lp->mii_bus = platform_get_drvdata(pd);
1469 if (!lp->mii_bus) {
1470 dev_err(&pdev->dev, "Cannot get mii_bus!\n");
1471 rc = -ENODEV;
1472 goto out_err_mii_bus_probe;
1473 }
1020 lp->mii_bus->priv = ndev; 1474 lp->mii_bus->priv = ndev;
1021 1475
1022 rc = mii_probe(ndev); 1476 rc = mii_probe(ndev);
@@ -1049,6 +1503,8 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1049 goto out_err_reg_ndev; 1503 goto out_err_reg_ndev;
1050 } 1504 }
1051 1505
1506 bfin_mac_hwtstamp_init(ndev);
1507
1052 /* now, print out the card info, in a short format.. */ 1508 /* now, print out the card info, in a short format.. */
1053 dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); 1509 dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
1054 1510
@@ -1060,6 +1516,7 @@ out_err_request_irq:
1060out_err_mii_probe: 1516out_err_mii_probe:
1061 mdiobus_unregister(lp->mii_bus); 1517 mdiobus_unregister(lp->mii_bus);
1062 mdiobus_free(lp->mii_bus); 1518 mdiobus_free(lp->mii_bus);
1519out_err_mii_bus_probe:
1063 peripheral_free_list(pin_req); 1520 peripheral_free_list(pin_req);
1064out_err_probe_mac: 1521out_err_probe_mac:
1065 platform_set_drvdata(pdev, NULL); 1522 platform_set_drvdata(pdev, NULL);
@@ -1092,9 +1549,16 @@ static int __devexit bfin_mac_remove(struct platform_device *pdev)
1092static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg) 1549static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
1093{ 1550{
1094 struct net_device *net_dev = platform_get_drvdata(pdev); 1551 struct net_device *net_dev = platform_get_drvdata(pdev);
1552 struct bfin_mac_local *lp = netdev_priv(net_dev);
1095 1553
1096 if (netif_running(net_dev)) 1554 if (lp->wol) {
1097 bfin_mac_close(net_dev); 1555 bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
1556 bfin_write_EMAC_WKUP_CTL(MPKE);
1557 enable_irq_wake(IRQ_MAC_WAKEDET);
1558 } else {
1559 if (netif_running(net_dev))
1560 bfin_mac_close(net_dev);
1561 }
1098 1562
1099 return 0; 1563 return 0;
1100} 1564}
@@ -1102,9 +1566,16 @@ static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
1102static int bfin_mac_resume(struct platform_device *pdev) 1566static int bfin_mac_resume(struct platform_device *pdev)
1103{ 1567{
1104 struct net_device *net_dev = platform_get_drvdata(pdev); 1568 struct net_device *net_dev = platform_get_drvdata(pdev);
1569 struct bfin_mac_local *lp = netdev_priv(net_dev);
1105 1570
1106 if (netif_running(net_dev)) 1571 if (lp->wol) {
1107 bfin_mac_open(net_dev); 1572 bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
1573 bfin_write_EMAC_WKUP_CTL(0);
1574 disable_irq_wake(IRQ_MAC_WAKEDET);
1575 } else {
1576 if (netif_running(net_dev))
1577 bfin_mac_open(net_dev);
1578 }
1108 1579
1109 return 0; 1580 return 0;
1110} 1581}
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index 052b5dce3e3c..1ae7b82ceeee 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -7,6 +7,12 @@
7 * 7 *
8 * Licensed under the GPL-2 or later. 8 * Licensed under the GPL-2 or later.
9 */ 9 */
10#ifndef _BFIN_MAC_H_
11#define _BFIN_MAC_H_
12
13#include <linux/net_tstamp.h>
14#include <linux/clocksource.h>
15#include <linux/timecompare.h>
10 16
11#define BFIN_MAC_CSUM_OFFLOAD 17#define BFIN_MAC_CSUM_OFFLOAD
12 18
@@ -60,6 +66,9 @@ struct bfin_mac_local {
60 unsigned char Mac[6]; /* MAC address of the board */ 66 unsigned char Mac[6]; /* MAC address of the board */
61 spinlock_t lock; 67 spinlock_t lock;
62 68
69 int wol; /* Wake On Lan */
70 int irq_wake_requested;
71
63 /* MII and PHY stuffs */ 72 /* MII and PHY stuffs */
64 int old_link; /* used by bf537_adjust_link */ 73 int old_link; /* used by bf537_adjust_link */
65 int old_speed; 74 int old_speed;
@@ -67,6 +76,15 @@ struct bfin_mac_local {
67 76
68 struct phy_device *phydev; 77 struct phy_device *phydev;
69 struct mii_bus *mii_bus; 78 struct mii_bus *mii_bus;
79
80#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
81 struct cyclecounter cycles;
82 struct timecounter clock;
83 struct timecompare compare;
84 struct hwtstamp_config stamp_cfg;
85#endif
70}; 86};
71 87
72extern void bfin_get_ether_addr(char *addr); 88extern void bfin_get_ether_addr(char *addr);
89
90#endif
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 598b007f1991..39250b2ca886 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -167,7 +167,6 @@ static inline void
167dbdma_st32(volatile __u32 __iomem *a, unsigned long x) 167dbdma_st32(volatile __u32 __iomem *a, unsigned long x)
168{ 168{
169 __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory"); 169 __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory");
170 return;
171} 170}
172 171
173static inline unsigned long 172static inline unsigned long
@@ -382,8 +381,6 @@ bmac_init_registers(struct net_device *dev)
382 bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets); 381 bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets);
383 382
384 bmwrite(dev, INTDISABLE, EnableNormal); 383 bmwrite(dev, INTDISABLE, EnableNormal);
385
386 return;
387} 384}
388 385
389#if 0 386#if 0
@@ -972,7 +969,7 @@ bmac_remove_multi(struct net_device *dev,
972 */ 969 */
973static void bmac_set_multicast(struct net_device *dev) 970static void bmac_set_multicast(struct net_device *dev)
974{ 971{
975 struct dev_mc_list *dmi; 972 struct netdev_hw_addr *ha;
976 struct bmac_data *bp = netdev_priv(dev); 973 struct bmac_data *bp = netdev_priv(dev);
977 int num_addrs = netdev_mc_count(dev); 974 int num_addrs = netdev_mc_count(dev);
978 unsigned short rx_cfg; 975 unsigned short rx_cfg;
@@ -1001,8 +998,8 @@ static void bmac_set_multicast(struct net_device *dev)
1001 rx_cfg = bmac_rx_on(dev, 0, 0); 998 rx_cfg = bmac_rx_on(dev, 0, 0);
1002 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg)); 999 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
1003 } else { 1000 } else {
1004 netdev_for_each_mc_addr(dmi, dev) 1001 netdev_for_each_mc_addr(ha, dev)
1005 bmac_addhash(bp, dmi->dmi_addr); 1002 bmac_addhash(bp, ha->addr);
1006 bmac_update_hash_table_mask(dev, bp); 1003 bmac_update_hash_table_mask(dev, bp);
1007 rx_cfg = bmac_rx_on(dev, 1, 0); 1004 rx_cfg = bmac_rx_on(dev, 1, 0);
1008 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg)); 1005 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
@@ -1016,7 +1013,7 @@ static void bmac_set_multicast(struct net_device *dev)
1016 1013
1017static void bmac_set_multicast(struct net_device *dev) 1014static void bmac_set_multicast(struct net_device *dev)
1018{ 1015{
1019 struct dev_mc_list *dmi; 1016 struct netdev_hw_addr *ha;
1020 char *addrs; 1017 char *addrs;
1021 int i; 1018 int i;
1022 unsigned short rx_cfg; 1019 unsigned short rx_cfg;
@@ -1040,8 +1037,8 @@ static void bmac_set_multicast(struct net_device *dev)
1040 1037
1041 for(i = 0; i < 4; i++) hash_table[i] = 0; 1038 for(i = 0; i < 4; i++) hash_table[i] = 0;
1042 1039
1043 netdev_for_each_mc_addr(dmi, dev) { 1040 netdev_for_each_mc_addr(ha, dev) {
1044 addrs = dmi->dmi_addr; 1041 addrs = ha->addr;
1045 1042
1046 if(!(*addrs & 1)) 1043 if(!(*addrs & 1))
1047 continue; 1044 continue;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index a257babd1bb4..188e356c30a3 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -58,11 +58,11 @@
58#include "bnx2_fw.h" 58#include "bnx2_fw.h"
59 59
60#define DRV_MODULE_NAME "bnx2" 60#define DRV_MODULE_NAME "bnx2"
61#define DRV_MODULE_VERSION "2.0.8" 61#define DRV_MODULE_VERSION "2.0.15"
62#define DRV_MODULE_RELDATE "Feb 15, 2010" 62#define DRV_MODULE_RELDATE "May 4, 2010"
63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw" 63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw" 64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw" 65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw"
66#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw" 66#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw" 67#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
68 68
@@ -651,34 +651,32 @@ bnx2_napi_enable(struct bnx2 *bp)
651} 651}
652 652
653static void 653static void
654bnx2_netif_stop(struct bnx2 *bp) 654bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
655{ 655{
656 bnx2_cnic_stop(bp); 656 if (stop_cnic)
657 bnx2_cnic_stop(bp);
657 if (netif_running(bp->dev)) { 658 if (netif_running(bp->dev)) {
658 int i;
659
660 bnx2_napi_disable(bp); 659 bnx2_napi_disable(bp);
661 netif_tx_disable(bp->dev); 660 netif_tx_disable(bp->dev);
662 /* prevent tx timeout */
663 for (i = 0; i < bp->dev->num_tx_queues; i++) {
664 struct netdev_queue *txq;
665
666 txq = netdev_get_tx_queue(bp->dev, i);
667 txq->trans_start = jiffies;
668 }
669 } 661 }
670 bnx2_disable_int_sync(bp); 662 bnx2_disable_int_sync(bp);
663 netif_carrier_off(bp->dev); /* prevent tx timeout */
671} 664}
672 665
673static void 666static void
674bnx2_netif_start(struct bnx2 *bp) 667bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
675{ 668{
676 if (atomic_dec_and_test(&bp->intr_sem)) { 669 if (atomic_dec_and_test(&bp->intr_sem)) {
677 if (netif_running(bp->dev)) { 670 if (netif_running(bp->dev)) {
678 netif_tx_wake_all_queues(bp->dev); 671 netif_tx_wake_all_queues(bp->dev);
672 spin_lock_bh(&bp->phy_lock);
673 if (bp->link_up)
674 netif_carrier_on(bp->dev);
675 spin_unlock_bh(&bp->phy_lock);
679 bnx2_napi_enable(bp); 676 bnx2_napi_enable(bp);
680 bnx2_enable_int(bp); 677 bnx2_enable_int(bp);
681 bnx2_cnic_start(bp); 678 if (start_cnic)
679 bnx2_cnic_start(bp);
682 } 680 }
683 } 681 }
684} 682}
@@ -2670,7 +2668,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2670 } 2668 }
2671 2669
2672 rx_pg->page = page; 2670 rx_pg->page = page;
2673 pci_unmap_addr_set(rx_pg, mapping, mapping); 2671 dma_unmap_addr_set(rx_pg, mapping, mapping);
2674 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; 2672 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2675 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; 2673 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2676 return 0; 2674 return 0;
@@ -2685,7 +2683,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2685 if (!page) 2683 if (!page)
2686 return; 2684 return;
2687 2685
2688 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE, 2686 pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2689 PCI_DMA_FROMDEVICE); 2687 PCI_DMA_FROMDEVICE);
2690 2688
2691 __free_page(page); 2689 __free_page(page);
@@ -2717,7 +2715,8 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2717 } 2715 }
2718 2716
2719 rx_buf->skb = skb; 2717 rx_buf->skb = skb;
2720 pci_unmap_addr_set(rx_buf, mapping, mapping); 2718 rx_buf->desc = (struct l2_fhdr *) skb->data;
2719 dma_unmap_addr_set(rx_buf, mapping, mapping);
2721 2720
2722 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; 2721 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2723 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; 2722 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -2816,7 +2815,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2816 } 2815 }
2817 } 2816 }
2818 2817
2819 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), 2818 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
2820 skb_headlen(skb), PCI_DMA_TODEVICE); 2819 skb_headlen(skb), PCI_DMA_TODEVICE);
2821 2820
2822 tx_buf->skb = NULL; 2821 tx_buf->skb = NULL;
@@ -2826,7 +2825,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2826 sw_cons = NEXT_TX_BD(sw_cons); 2825 sw_cons = NEXT_TX_BD(sw_cons);
2827 2826
2828 pci_unmap_page(bp->pdev, 2827 pci_unmap_page(bp->pdev,
2829 pci_unmap_addr( 2828 dma_unmap_addr(
2830 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)], 2829 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2831 mapping), 2830 mapping),
2832 skb_shinfo(skb)->frags[i].size, 2831 skb_shinfo(skb)->frags[i].size,
@@ -2908,8 +2907,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2908 if (prod != cons) { 2907 if (prod != cons) {
2909 prod_rx_pg->page = cons_rx_pg->page; 2908 prod_rx_pg->page = cons_rx_pg->page;
2910 cons_rx_pg->page = NULL; 2909 cons_rx_pg->page = NULL;
2911 pci_unmap_addr_set(prod_rx_pg, mapping, 2910 dma_unmap_addr_set(prod_rx_pg, mapping,
2912 pci_unmap_addr(cons_rx_pg, mapping)); 2911 dma_unmap_addr(cons_rx_pg, mapping));
2913 2912
2914 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; 2913 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2915 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; 2914 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
@@ -2933,18 +2932,19 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2933 prod_rx_buf = &rxr->rx_buf_ring[prod]; 2932 prod_rx_buf = &rxr->rx_buf_ring[prod];
2934 2933
2935 pci_dma_sync_single_for_device(bp->pdev, 2934 pci_dma_sync_single_for_device(bp->pdev,
2936 pci_unmap_addr(cons_rx_buf, mapping), 2935 dma_unmap_addr(cons_rx_buf, mapping),
2937 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); 2936 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2938 2937
2939 rxr->rx_prod_bseq += bp->rx_buf_use_size; 2938 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2940 2939
2941 prod_rx_buf->skb = skb; 2940 prod_rx_buf->skb = skb;
2941 prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
2942 2942
2943 if (cons == prod) 2943 if (cons == prod)
2944 return; 2944 return;
2945 2945
2946 pci_unmap_addr_set(prod_rx_buf, mapping, 2946 dma_unmap_addr_set(prod_rx_buf, mapping,
2947 pci_unmap_addr(cons_rx_buf, mapping)); 2947 dma_unmap_addr(cons_rx_buf, mapping));
2948 2948
2949 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 2949 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2950 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 2950 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
@@ -3017,7 +3017,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
3017 /* Don't unmap yet. If we're unable to allocate a new 3017 /* Don't unmap yet. If we're unable to allocate a new
3018 * page, we need to recycle the page and the DMA addr. 3018 * page, we need to recycle the page and the DMA addr.
3019 */ 3019 */
3020 mapping_old = pci_unmap_addr(rx_pg, mapping); 3020 mapping_old = dma_unmap_addr(rx_pg, mapping);
3021 if (i == pages - 1) 3021 if (i == pages - 1)
3022 frag_len -= 4; 3022 frag_len -= 4;
3023 3023
@@ -3072,6 +3072,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3072 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod; 3072 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3073 struct l2_fhdr *rx_hdr; 3073 struct l2_fhdr *rx_hdr;
3074 int rx_pkt = 0, pg_ring_used = 0; 3074 int rx_pkt = 0, pg_ring_used = 0;
3075 struct pci_dev *pdev = bp->pdev;
3075 3076
3076 hw_cons = bnx2_get_hw_rx_cons(bnapi); 3077 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3077 sw_cons = rxr->rx_cons; 3078 sw_cons = rxr->rx_cons;
@@ -3084,7 +3085,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3084 while (sw_cons != hw_cons) { 3085 while (sw_cons != hw_cons) {
3085 unsigned int len, hdr_len; 3086 unsigned int len, hdr_len;
3086 u32 status; 3087 u32 status;
3087 struct sw_bd *rx_buf; 3088 struct sw_bd *rx_buf, *next_rx_buf;
3088 struct sk_buff *skb; 3089 struct sk_buff *skb;
3089 dma_addr_t dma_addr; 3090 dma_addr_t dma_addr;
3090 u16 vtag = 0; 3091 u16 vtag = 0;
@@ -3095,16 +3096,23 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3095 3096
3096 rx_buf = &rxr->rx_buf_ring[sw_ring_cons]; 3097 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3097 skb = rx_buf->skb; 3098 skb = rx_buf->skb;
3099 prefetchw(skb);
3098 3100
3101 if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
3102 next_rx_buf =
3103 &rxr->rx_buf_ring[
3104 RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3105 prefetch(next_rx_buf->desc);
3106 }
3099 rx_buf->skb = NULL; 3107 rx_buf->skb = NULL;
3100 3108
3101 dma_addr = pci_unmap_addr(rx_buf, mapping); 3109 dma_addr = dma_unmap_addr(rx_buf, mapping);
3102 3110
3103 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr, 3111 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3104 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, 3112 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3105 PCI_DMA_FROMDEVICE); 3113 PCI_DMA_FROMDEVICE);
3106 3114
3107 rx_hdr = (struct l2_fhdr *) skb->data; 3115 rx_hdr = rx_buf->desc;
3108 len = rx_hdr->l2_fhdr_pkt_len; 3116 len = rx_hdr->l2_fhdr_pkt_len;
3109 status = rx_hdr->l2_fhdr_status; 3117 status = rx_hdr->l2_fhdr_status;
3110 3118
@@ -3205,10 +3213,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3205 3213
3206#ifdef BCM_VLAN 3214#ifdef BCM_VLAN
3207 if (hw_vlan) 3215 if (hw_vlan)
3208 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag); 3216 vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
3209 else 3217 else
3210#endif 3218#endif
3211 netif_receive_skb(skb); 3219 napi_gro_receive(&bnapi->napi, skb);
3212 3220
3213 rx_pkt++; 3221 rx_pkt++;
3214 3222
@@ -3546,7 +3554,6 @@ bnx2_set_rx_mode(struct net_device *dev)
3546 } 3554 }
3547 else { 3555 else {
3548 /* Accept one or more multicast(s). */ 3556 /* Accept one or more multicast(s). */
3549 struct dev_mc_list *mclist;
3550 u32 mc_filter[NUM_MC_HASH_REGISTERS]; 3557 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3551 u32 regidx; 3558 u32 regidx;
3552 u32 bit; 3559 u32 bit;
@@ -3554,8 +3561,8 @@ bnx2_set_rx_mode(struct net_device *dev)
3554 3561
3555 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS); 3562 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3556 3563
3557 netdev_for_each_mc_addr(mclist, dev) { 3564 netdev_for_each_mc_addr(ha, dev) {
3558 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr); 3565 crc = ether_crc_le(ETH_ALEN, ha->addr);
3559 bit = crc & 0xff; 3566 bit = crc & 0xff;
3560 regidx = (bit & 0xe0) >> 5; 3567 regidx = (bit & 0xe0) >> 5;
3561 bit &= 0x1f; 3568 bit &= 0x1f;
@@ -4759,8 +4766,12 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4759 rc = bnx2_alloc_bad_rbuf(bp); 4766 rc = bnx2_alloc_bad_rbuf(bp);
4760 } 4767 }
4761 4768
4762 if (bp->flags & BNX2_FLAG_USING_MSIX) 4769 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4763 bnx2_setup_msix_tbl(bp); 4770 bnx2_setup_msix_tbl(bp);
4771 /* Prevent MSIX table reads and write from timing out */
4772 REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4773 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4774 }
4764 4775
4765 return rc; 4776 return rc;
4766} 4777}
@@ -5312,7 +5323,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5312 } 5323 }
5313 5324
5314 pci_unmap_single(bp->pdev, 5325 pci_unmap_single(bp->pdev,
5315 pci_unmap_addr(tx_buf, mapping), 5326 dma_unmap_addr(tx_buf, mapping),
5316 skb_headlen(skb), 5327 skb_headlen(skb),
5317 PCI_DMA_TODEVICE); 5328 PCI_DMA_TODEVICE);
5318 5329
@@ -5323,7 +5334,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5323 for (k = 0; k < last; k++, j++) { 5334 for (k = 0; k < last; k++, j++) {
5324 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; 5335 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5325 pci_unmap_page(bp->pdev, 5336 pci_unmap_page(bp->pdev,
5326 pci_unmap_addr(tx_buf, mapping), 5337 dma_unmap_addr(tx_buf, mapping),
5327 skb_shinfo(skb)->frags[k].size, 5338 skb_shinfo(skb)->frags[k].size,
5328 PCI_DMA_TODEVICE); 5339 PCI_DMA_TODEVICE);
5329 } 5340 }
@@ -5353,7 +5364,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
5353 continue; 5364 continue;
5354 5365
5355 pci_unmap_single(bp->pdev, 5366 pci_unmap_single(bp->pdev,
5356 pci_unmap_addr(rx_buf, mapping), 5367 dma_unmap_addr(rx_buf, mapping),
5357 bp->rx_buf_use_size, 5368 bp->rx_buf_use_size,
5358 PCI_DMA_FROMDEVICE); 5369 PCI_DMA_FROMDEVICE);
5359 5370
@@ -5759,11 +5770,11 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5759 rx_buf = &rxr->rx_buf_ring[rx_start_idx]; 5770 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5760 rx_skb = rx_buf->skb; 5771 rx_skb = rx_buf->skb;
5761 5772
5762 rx_hdr = (struct l2_fhdr *) rx_skb->data; 5773 rx_hdr = rx_buf->desc;
5763 skb_reserve(rx_skb, BNX2_RX_OFFSET); 5774 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5764 5775
5765 pci_dma_sync_single_for_cpu(bp->pdev, 5776 pci_dma_sync_single_for_cpu(bp->pdev,
5766 pci_unmap_addr(rx_buf, mapping), 5777 dma_unmap_addr(rx_buf, mapping),
5767 bp->rx_buf_size, PCI_DMA_FROMDEVICE); 5778 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5768 5779
5769 if (rx_hdr->l2_fhdr_status & 5780 if (rx_hdr->l2_fhdr_status &
@@ -6273,12 +6284,12 @@ bnx2_reset_task(struct work_struct *work)
6273 return; 6284 return;
6274 } 6285 }
6275 6286
6276 bnx2_netif_stop(bp); 6287 bnx2_netif_stop(bp, true);
6277 6288
6278 bnx2_init_nic(bp, 1); 6289 bnx2_init_nic(bp, 1);
6279 6290
6280 atomic_set(&bp->intr_sem, 1); 6291 atomic_set(&bp->intr_sem, 1);
6281 bnx2_netif_start(bp); 6292 bnx2_netif_start(bp, true);
6282 rtnl_unlock(); 6293 rtnl_unlock();
6283} 6294}
6284 6295
@@ -6286,14 +6297,23 @@ static void
6286bnx2_dump_state(struct bnx2 *bp) 6297bnx2_dump_state(struct bnx2 *bp)
6287{ 6298{
6288 struct net_device *dev = bp->dev; 6299 struct net_device *dev = bp->dev;
6300 u32 mcp_p0, mcp_p1;
6289 6301
6290 netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem)); 6302 netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
6291 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n", 6303 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6292 REG_RD(bp, BNX2_EMAC_TX_STATUS), 6304 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6305 REG_RD(bp, BNX2_EMAC_RX_STATUS));
6306 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6293 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL)); 6307 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6308 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6309 mcp_p0 = BNX2_MCP_STATE_P0;
6310 mcp_p1 = BNX2_MCP_STATE_P1;
6311 } else {
6312 mcp_p0 = BNX2_MCP_STATE_P0_5708;
6313 mcp_p1 = BNX2_MCP_STATE_P1_5708;
6314 }
6294 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n", 6315 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6295 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0), 6316 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
6296 bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
6297 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n", 6317 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6298 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS)); 6318 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6299 if (bp->flags & BNX2_FLAG_USING_MSIX) 6319 if (bp->flags & BNX2_FLAG_USING_MSIX)
@@ -6320,7 +6340,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6320 struct bnx2 *bp = netdev_priv(dev); 6340 struct bnx2 *bp = netdev_priv(dev);
6321 6341
6322 if (netif_running(dev)) 6342 if (netif_running(dev))
6323 bnx2_netif_stop(bp); 6343 bnx2_netif_stop(bp, false);
6324 6344
6325 bp->vlgrp = vlgrp; 6345 bp->vlgrp = vlgrp;
6326 6346
@@ -6331,7 +6351,7 @@ bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6331 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN) 6351 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6332 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1); 6352 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6333 6353
6334 bnx2_netif_start(bp); 6354 bnx2_netif_start(bp, false);
6335} 6355}
6336#endif 6356#endif
6337 6357
@@ -6423,7 +6443,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6423 6443
6424 tx_buf = &txr->tx_buf_ring[ring_prod]; 6444 tx_buf = &txr->tx_buf_ring[ring_prod];
6425 tx_buf->skb = skb; 6445 tx_buf->skb = skb;
6426 pci_unmap_addr_set(tx_buf, mapping, mapping); 6446 dma_unmap_addr_set(tx_buf, mapping, mapping);
6427 6447
6428 txbd = &txr->tx_desc_ring[ring_prod]; 6448 txbd = &txr->tx_desc_ring[ring_prod];
6429 6449
@@ -6448,7 +6468,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6448 len, PCI_DMA_TODEVICE); 6468 len, PCI_DMA_TODEVICE);
6449 if (pci_dma_mapping_error(bp->pdev, mapping)) 6469 if (pci_dma_mapping_error(bp->pdev, mapping))
6450 goto dma_error; 6470 goto dma_error;
6451 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping, 6471 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6452 mapping); 6472 mapping);
6453 6473
6454 txbd->tx_bd_haddr_hi = (u64) mapping >> 32; 6474 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -6485,7 +6505,7 @@ dma_error:
6485 ring_prod = TX_RING_IDX(prod); 6505 ring_prod = TX_RING_IDX(prod);
6486 tx_buf = &txr->tx_buf_ring[ring_prod]; 6506 tx_buf = &txr->tx_buf_ring[ring_prod];
6487 tx_buf->skb = NULL; 6507 tx_buf->skb = NULL;
6488 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), 6508 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
6489 skb_headlen(skb), PCI_DMA_TODEVICE); 6509 skb_headlen(skb), PCI_DMA_TODEVICE);
6490 6510
6491 /* unmap remaining mapped pages */ 6511 /* unmap remaining mapped pages */
@@ -6493,7 +6513,7 @@ dma_error:
6493 prod = NEXT_TX_BD(prod); 6513 prod = NEXT_TX_BD(prod);
6494 ring_prod = TX_RING_IDX(prod); 6514 ring_prod = TX_RING_IDX(prod);
6495 tx_buf = &txr->tx_buf_ring[ring_prod]; 6515 tx_buf = &txr->tx_buf_ring[ring_prod];
6496 pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping), 6516 pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
6497 skb_shinfo(skb)->frags[i].size, 6517 skb_shinfo(skb)->frags[i].size,
6498 PCI_DMA_TODEVICE); 6518 PCI_DMA_TODEVICE);
6499 } 6519 }
@@ -7051,9 +7071,9 @@ bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7051 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS; 7071 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7052 7072
7053 if (netif_running(bp->dev)) { 7073 if (netif_running(bp->dev)) {
7054 bnx2_netif_stop(bp); 7074 bnx2_netif_stop(bp, true);
7055 bnx2_init_nic(bp, 0); 7075 bnx2_init_nic(bp, 0);
7056 bnx2_netif_start(bp); 7076 bnx2_netif_start(bp, true);
7057 } 7077 }
7058 7078
7059 return 0; 7079 return 0;
@@ -7083,7 +7103,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7083 /* Reset will erase chipset stats; save them */ 7103 /* Reset will erase chipset stats; save them */
7084 bnx2_save_stats(bp); 7104 bnx2_save_stats(bp);
7085 7105
7086 bnx2_netif_stop(bp); 7106 bnx2_netif_stop(bp, true);
7087 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); 7107 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7088 bnx2_free_skbs(bp); 7108 bnx2_free_skbs(bp);
7089 bnx2_free_mem(bp); 7109 bnx2_free_mem(bp);
@@ -7111,7 +7131,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7111 bnx2_setup_cnic_irq_info(bp); 7131 bnx2_setup_cnic_irq_info(bp);
7112 mutex_unlock(&bp->cnic_lock); 7132 mutex_unlock(&bp->cnic_lock);
7113#endif 7133#endif
7114 bnx2_netif_start(bp); 7134 bnx2_netif_start(bp, true);
7115 } 7135 }
7116 return 0; 7136 return 0;
7117} 7137}
@@ -7364,7 +7384,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7364 if (etest->flags & ETH_TEST_FL_OFFLINE) { 7384 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7365 int i; 7385 int i;
7366 7386
7367 bnx2_netif_stop(bp); 7387 bnx2_netif_stop(bp, true);
7368 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG); 7388 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7369 bnx2_free_skbs(bp); 7389 bnx2_free_skbs(bp);
7370 7390
@@ -7383,7 +7403,7 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7383 bnx2_shutdown_chip(bp); 7403 bnx2_shutdown_chip(bp);
7384 else { 7404 else {
7385 bnx2_init_nic(bp, 1); 7405 bnx2_init_nic(bp, 1);
7386 bnx2_netif_start(bp); 7406 bnx2_netif_start(bp, true);
7387 } 7407 }
7388 7408
7389 /* wait for link up */ 7409 /* wait for link up */
@@ -8291,7 +8311,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8291 memcpy(dev->dev_addr, bp->mac_addr, 6); 8311 memcpy(dev->dev_addr, bp->mac_addr, 6);
8292 memcpy(dev->perm_addr, bp->mac_addr, 6); 8312 memcpy(dev->perm_addr, bp->mac_addr, 6);
8293 8313
8294 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 8314 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
8295 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG); 8315 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8296 if (CHIP_NUM(bp) == CHIP_NUM_5709) { 8316 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8297 dev->features |= NETIF_F_IPV6_CSUM; 8317 dev->features |= NETIF_F_IPV6_CSUM;
@@ -8377,7 +8397,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8377 return 0; 8397 return 0;
8378 8398
8379 flush_scheduled_work(); 8399 flush_scheduled_work();
8380 bnx2_netif_stop(bp); 8400 bnx2_netif_stop(bp, true);
8381 netif_device_detach(dev); 8401 netif_device_detach(dev);
8382 del_timer_sync(&bp->timer); 8402 del_timer_sync(&bp->timer);
8383 bnx2_shutdown_chip(bp); 8403 bnx2_shutdown_chip(bp);
@@ -8399,7 +8419,7 @@ bnx2_resume(struct pci_dev *pdev)
8399 bnx2_set_power_state(bp, PCI_D0); 8419 bnx2_set_power_state(bp, PCI_D0);
8400 netif_device_attach(dev); 8420 netif_device_attach(dev);
8401 bnx2_init_nic(bp, 1); 8421 bnx2_init_nic(bp, 1);
8402 bnx2_netif_start(bp); 8422 bnx2_netif_start(bp, true);
8403 return 0; 8423 return 0;
8404} 8424}
8405 8425
@@ -8426,7 +8446,7 @@ static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8426 } 8446 }
8427 8447
8428 if (netif_running(dev)) { 8448 if (netif_running(dev)) {
8429 bnx2_netif_stop(bp); 8449 bnx2_netif_stop(bp, true);
8430 del_timer_sync(&bp->timer); 8450 del_timer_sync(&bp->timer);
8431 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET); 8451 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8432 } 8452 }
@@ -8483,7 +8503,7 @@ static void bnx2_io_resume(struct pci_dev *pdev)
8483 8503
8484 rtnl_lock(); 8504 rtnl_lock();
8485 if (netif_running(dev)) 8505 if (netif_running(dev))
8486 bnx2_netif_start(bp); 8506 bnx2_netif_start(bp, true);
8487 8507
8488 netif_device_attach(dev); 8508 netif_device_attach(dev);
8489 rtnl_unlock(); 8509 rtnl_unlock();
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index cd4b0e4637ab..ddaa3fc99876 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6347,6 +6347,8 @@ struct l2_fhdr {
6347#define BNX2_MCP_SCRATCH 0x00160000 6347#define BNX2_MCP_SCRATCH 0x00160000
6348#define BNX2_MCP_STATE_P1 0x0016f9c8 6348#define BNX2_MCP_STATE_P1 0x0016f9c8
6349#define BNX2_MCP_STATE_P0 0x0016fdc8 6349#define BNX2_MCP_STATE_P0 0x0016fdc8
6350#define BNX2_MCP_STATE_P1_5708 0x001699c8
6351#define BNX2_MCP_STATE_P0_5708 0x00169dc8
6350 6352
6351#define BNX2_SHM_HDR_SIGNATURE BNX2_MCP_SCRATCH 6353#define BNX2_SHM_HDR_SIGNATURE BNX2_MCP_SCRATCH
6352#define BNX2_SHM_HDR_SIGNATURE_SIG_MASK 0xffff0000 6354#define BNX2_SHM_HDR_SIGNATURE_SIG_MASK 0xffff0000
@@ -6551,17 +6553,18 @@ struct l2_fhdr {
6551 6553
6552struct sw_bd { 6554struct sw_bd {
6553 struct sk_buff *skb; 6555 struct sk_buff *skb;
6554 DECLARE_PCI_UNMAP_ADDR(mapping) 6556 struct l2_fhdr *desc;
6557 DEFINE_DMA_UNMAP_ADDR(mapping);
6555}; 6558};
6556 6559
6557struct sw_pg { 6560struct sw_pg {
6558 struct page *page; 6561 struct page *page;
6559 DECLARE_PCI_UNMAP_ADDR(mapping) 6562 DEFINE_DMA_UNMAP_ADDR(mapping);
6560}; 6563};
6561 6564
6562struct sw_tx_bd { 6565struct sw_tx_bd {
6563 struct sk_buff *skb; 6566 struct sk_buff *skb;
6564 DECLARE_PCI_UNMAP_ADDR(mapping) 6567 DEFINE_DMA_UNMAP_ADDR(mapping);
6565 unsigned short is_gso; 6568 unsigned short is_gso;
6566 unsigned short nr_frags; 6569 unsigned short nr_frags;
6567}; 6570};
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 3c48a7a68308..8bd23687c530 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -24,16 +24,25 @@
24#define BCM_VLAN 1 24#define BCM_VLAN 1
25#endif 25#endif
26 26
27#define BNX2X_MULTI_QUEUE
28
29#define BNX2X_NEW_NAPI
30
31
32
27#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 33#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
28#define BCM_CNIC 1 34#define BCM_CNIC 1
29#include "cnic_if.h" 35#include "cnic_if.h"
30#endif 36#endif
31 37
32#define BNX2X_MULTI_QUEUE
33
34#define BNX2X_NEW_NAPI
35
36 38
39#ifdef BCM_CNIC
40#define BNX2X_MIN_MSIX_VEC_CNT 3
41#define BNX2X_MSIX_VEC_FP_START 2
42#else
43#define BNX2X_MIN_MSIX_VEC_CNT 2
44#define BNX2X_MSIX_VEC_FP_START 1
45#endif
37 46
38#include <linux/mdio.h> 47#include <linux/mdio.h>
39#include "bnx2x_reg.h" 48#include "bnx2x_reg.h"
@@ -83,7 +92,12 @@ do { \
83 __func__, __LINE__, \ 92 __func__, __LINE__, \
84 bp->dev ? (bp->dev->name) : "?", \ 93 bp->dev ? (bp->dev->name) : "?", \
85 ##__args); \ 94 ##__args); \
86} while (0) 95 } while (0)
96
97#define BNX2X_ERROR(__fmt, __args...) do { \
98 pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
99 } while (0)
100
87 101
88/* before we have a dev->name use dev_info() */ 102/* before we have a dev->name use dev_info() */
89#define BNX2X_DEV_INFO(__fmt, __args...) \ 103#define BNX2X_DEV_INFO(__fmt, __args...) \
@@ -155,15 +169,21 @@ do { \
155#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) 169#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
156#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) 170#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
157 171
172#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field)
173#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val)
174
158#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) 175#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
159#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) 176#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
160 177
178#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
179 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
180
161 181
162/* fast path */ 182/* fast path */
163 183
164struct sw_rx_bd { 184struct sw_rx_bd {
165 struct sk_buff *skb; 185 struct sk_buff *skb;
166 DECLARE_PCI_UNMAP_ADDR(mapping) 186 DEFINE_DMA_UNMAP_ADDR(mapping);
167}; 187};
168 188
169struct sw_tx_bd { 189struct sw_tx_bd {
@@ -176,7 +196,7 @@ struct sw_tx_bd {
176 196
177struct sw_rx_page { 197struct sw_rx_page {
178 struct page *page; 198 struct page *page;
179 DECLARE_PCI_UNMAP_ADDR(mapping) 199 DEFINE_DMA_UNMAP_ADDR(mapping);
180}; 200};
181 201
182union db_prod { 202union db_prod {
@@ -261,7 +281,7 @@ struct bnx2x_eth_q_stats {
261 u32 hw_csum_err; 281 u32 hw_csum_err;
262}; 282};
263 283
264#define BNX2X_NUM_Q_STATS 11 284#define BNX2X_NUM_Q_STATS 13
265#define Q_STATS_OFFSET32(stat_name) \ 285#define Q_STATS_OFFSET32(stat_name) \
266 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) 286 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
267 287
@@ -767,7 +787,7 @@ struct bnx2x_eth_stats {
767 u32 nig_timer_max; 787 u32 nig_timer_max;
768}; 788};
769 789
770#define BNX2X_NUM_STATS 41 790#define BNX2X_NUM_STATS 43
771#define STATS_OFFSET32(stat_name) \ 791#define STATS_OFFSET32(stat_name) \
772 (offsetof(struct bnx2x_eth_stats, stat_name) / 4) 792 (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
773 793
@@ -818,6 +838,12 @@ struct attn_route {
818 u32 sig[4]; 838 u32 sig[4];
819}; 839};
820 840
841typedef enum {
842 BNX2X_RECOVERY_DONE,
843 BNX2X_RECOVERY_INIT,
844 BNX2X_RECOVERY_WAIT,
845} bnx2x_recovery_state_t;
846
821struct bnx2x { 847struct bnx2x {
822 /* Fields used in the tx and intr/napi performance paths 848 /* Fields used in the tx and intr/napi performance paths
823 * are grouped together in the beginning of the structure 849 * are grouped together in the beginning of the structure
@@ -835,6 +861,9 @@ struct bnx2x {
835 struct pci_dev *pdev; 861 struct pci_dev *pdev;
836 862
837 atomic_t intr_sem; 863 atomic_t intr_sem;
864
865 bnx2x_recovery_state_t recovery_state;
866 int is_leader;
838#ifdef BCM_CNIC 867#ifdef BCM_CNIC
839 struct msix_entry msix_table[MAX_CONTEXT+2]; 868 struct msix_entry msix_table[MAX_CONTEXT+2];
840#else 869#else
@@ -842,7 +871,6 @@ struct bnx2x {
842#endif 871#endif
843#define INT_MODE_INTx 1 872#define INT_MODE_INTx 1
844#define INT_MODE_MSI 2 873#define INT_MODE_MSI 2
845#define INT_MODE_MSIX 3
846 874
847 int tx_ring_size; 875 int tx_ring_size;
848 876
@@ -924,8 +952,7 @@ struct bnx2x {
924 int mrrs; 952 int mrrs;
925 953
926 struct delayed_work sp_task; 954 struct delayed_work sp_task;
927 struct work_struct reset_task; 955 struct delayed_work reset_task;
928
929 struct timer_list timer; 956 struct timer_list timer;
930 int current_interval; 957 int current_interval;
931 958
@@ -961,6 +988,8 @@ struct bnx2x {
961 u16 rx_quick_cons_trip; 988 u16 rx_quick_cons_trip;
962 u16 rx_ticks_int; 989 u16 rx_ticks_int;
963 u16 rx_ticks; 990 u16 rx_ticks;
991/* Maximal coalescing timeout in us */
992#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
964 993
965 u32 lin_cnt; 994 u32 lin_cnt;
966 995
@@ -1075,6 +1104,7 @@ struct bnx2x {
1075#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) 1104#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
1076#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) 1105#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
1077 1106
1107 char fw_ver[32];
1078 const struct firmware *firmware; 1108 const struct firmware *firmware;
1079}; 1109};
1080 1110
@@ -1125,6 +1155,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1125#define LOAD_DIAG 2 1155#define LOAD_DIAG 2
1126#define UNLOAD_NORMAL 0 1156#define UNLOAD_NORMAL 0
1127#define UNLOAD_CLOSE 1 1157#define UNLOAD_CLOSE 1
1158#define UNLOAD_RECOVERY 2
1128 1159
1129 1160
1130/* DMAE command defines */ 1161/* DMAE command defines */
@@ -1152,7 +1183,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1152#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT 1183#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
1153 1184
1154#define DMAE_LEN32_RD_MAX 0x80 1185#define DMAE_LEN32_RD_MAX 0x80
1155#define DMAE_LEN32_WR_MAX 0x400 1186#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
1156 1187
1157#define DMAE_COMP_VAL 0xe0d0d0ae 1188#define DMAE_COMP_VAL 0xe0d0d0ae
1158 1189
@@ -1294,8 +1325,12 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1294 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ 1325 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
1295 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) 1326 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
1296 1327
1328#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
1329 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
1330 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
1331 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
1297 1332
1298#define MULTI_FLAGS(bp) \ 1333#define RSS_FLAGS(bp) \
1299 (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ 1334 (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
1300 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \ 1335 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
1301 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \ 1336 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
@@ -1333,6 +1368,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1333#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 1368#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
1334#endif 1369#endif
1335 1370
1371#define BNX2X_VPD_LEN 128
1372#define VENDOR_ID_LEN 4
1373
1336/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */ 1374/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
1337 1375
1338#endif /* bnx2x.h */ 1376#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index 760069345b11..fd1f29e0317d 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -683,7 +683,7 @@ struct drv_func_mb {
683#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000 683#define DRV_MSG_CODE_GET_MANUF_KEY 0x82000000
684#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000 684#define DRV_MSG_CODE_LOAD_L2B_PRAM 0x90000000
685 /* 685 /*
686 * The optic module verification commands requris bootcode 686 * The optic module verification commands require bootcode
687 * v5.0.6 or later 687 * v5.0.6 or later
688 */ 688 */
689#define DRV_MSG_CODE_VRFY_OPT_MDL 0xa0000000 689#define DRV_MSG_CODE_VRFY_OPT_MDL 0xa0000000
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index 32e79c359e89..ff70be898765 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1594,7 +1594,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
1594 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; 1594 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
1595 pause_result |= (lp_pause & 1595 pause_result |= (lp_pause &
1596 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; 1596 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
1597 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n", 1597 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
1598 pause_result); 1598 pause_result);
1599 bnx2x_pause_resolve(vars, pause_result); 1599 bnx2x_pause_resolve(vars, pause_result);
1600 if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE && 1600 if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
@@ -1616,7 +1616,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
1616 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; 1616 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
1617 1617
1618 bnx2x_pause_resolve(vars, pause_result); 1618 bnx2x_pause_resolve(vars, pause_result);
1619 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n", 1619 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
1620 pause_result); 1620 pause_result);
1621 } 1621 }
1622 } 1622 }
@@ -1974,7 +1974,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1974 } 1974 }
1975 } 1975 }
1976 1976
1977 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x \n", 1977 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
1978 gp_status, vars->phy_link_up, vars->line_speed); 1978 gp_status, vars->phy_link_up, vars->line_speed);
1979 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x" 1979 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x"
1980 " autoneg 0x%x\n", 1980 " autoneg 0x%x\n",
@@ -3852,7 +3852,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3852 SPEED_AUTO_NEG) && 3852 SPEED_AUTO_NEG) &&
3853 ((params->speed_cap_mask & 3853 ((params->speed_cap_mask &
3854 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) { 3854 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
3855 DP(NETIF_MSG_LINK, "Setting 1G clause37 \n"); 3855 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
3856 bnx2x_cl45_write(bp, params->port, ext_phy_type, 3856 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3857 ext_phy_addr, MDIO_AN_DEVAD, 3857 ext_phy_addr, MDIO_AN_DEVAD,
3858 MDIO_AN_REG_ADV, 0x20); 3858 MDIO_AN_REG_ADV, 0x20);
@@ -4234,14 +4234,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4234 ext_phy_addr, 4234 ext_phy_addr,
4235 MDIO_PMA_DEVAD, 4235 MDIO_PMA_DEVAD,
4236 MDIO_PMA_REG_10G_CTRL2, &tmp1); 4236 MDIO_PMA_REG_10G_CTRL2, &tmp1);
4237 DP(NETIF_MSG_LINK, "1.7 = 0x%x \n", tmp1); 4237 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
4238 4238
4239 } else if ((params->req_line_speed == 4239 } else if ((params->req_line_speed ==
4240 SPEED_AUTO_NEG) && 4240 SPEED_AUTO_NEG) &&
4241 ((params->speed_cap_mask & 4241 ((params->speed_cap_mask &
4242 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) { 4242 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
4243 4243
4244 DP(NETIF_MSG_LINK, "Setting 1G clause37 \n"); 4244 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
4245 bnx2x_cl45_write(bp, params->port, ext_phy_type, 4245 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4246 ext_phy_addr, MDIO_AN_DEVAD, 4246 ext_phy_addr, MDIO_AN_DEVAD,
4247 MDIO_PMA_REG_8727_MISC_CTRL, 0); 4247 MDIO_PMA_REG_8727_MISC_CTRL, 0);
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 6c042a72d6cc..57ff5b3bcce6 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,8 +57,8 @@
57#include "bnx2x_init_ops.h" 57#include "bnx2x_init_ops.h"
58#include "bnx2x_dump.h" 58#include "bnx2x_dump.h"
59 59
60#define DRV_MODULE_VERSION "1.52.1-7" 60#define DRV_MODULE_VERSION "1.52.53-1"
61#define DRV_MODULE_RELDATE "2010/02/28" 61#define DRV_MODULE_RELDATE "2010/18/04"
62#define BNX2X_BC_VER 0x040200 62#define BNX2X_BC_VER 0x040200
63 63
64#include <linux/firmware.h> 64#include <linux/firmware.h>
@@ -102,7 +102,8 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
102 102
103static int int_mode; 103static int int_mode;
104module_param(int_mode, int, 0); 104module_param(int_mode, int, 0);
105MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)"); 105MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
106 "(1 INT#x; 2 MSI)");
106 107
107static int dropless_fc; 108static int dropless_fc;
108module_param(dropless_fc, int, 0); 109module_param(dropless_fc, int, 0);
@@ -352,13 +353,14 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
352void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 353void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353 u32 addr, u32 len) 354 u32 addr, u32 len)
354{ 355{
356 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
355 int offset = 0; 357 int offset = 0;
356 358
357 while (len > DMAE_LEN32_WR_MAX) { 359 while (len > dmae_wr_max) {
358 bnx2x_write_dmae(bp, phys_addr + offset, 360 bnx2x_write_dmae(bp, phys_addr + offset,
359 addr + offset, DMAE_LEN32_WR_MAX); 361 addr + offset, dmae_wr_max);
360 offset += DMAE_LEN32_WR_MAX * 4; 362 offset += dmae_wr_max * 4;
361 len -= DMAE_LEN32_WR_MAX; 363 len -= dmae_wr_max;
362 } 364 }
363 365
364 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); 366 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
@@ -508,26 +510,31 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
508 510
509static void bnx2x_fw_dump(struct bnx2x *bp) 511static void bnx2x_fw_dump(struct bnx2x *bp)
510{ 512{
513 u32 addr;
511 u32 mark, offset; 514 u32 mark, offset;
512 __be32 data[9]; 515 __be32 data[9];
513 int word; 516 int word;
514 517
515 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104); 518 if (BP_NOMCP(bp)) {
516 mark = ((mark + 0x3) & ~0x3); 519 BNX2X_ERR("NO MCP - can not dump\n");
520 return;
521 }
522
523 addr = bp->common.shmem_base - 0x0800 + 4;
524 mark = REG_RD(bp, addr);
525 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
517 pr_err("begin fw dump (mark 0x%x)\n", mark); 526 pr_err("begin fw dump (mark 0x%x)\n", mark);
518 527
519 pr_err(""); 528 pr_err("");
520 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) { 529 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
521 for (word = 0; word < 8; word++) 530 for (word = 0; word < 8; word++)
522 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + 531 data[word] = htonl(REG_RD(bp, offset + 4*word));
523 offset + 4*word));
524 data[8] = 0x0; 532 data[8] = 0x0;
525 pr_cont("%s", (char *)data); 533 pr_cont("%s", (char *)data);
526 } 534 }
527 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) { 535 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
528 for (word = 0; word < 8; word++) 536 for (word = 0; word < 8; word++)
529 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH + 537 data[word] = htonl(REG_RD(bp, offset + 4*word));
530 offset + 4*word));
531 data[8] = 0x0; 538 data[8] = 0x0;
532 pr_cont("%s", (char *)data); 539 pr_cont("%s", (char *)data);
533 } 540 }
@@ -546,9 +553,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
546 553
547 /* Indices */ 554 /* Indices */
548 /* Common */ 555 /* Common */
549 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)" 556 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
550 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)" 557 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
551 " spq_prod_idx(%u)\n", 558 " spq_prod_idx(0x%x)\n",
552 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx, 559 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
553 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx); 560 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
554 561
@@ -556,14 +563,14 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
556 for_each_queue(bp, i) { 563 for_each_queue(bp, i) {
557 struct bnx2x_fastpath *fp = &bp->fp[i]; 564 struct bnx2x_fastpath *fp = &bp->fp[i];
558 565
559 BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)" 566 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
560 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)" 567 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
561 " rx_comp_cons(%x) *rx_cons_sb(%x)\n", 568 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
562 i, fp->rx_bd_prod, fp->rx_bd_cons, 569 i, fp->rx_bd_prod, fp->rx_bd_cons,
563 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod, 570 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
564 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb)); 571 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
565 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)" 572 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
566 " fp_u_idx(%x) *sb_u_idx(%x)\n", 573 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
567 fp->rx_sge_prod, fp->last_max_sge, 574 fp->rx_sge_prod, fp->last_max_sge,
568 le16_to_cpu(fp->fp_u_idx), 575 le16_to_cpu(fp->fp_u_idx),
569 fp->status_blk->u_status_block.status_block_index); 576 fp->status_blk->u_status_block.status_block_index);
@@ -573,12 +580,13 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
573 for_each_queue(bp, i) { 580 for_each_queue(bp, i) {
574 struct bnx2x_fastpath *fp = &bp->fp[i]; 581 struct bnx2x_fastpath *fp = &bp->fp[i];
575 582
576 BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)" 583 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
577 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n", 584 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
585 " *tx_cons_sb(0x%x)\n",
578 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod, 586 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
579 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb)); 587 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
580 BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)" 588 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
581 " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx), 589 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
582 fp->status_blk->c_status_block.status_block_index, 590 fp->status_blk->c_status_block.status_block_index,
583 fp->tx_db.data.prod); 591 fp->tx_db.data.prod);
584 } 592 }
@@ -764,6 +772,40 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
764 * General service functions 772 * General service functions
765 */ 773 */
766 774
775/* Return true if succeeded to acquire the lock */
776static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
777{
778 u32 lock_status;
779 u32 resource_bit = (1 << resource);
780 int func = BP_FUNC(bp);
781 u32 hw_lock_control_reg;
782
783 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
784
785 /* Validating that the resource is within range */
786 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
787 DP(NETIF_MSG_HW,
788 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
789 resource, HW_LOCK_MAX_RESOURCE_VALUE);
790 return -EINVAL;
791 }
792
793 if (func <= 5)
794 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
795 else
796 hw_lock_control_reg =
797 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
798
799 /* Try to acquire the lock */
800 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
801 lock_status = REG_RD(bp, hw_lock_control_reg);
802 if (lock_status & resource_bit)
803 return true;
804
805 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
806 return false;
807}
808
767static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id, 809static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
768 u8 storm, u16 index, u8 op, u8 update) 810 u8 storm, u16 index, u8 op, u8 update)
769{ 811{
@@ -842,7 +884,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
842 /* unmap first bd */ 884 /* unmap first bd */
843 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); 885 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
844 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd; 886 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
845 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd), 887 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
846 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE); 888 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
847 889
848 nbd = le16_to_cpu(tx_start_bd->nbd) - 1; 890 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -872,8 +914,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
872 914
873 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx); 915 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
874 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd; 916 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
875 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd), 917 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
876 BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE); 918 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
877 if (--nbd) 919 if (--nbd)
878 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); 920 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
879 } 921 }
@@ -1023,7 +1065,8 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1023 1065
1024 default: 1066 default:
1025 BNX2X_ERR("unexpected MC reply (%d) " 1067 BNX2X_ERR("unexpected MC reply (%d) "
1026 "fp->state is %x\n", command, fp->state); 1068 "fp[%d] state is %x\n",
1069 command, fp->index, fp->state);
1027 break; 1070 break;
1028 } 1071 }
1029 mb(); /* force bnx2x_wait_ramrod() to see the change */ 1072 mb(); /* force bnx2x_wait_ramrod() to see the change */
@@ -1086,7 +1129,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1086 if (!page) 1129 if (!page)
1087 return; 1130 return;
1088 1131
1089 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping), 1132 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
1090 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); 1133 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1091 __free_pages(page, PAGES_PER_SGE_SHIFT); 1134 __free_pages(page, PAGES_PER_SGE_SHIFT);
1092 1135
@@ -1115,15 +1158,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1115 if (unlikely(page == NULL)) 1158 if (unlikely(page == NULL))
1116 return -ENOMEM; 1159 return -ENOMEM;
1117 1160
1118 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE, 1161 mapping = dma_map_page(&bp->pdev->dev, page, 0,
1119 PCI_DMA_FROMDEVICE); 1162 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1120 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 1163 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1121 __free_pages(page, PAGES_PER_SGE_SHIFT); 1164 __free_pages(page, PAGES_PER_SGE_SHIFT);
1122 return -ENOMEM; 1165 return -ENOMEM;
1123 } 1166 }
1124 1167
1125 sw_buf->page = page; 1168 sw_buf->page = page;
1126 pci_unmap_addr_set(sw_buf, mapping, mapping); 1169 dma_unmap_addr_set(sw_buf, mapping, mapping);
1127 1170
1128 sge->addr_hi = cpu_to_le32(U64_HI(mapping)); 1171 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1129 sge->addr_lo = cpu_to_le32(U64_LO(mapping)); 1172 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1143,15 +1186,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1143 if (unlikely(skb == NULL)) 1186 if (unlikely(skb == NULL))
1144 return -ENOMEM; 1187 return -ENOMEM;
1145 1188
1146 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size, 1189 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1147 PCI_DMA_FROMDEVICE); 1190 DMA_FROM_DEVICE);
1148 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 1191 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1149 dev_kfree_skb(skb); 1192 dev_kfree_skb(skb);
1150 return -ENOMEM; 1193 return -ENOMEM;
1151 } 1194 }
1152 1195
1153 rx_buf->skb = skb; 1196 rx_buf->skb = skb;
1154 pci_unmap_addr_set(rx_buf, mapping, mapping); 1197 dma_unmap_addr_set(rx_buf, mapping, mapping);
1155 1198
1156 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 1199 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1157 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 1200 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1173,13 +1216,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1173 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons]; 1216 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1174 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod]; 1217 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1175 1218
1176 pci_dma_sync_single_for_device(bp->pdev, 1219 dma_sync_single_for_device(&bp->pdev->dev,
1177 pci_unmap_addr(cons_rx_buf, mapping), 1220 dma_unmap_addr(cons_rx_buf, mapping),
1178 RX_COPY_THRESH, PCI_DMA_FROMDEVICE); 1221 RX_COPY_THRESH, DMA_FROM_DEVICE);
1179 1222
1180 prod_rx_buf->skb = cons_rx_buf->skb; 1223 prod_rx_buf->skb = cons_rx_buf->skb;
1181 pci_unmap_addr_set(prod_rx_buf, mapping, 1224 dma_unmap_addr_set(prod_rx_buf, mapping,
1182 pci_unmap_addr(cons_rx_buf, mapping)); 1225 dma_unmap_addr(cons_rx_buf, mapping));
1183 *prod_bd = *cons_bd; 1226 *prod_bd = *cons_bd;
1184} 1227}
1185 1228
@@ -1283,9 +1326,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1283 1326
1284 /* move empty skb from pool to prod and map it */ 1327 /* move empty skb from pool to prod and map it */
1285 prod_rx_buf->skb = fp->tpa_pool[queue].skb; 1328 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1286 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data, 1329 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
1287 bp->rx_buf_size, PCI_DMA_FROMDEVICE); 1330 bp->rx_buf_size, DMA_FROM_DEVICE);
1288 pci_unmap_addr_set(prod_rx_buf, mapping, mapping); 1331 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
1289 1332
1290 /* move partial skb from cons to pool (don't unmap yet) */ 1333 /* move partial skb from cons to pool (don't unmap yet) */
1291 fp->tpa_pool[queue] = *cons_rx_buf; 1334 fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1302,7 +1345,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1302 1345
1303#ifdef BNX2X_STOP_ON_ERROR 1346#ifdef BNX2X_STOP_ON_ERROR
1304 fp->tpa_queue_used |= (1 << queue); 1347 fp->tpa_queue_used |= (1 << queue);
1305#ifdef __powerpc64__ 1348#ifdef _ASM_GENERIC_INT_L64_H
1306 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n", 1349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1307#else 1350#else
1308 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n", 1351 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
@@ -1331,8 +1374,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1331 max(frag_size, (u32)len_on_bd)); 1374 max(frag_size, (u32)len_on_bd));
1332 1375
1333#ifdef BNX2X_STOP_ON_ERROR 1376#ifdef BNX2X_STOP_ON_ERROR
1334 if (pages > 1377 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
1335 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
1336 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n", 1378 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1337 pages, cqe_idx); 1379 pages, cqe_idx);
1338 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n", 1380 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
@@ -1361,8 +1403,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1361 } 1403 }
1362 1404
1363 /* Unmap the page as we r going to pass it to the stack */ 1405 /* Unmap the page as we r going to pass it to the stack */
1364 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping), 1406 dma_unmap_page(&bp->pdev->dev,
1365 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE); 1407 dma_unmap_addr(&old_rx_pg, mapping),
1408 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1366 1409
1367 /* Add one frag and update the appropriate fields in the skb */ 1410 /* Add one frag and update the appropriate fields in the skb */
1368 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len); 1411 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1389,8 +1432,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1389 /* Unmap skb in the pool anyway, as we are going to change 1432 /* Unmap skb in the pool anyway, as we are going to change
1390 pool entry status to BNX2X_TPA_STOP even if new skb allocation 1433 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1391 fails. */ 1434 fails. */
1392 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping), 1435 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1393 bp->rx_buf_size, PCI_DMA_FROMDEVICE); 1436 bp->rx_buf_size, DMA_FROM_DEVICE);
1394 1437
1395 if (likely(new_skb)) { 1438 if (likely(new_skb)) {
1396 /* fix ip xsum and give it to the stack */ 1439 /* fix ip xsum and give it to the stack */
@@ -1441,12 +1484,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1441#ifdef BCM_VLAN 1484#ifdef BCM_VLAN
1442 if ((bp->vlgrp != NULL) && is_vlan_cqe && 1485 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1443 (!is_not_hwaccel_vlan_cqe)) 1486 (!is_not_hwaccel_vlan_cqe))
1444 vlan_hwaccel_receive_skb(skb, bp->vlgrp, 1487 vlan_gro_receive(&fp->napi, bp->vlgrp,
1445 le16_to_cpu(cqe->fast_path_cqe. 1488 le16_to_cpu(cqe->fast_path_cqe.
1446 vlan_tag)); 1489 vlan_tag), skb);
1447 else 1490 else
1448#endif 1491#endif
1449 netif_receive_skb(skb); 1492 napi_gro_receive(&fp->napi, skb);
1450 } else { 1493 } else {
1451 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages" 1494 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1452 " - dropping packet!\n"); 1495 " - dropping packet!\n");
@@ -1539,7 +1582,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1539 struct sw_rx_bd *rx_buf = NULL; 1582 struct sw_rx_bd *rx_buf = NULL;
1540 struct sk_buff *skb; 1583 struct sk_buff *skb;
1541 union eth_rx_cqe *cqe; 1584 union eth_rx_cqe *cqe;
1542 u8 cqe_fp_flags; 1585 u8 cqe_fp_flags, cqe_fp_status_flags;
1543 u16 len, pad; 1586 u16 len, pad;
1544 1587
1545 comp_ring_cons = RCQ_BD(sw_comp_cons); 1588 comp_ring_cons = RCQ_BD(sw_comp_cons);
@@ -1555,6 +1598,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1555 1598
1556 cqe = &fp->rx_comp_ring[comp_ring_cons]; 1599 cqe = &fp->rx_comp_ring[comp_ring_cons];
1557 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags; 1600 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1601 cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
1558 1602
1559 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x" 1603 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1560 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags), 1604 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
@@ -1573,7 +1617,6 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1573 rx_buf = &fp->rx_buf_ring[bd_cons]; 1617 rx_buf = &fp->rx_buf_ring[bd_cons];
1574 skb = rx_buf->skb; 1618 skb = rx_buf->skb;
1575 prefetch(skb); 1619 prefetch(skb);
1576 prefetch((u8 *)skb + 256);
1577 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len); 1620 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1578 pad = cqe->fast_path_cqe.placement_offset; 1621 pad = cqe->fast_path_cqe.placement_offset;
1579 1622
@@ -1620,11 +1663,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1620 } 1663 }
1621 } 1664 }
1622 1665
1623 pci_dma_sync_single_for_device(bp->pdev, 1666 dma_sync_single_for_device(&bp->pdev->dev,
1624 pci_unmap_addr(rx_buf, mapping), 1667 dma_unmap_addr(rx_buf, mapping),
1625 pad + RX_COPY_THRESH, 1668 pad + RX_COPY_THRESH,
1626 PCI_DMA_FROMDEVICE); 1669 DMA_FROM_DEVICE);
1627 prefetch(skb);
1628 prefetch(((char *)(skb)) + 128); 1670 prefetch(((char *)(skb)) + 128);
1629 1671
1630 /* is this an error packet? */ 1672 /* is this an error packet? */
@@ -1665,10 +1707,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1665 1707
1666 } else 1708 } else
1667 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) { 1709 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1668 pci_unmap_single(bp->pdev, 1710 dma_unmap_single(&bp->pdev->dev,
1669 pci_unmap_addr(rx_buf, mapping), 1711 dma_unmap_addr(rx_buf, mapping),
1670 bp->rx_buf_size, 1712 bp->rx_buf_size,
1671 PCI_DMA_FROMDEVICE); 1713 DMA_FROM_DEVICE);
1672 skb_reserve(skb, pad); 1714 skb_reserve(skb, pad);
1673 skb_put(skb, len); 1715 skb_put(skb, len);
1674 1716
@@ -1684,6 +1726,12 @@ reuse_rx:
1684 1726
1685 skb->protocol = eth_type_trans(skb, bp->dev); 1727 skb->protocol = eth_type_trans(skb, bp->dev);
1686 1728
1729 if ((bp->dev->features & NETIF_F_RXHASH) &&
1730 (cqe_fp_status_flags &
1731 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1732 skb->rxhash = le32_to_cpu(
1733 cqe->fast_path_cqe.rss_hash_result);
1734
1687 skb->ip_summed = CHECKSUM_NONE; 1735 skb->ip_summed = CHECKSUM_NONE;
1688 if (bp->rx_csum) { 1736 if (bp->rx_csum) {
1689 if (likely(BNX2X_RX_CSUM_OK(cqe))) 1737 if (likely(BNX2X_RX_CSUM_OK(cqe)))
@@ -1699,11 +1747,11 @@ reuse_rx:
1699 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) && 1747 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1700 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) & 1748 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1701 PARSING_FLAGS_VLAN)) 1749 PARSING_FLAGS_VLAN))
1702 vlan_hwaccel_receive_skb(skb, bp->vlgrp, 1750 vlan_gro_receive(&fp->napi, bp->vlgrp,
1703 le16_to_cpu(cqe->fast_path_cqe.vlan_tag)); 1751 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1704 else 1752 else
1705#endif 1753#endif
1706 netif_receive_skb(skb); 1754 napi_gro_receive(&fp->napi, skb);
1707 1755
1708 1756
1709next_rx: 1757next_rx:
@@ -1831,8 +1879,8 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1831 return IRQ_HANDLED; 1879 return IRQ_HANDLED;
1832 } 1880 }
1833 1881
1834 if (status) 1882 if (unlikely(status))
1835 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n", 1883 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1836 status); 1884 status);
1837 1885
1838 return IRQ_HANDLED; 1886 return IRQ_HANDLED;
@@ -1900,6 +1948,8 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1900 int func = BP_FUNC(bp); 1948 int func = BP_FUNC(bp);
1901 u32 hw_lock_control_reg; 1949 u32 hw_lock_control_reg;
1902 1950
1951 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1952
1903 /* Validating that the resource is within range */ 1953 /* Validating that the resource is within range */
1904 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { 1954 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1905 DP(NETIF_MSG_HW, 1955 DP(NETIF_MSG_HW,
@@ -2254,11 +2304,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
2254 2304
2255static u8 bnx2x_link_test(struct bnx2x *bp) 2305static u8 bnx2x_link_test(struct bnx2x *bp)
2256{ 2306{
2257 u8 rc; 2307 u8 rc = 0;
2258 2308
2259 bnx2x_acquire_phy_lock(bp); 2309 if (!BP_NOMCP(bp)) {
2260 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars); 2310 bnx2x_acquire_phy_lock(bp);
2261 bnx2x_release_phy_lock(bp); 2311 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2312 bnx2x_release_phy_lock(bp);
2313 } else
2314 BNX2X_ERR("Bootcode is missing - can not test link\n");
2262 2315
2263 return rc; 2316 return rc;
2264} 2317}
@@ -2387,10 +2440,10 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2387 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater 2440 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2388 than zero */ 2441 than zero */
2389 m_fair_vn.vn_credit_delta = 2442 m_fair_vn.vn_credit_delta =
2390 max((u32)(vn_min_rate * (T_FAIR_COEF / 2443 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2391 (8 * bp->vn_weight_sum))), 2444 (8 * bp->vn_weight_sum))),
2392 (u32)(bp->cmng.fair_vars.fair_threshold * 2)); 2445 (bp->cmng.fair_vars.fair_threshold * 2));
2393 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n", 2446 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2394 m_fair_vn.vn_credit_delta); 2447 m_fair_vn.vn_credit_delta);
2395 } 2448 }
2396 2449
@@ -2410,6 +2463,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2410/* This function is called upon link interrupt */ 2463/* This function is called upon link interrupt */
2411static void bnx2x_link_attn(struct bnx2x *bp) 2464static void bnx2x_link_attn(struct bnx2x *bp)
2412{ 2465{
2466 u32 prev_link_status = bp->link_vars.link_status;
2413 /* Make sure that we are synced with the current statistics */ 2467 /* Make sure that we are synced with the current statistics */
2414 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2468 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2415 2469
@@ -2442,8 +2496,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2442 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2496 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2443 } 2497 }
2444 2498
2445 /* indicate link status */ 2499 /* indicate link status only if link status actually changed */
2446 bnx2x_link_report(bp); 2500 if (prev_link_status != bp->link_vars.link_status)
2501 bnx2x_link_report(bp);
2447 2502
2448 if (IS_E1HMF(bp)) { 2503 if (IS_E1HMF(bp)) {
2449 int port = BP_PORT(bp); 2504 int port = BP_PORT(bp);
@@ -2560,7 +2615,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2560 return rc; 2615 return rc;
2561} 2616}
2562 2617
2563static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2564static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set); 2618static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2565static void bnx2x_set_rx_mode(struct net_device *dev); 2619static void bnx2x_set_rx_mode(struct net_device *dev);
2566 2620
@@ -2696,12 +2750,6 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2696{ 2750{
2697 struct eth_spe *spe; 2751 struct eth_spe *spe;
2698 2752
2699 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2700 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2701 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2702 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2703 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2704
2705#ifdef BNX2X_STOP_ON_ERROR 2753#ifdef BNX2X_STOP_ON_ERROR
2706 if (unlikely(bp->panic)) 2754 if (unlikely(bp->panic))
2707 return -EIO; 2755 return -EIO;
@@ -2720,8 +2768,8 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2720 2768
2721 /* CID needs port number to be encoded int it */ 2769 /* CID needs port number to be encoded int it */
2722 spe->hdr.conn_and_cmd_data = 2770 spe->hdr.conn_and_cmd_data =
2723 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) | 2771 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2724 HW_CID(bp, cid))); 2772 HW_CID(bp, cid));
2725 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE); 2773 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2726 if (common) 2774 if (common)
2727 spe->hdr.type |= 2775 spe->hdr.type |=
@@ -2732,6 +2780,13 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2732 2780
2733 bp->spq_left--; 2781 bp->spq_left--;
2734 2782
2783 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2784 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2785 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2786 (u32)(U64_LO(bp->spq_mapping) +
2787 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2788 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2789
2735 bnx2x_sp_prod_update(bp); 2790 bnx2x_sp_prod_update(bp);
2736 spin_unlock_bh(&bp->spq_lock); 2791 spin_unlock_bh(&bp->spq_lock);
2737 return 0; 2792 return 0;
@@ -2740,12 +2795,11 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2740/* acquire split MCP access lock register */ 2795/* acquire split MCP access lock register */
2741static int bnx2x_acquire_alr(struct bnx2x *bp) 2796static int bnx2x_acquire_alr(struct bnx2x *bp)
2742{ 2797{
2743 u32 i, j, val; 2798 u32 j, val;
2744 int rc = 0; 2799 int rc = 0;
2745 2800
2746 might_sleep(); 2801 might_sleep();
2747 i = 100; 2802 for (j = 0; j < 1000; j++) {
2748 for (j = 0; j < i*10; j++) {
2749 val = (1UL << 31); 2803 val = (1UL << 31);
2750 REG_WR(bp, GRCBASE_MCP + 0x9c, val); 2804 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2751 val = REG_RD(bp, GRCBASE_MCP + 0x9c); 2805 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
@@ -2765,9 +2819,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
2765/* release split MCP access lock register */ 2819/* release split MCP access lock register */
2766static void bnx2x_release_alr(struct bnx2x *bp) 2820static void bnx2x_release_alr(struct bnx2x *bp)
2767{ 2821{
2768 u32 val = 0; 2822 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2769
2770 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2771} 2823}
2772 2824
2773static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp) 2825static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
@@ -2823,7 +2875,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2823 2875
2824 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n", 2876 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2825 aeu_mask, asserted); 2877 aeu_mask, asserted);
2826 aeu_mask &= ~(asserted & 0xff); 2878 aeu_mask &= ~(asserted & 0x3ff);
2827 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 2879 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2828 2880
2829 REG_WR(bp, aeu_addr, aeu_mask); 2881 REG_WR(bp, aeu_addr, aeu_mask);
@@ -2910,8 +2962,9 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
2910 bp->link_params.ext_phy_config); 2962 bp->link_params.ext_phy_config);
2911 2963
2912 /* log the failure */ 2964 /* log the failure */
2913 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n" 2965 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2914 "Please contact Dell Support for assistance.\n"); 2966 " the driver to shutdown the card to prevent permanent"
2967 " damage. Please contact OEM Support for assistance\n");
2915} 2968}
2916 2969
2917static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn) 2970static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3104,10 +3157,311 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3104 } 3157 }
3105} 3158}
3106 3159
3107static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted) 3160static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3161static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3162
3163
3164#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3165#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3166#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3167#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3168#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3169#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3170/*
3171 * should be run under rtnl lock
3172 */
3173static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3174{
3175 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3176 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3177 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3178 barrier();
3179 mmiowb();
3180}
3181
3182/*
3183 * should be run under rtnl lock
3184 */
3185static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3186{
3187 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3188 val |= (1 << 16);
3189 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3190 barrier();
3191 mmiowb();
3192}
3193
3194/*
3195 * should be run under rtnl lock
3196 */
3197static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3198{
3199 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3201 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3202}
3203
3204/*
3205 * should be run under rtnl lock
3206 */
3207static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3208{
3209 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3210
3211 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3212
3213 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3214 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3215 barrier();
3216 mmiowb();
3217}
3218
3219/*
3220 * should be run under rtnl lock
3221 */
3222static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3223{
3224 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3225
3226 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3227
3228 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3229 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3230 barrier();
3231 mmiowb();
3232
3233 return val1;
3234}
3235
3236/*
3237 * should be run under rtnl lock
3238 */
3239static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3240{
3241 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3242}
3243
3244static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3245{
3246 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3247 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3248}
3249
3250static inline void _print_next_block(int idx, const char *blk)
3251{
3252 if (idx)
3253 pr_cont(", ");
3254 pr_cont("%s", blk);
3255}
3256
3257static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3258{
3259 int i = 0;
3260 u32 cur_bit = 0;
3261 for (i = 0; sig; i++) {
3262 cur_bit = ((u32)0x1 << i);
3263 if (sig & cur_bit) {
3264 switch (cur_bit) {
3265 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3266 _print_next_block(par_num++, "BRB");
3267 break;
3268 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3269 _print_next_block(par_num++, "PARSER");
3270 break;
3271 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3272 _print_next_block(par_num++, "TSDM");
3273 break;
3274 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3275 _print_next_block(par_num++, "SEARCHER");
3276 break;
3277 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3278 _print_next_block(par_num++, "TSEMI");
3279 break;
3280 }
3281
3282 /* Clear the bit */
3283 sig &= ~cur_bit;
3284 }
3285 }
3286
3287 return par_num;
3288}
3289
3290static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3291{
3292 int i = 0;
3293 u32 cur_bit = 0;
3294 for (i = 0; sig; i++) {
3295 cur_bit = ((u32)0x1 << i);
3296 if (sig & cur_bit) {
3297 switch (cur_bit) {
3298 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3299 _print_next_block(par_num++, "PBCLIENT");
3300 break;
3301 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3302 _print_next_block(par_num++, "QM");
3303 break;
3304 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3305 _print_next_block(par_num++, "XSDM");
3306 break;
3307 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3308 _print_next_block(par_num++, "XSEMI");
3309 break;
3310 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3311 _print_next_block(par_num++, "DOORBELLQ");
3312 break;
3313 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3314 _print_next_block(par_num++, "VAUX PCI CORE");
3315 break;
3316 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3317 _print_next_block(par_num++, "DEBUG");
3318 break;
3319 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3320 _print_next_block(par_num++, "USDM");
3321 break;
3322 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3323 _print_next_block(par_num++, "USEMI");
3324 break;
3325 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3326 _print_next_block(par_num++, "UPB");
3327 break;
3328 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3329 _print_next_block(par_num++, "CSDM");
3330 break;
3331 }
3332
3333 /* Clear the bit */
3334 sig &= ~cur_bit;
3335 }
3336 }
3337
3338 return par_num;
3339}
3340
3341static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3342{
3343 int i = 0;
3344 u32 cur_bit = 0;
3345 for (i = 0; sig; i++) {
3346 cur_bit = ((u32)0x1 << i);
3347 if (sig & cur_bit) {
3348 switch (cur_bit) {
3349 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3350 _print_next_block(par_num++, "CSEMI");
3351 break;
3352 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3353 _print_next_block(par_num++, "PXP");
3354 break;
3355 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3356 _print_next_block(par_num++,
3357 "PXPPCICLOCKCLIENT");
3358 break;
3359 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3360 _print_next_block(par_num++, "CFC");
3361 break;
3362 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3363 _print_next_block(par_num++, "CDU");
3364 break;
3365 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3366 _print_next_block(par_num++, "IGU");
3367 break;
3368 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3369 _print_next_block(par_num++, "MISC");
3370 break;
3371 }
3372
3373 /* Clear the bit */
3374 sig &= ~cur_bit;
3375 }
3376 }
3377
3378 return par_num;
3379}
3380
3381static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3382{
3383 int i = 0;
3384 u32 cur_bit = 0;
3385 for (i = 0; sig; i++) {
3386 cur_bit = ((u32)0x1 << i);
3387 if (sig & cur_bit) {
3388 switch (cur_bit) {
3389 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3390 _print_next_block(par_num++, "MCP ROM");
3391 break;
3392 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3393 _print_next_block(par_num++, "MCP UMP RX");
3394 break;
3395 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3396 _print_next_block(par_num++, "MCP UMP TX");
3397 break;
3398 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3399 _print_next_block(par_num++, "MCP SCPAD");
3400 break;
3401 }
3402
3403 /* Clear the bit */
3404 sig &= ~cur_bit;
3405 }
3406 }
3407
3408 return par_num;
3409}
3410
3411static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3412 u32 sig2, u32 sig3)
3413{
3414 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3415 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3416 int par_num = 0;
3417 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3418 "[0]:0x%08x [1]:0x%08x "
3419 "[2]:0x%08x [3]:0x%08x\n",
3420 sig0 & HW_PRTY_ASSERT_SET_0,
3421 sig1 & HW_PRTY_ASSERT_SET_1,
3422 sig2 & HW_PRTY_ASSERT_SET_2,
3423 sig3 & HW_PRTY_ASSERT_SET_3);
3424 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3425 bp->dev->name);
3426 par_num = bnx2x_print_blocks_with_parity0(
3427 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3428 par_num = bnx2x_print_blocks_with_parity1(
3429 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3430 par_num = bnx2x_print_blocks_with_parity2(
3431 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3432 par_num = bnx2x_print_blocks_with_parity3(
3433 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3434 printk("\n");
3435 return true;
3436 } else
3437 return false;
3438}
3439
3440static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3108{ 3441{
3109 struct attn_route attn; 3442 struct attn_route attn;
3110 struct attn_route group_mask; 3443 int port = BP_PORT(bp);
3444
3445 attn.sig[0] = REG_RD(bp,
3446 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3447 port*4);
3448 attn.sig[1] = REG_RD(bp,
3449 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3450 port*4);
3451 attn.sig[2] = REG_RD(bp,
3452 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3453 port*4);
3454 attn.sig[3] = REG_RD(bp,
3455 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3456 port*4);
3457
3458 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3459 attn.sig[3]);
3460}
3461
3462static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3463{
3464 struct attn_route attn, *group_mask;
3111 int port = BP_PORT(bp); 3465 int port = BP_PORT(bp);
3112 int index; 3466 int index;
3113 u32 reg_addr; 3467 u32 reg_addr;
@@ -3118,6 +3472,19 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3118 try to handle this event */ 3472 try to handle this event */
3119 bnx2x_acquire_alr(bp); 3473 bnx2x_acquire_alr(bp);
3120 3474
3475 if (bnx2x_chk_parity_attn(bp)) {
3476 bp->recovery_state = BNX2X_RECOVERY_INIT;
3477 bnx2x_set_reset_in_progress(bp);
3478 schedule_delayed_work(&bp->reset_task, 0);
3479 /* Disable HW interrupts */
3480 bnx2x_int_disable(bp);
3481 bnx2x_release_alr(bp);
3482 /* In case of parity errors don't handle attentions so that
3483 * other function would "see" parity errors.
3484 */
3485 return;
3486 }
3487
3121 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4); 3488 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3122 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4); 3489 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3123 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); 3490 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
@@ -3127,28 +3494,20 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3127 3494
3128 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { 3495 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3129 if (deasserted & (1 << index)) { 3496 if (deasserted & (1 << index)) {
3130 group_mask = bp->attn_group[index]; 3497 group_mask = &bp->attn_group[index];
3131 3498
3132 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n", 3499 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3133 index, group_mask.sig[0], group_mask.sig[1], 3500 index, group_mask->sig[0], group_mask->sig[1],
3134 group_mask.sig[2], group_mask.sig[3]); 3501 group_mask->sig[2], group_mask->sig[3]);
3135 3502
3136 bnx2x_attn_int_deasserted3(bp, 3503 bnx2x_attn_int_deasserted3(bp,
3137 attn.sig[3] & group_mask.sig[3]); 3504 attn.sig[3] & group_mask->sig[3]);
3138 bnx2x_attn_int_deasserted1(bp, 3505 bnx2x_attn_int_deasserted1(bp,
3139 attn.sig[1] & group_mask.sig[1]); 3506 attn.sig[1] & group_mask->sig[1]);
3140 bnx2x_attn_int_deasserted2(bp, 3507 bnx2x_attn_int_deasserted2(bp,
3141 attn.sig[2] & group_mask.sig[2]); 3508 attn.sig[2] & group_mask->sig[2]);
3142 bnx2x_attn_int_deasserted0(bp, 3509 bnx2x_attn_int_deasserted0(bp,
3143 attn.sig[0] & group_mask.sig[0]); 3510 attn.sig[0] & group_mask->sig[0]);
3144
3145 if ((attn.sig[0] & group_mask.sig[0] &
3146 HW_PRTY_ASSERT_SET_0) ||
3147 (attn.sig[1] & group_mask.sig[1] &
3148 HW_PRTY_ASSERT_SET_1) ||
3149 (attn.sig[2] & group_mask.sig[2] &
3150 HW_PRTY_ASSERT_SET_2))
3151 BNX2X_ERR("FATAL HW block parity attention\n");
3152 } 3511 }
3153 } 3512 }
3154 3513
@@ -3172,7 +3531,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3172 3531
3173 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n", 3532 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3174 aeu_mask, deasserted); 3533 aeu_mask, deasserted);
3175 aeu_mask |= (deasserted & 0xff); 3534 aeu_mask |= (deasserted & 0x3ff);
3176 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask); 3535 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3177 3536
3178 REG_WR(bp, reg_addr, aeu_mask); 3537 REG_WR(bp, reg_addr, aeu_mask);
@@ -3216,7 +3575,6 @@ static void bnx2x_sp_task(struct work_struct *work)
3216 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work); 3575 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3217 u16 status; 3576 u16 status;
3218 3577
3219
3220 /* Return here if interrupt is disabled */ 3578 /* Return here if interrupt is disabled */
3221 if (unlikely(atomic_read(&bp->intr_sem) != 0)) { 3579 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3222 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n"); 3580 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
@@ -3227,11 +3585,23 @@ static void bnx2x_sp_task(struct work_struct *work)
3227/* if (status == 0) */ 3585/* if (status == 0) */
3228/* BNX2X_ERR("spurious slowpath interrupt!\n"); */ 3586/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3229 3587
3230 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status); 3588 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3231 3589
3232 /* HW attentions */ 3590 /* HW attentions */
3233 if (status & 0x1) 3591 if (status & 0x1) {
3234 bnx2x_attn_int(bp); 3592 bnx2x_attn_int(bp);
3593 status &= ~0x1;
3594 }
3595
3596 /* CStorm events: STAT_QUERY */
3597 if (status & 0x2) {
3598 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3599 status &= ~0x2;
3600 }
3601
3602 if (unlikely(status))
3603 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3604 status);
3235 3605
3236 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx), 3606 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3237 IGU_INT_NOP, 1); 3607 IGU_INT_NOP, 1);
@@ -3243,7 +3613,6 @@ static void bnx2x_sp_task(struct work_struct *work)
3243 IGU_INT_NOP, 1); 3613 IGU_INT_NOP, 1);
3244 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx), 3614 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3245 IGU_INT_ENABLE, 1); 3615 IGU_INT_ENABLE, 1);
3246
3247} 3616}
3248 3617
3249static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance) 3618static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -3947,7 +4316,6 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
3947 u32 lo; 4316 u32 lo;
3948 u32 hi; 4317 u32 hi;
3949 } diff; 4318 } diff;
3950 u32 nig_timer_max;
3951 4319
3952 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) 4320 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3953 bnx2x_bmac_stats_update(bp); 4321 bnx2x_bmac_stats_update(bp);
@@ -3978,10 +4346,14 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
3978 4346
3979 pstats->host_port_stats_start = ++pstats->host_port_stats_end; 4347 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3980 4348
3981 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); 4349 if (!BP_NOMCP(bp)) {
3982 if (nig_timer_max != estats->nig_timer_max) { 4350 u32 nig_timer_max =
3983 estats->nig_timer_max = nig_timer_max; 4351 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3984 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max); 4352 if (nig_timer_max != estats->nig_timer_max) {
4353 estats->nig_timer_max = nig_timer_max;
4354 BNX2X_ERR("NIG timer max (%u)\n",
4355 estats->nig_timer_max);
4356 }
3985 } 4357 }
3986 4358
3987 return 0; 4359 return 0;
@@ -4025,21 +4397,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
4025 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) != 4397 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4026 bp->stats_counter) { 4398 bp->stats_counter) {
4027 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm" 4399 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4028 " xstorm counter (%d) != stats_counter (%d)\n", 4400 " xstorm counter (0x%x) != stats_counter (0x%x)\n",
4029 i, xclient->stats_counter, bp->stats_counter); 4401 i, xclient->stats_counter, bp->stats_counter);
4030 return -1; 4402 return -1;
4031 } 4403 }
4032 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) != 4404 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4033 bp->stats_counter) { 4405 bp->stats_counter) {
4034 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm" 4406 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4035 " tstorm counter (%d) != stats_counter (%d)\n", 4407 " tstorm counter (0x%x) != stats_counter (0x%x)\n",
4036 i, tclient->stats_counter, bp->stats_counter); 4408 i, tclient->stats_counter, bp->stats_counter);
4037 return -2; 4409 return -2;
4038 } 4410 }
4039 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) != 4411 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4040 bp->stats_counter) { 4412 bp->stats_counter) {
4041 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm" 4413 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4042 " ustorm counter (%d) != stats_counter (%d)\n", 4414 " ustorm counter (0x%x) != stats_counter (0x%x)\n",
4043 i, uclient->stats_counter, bp->stats_counter); 4415 i, uclient->stats_counter, bp->stats_counter);
4044 return -4; 4416 return -4;
4045 } 4417 }
@@ -4059,6 +4431,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
4059 qstats->total_bytes_received_lo, 4431 qstats->total_bytes_received_lo,
4060 le32_to_cpu(tclient->rcv_unicast_bytes.lo)); 4432 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4061 4433
4434 SUB_64(qstats->total_bytes_received_hi,
4435 le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4436 qstats->total_bytes_received_lo,
4437 le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4438
4439 SUB_64(qstats->total_bytes_received_hi,
4440 le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4441 qstats->total_bytes_received_lo,
4442 le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4443
4444 SUB_64(qstats->total_bytes_received_hi,
4445 le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4446 qstats->total_bytes_received_lo,
4447 le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4448
4062 qstats->valid_bytes_received_hi = 4449 qstats->valid_bytes_received_hi =
4063 qstats->total_bytes_received_hi; 4450 qstats->total_bytes_received_hi;
4064 qstats->valid_bytes_received_lo = 4451 qstats->valid_bytes_received_lo =
@@ -4307,47 +4694,43 @@ static void bnx2x_stats_update(struct bnx2x *bp)
4307 bnx2x_drv_stats_update(bp); 4694 bnx2x_drv_stats_update(bp);
4308 4695
4309 if (netif_msg_timer(bp)) { 4696 if (netif_msg_timer(bp)) {
4310 struct bnx2x_fastpath *fp0_rx = bp->fp;
4311 struct bnx2x_fastpath *fp0_tx = bp->fp;
4312 struct tstorm_per_client_stats *old_tclient =
4313 &bp->fp->old_tclient;
4314 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4315 struct bnx2x_eth_stats *estats = &bp->eth_stats; 4697 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4316 struct net_device_stats *nstats = &bp->dev->stats;
4317 int i; 4698 int i;
4318 4699
4319 netdev_printk(KERN_DEBUG, bp->dev, "\n"); 4700 printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
4320 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)" 4701 bp->dev->name,
4321 " tx pkt (%lx)\n",
4322 bnx2x_tx_avail(fp0_tx),
4323 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4324 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4325 " rx pkt (%lx)\n",
4326 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4327 fp0_rx->rx_comp_cons),
4328 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4329 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4330 "brb truncate %u\n",
4331 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4332 qstats->driver_xoff,
4333 estats->brb_drop_lo, estats->brb_truncate_lo); 4702 estats->brb_drop_lo, estats->brb_truncate_lo);
4334 printk(KERN_DEBUG "tstats: checksum_discard %u "
4335 "packets_too_big_discard %lu no_buff_discard %lu "
4336 "mac_discard %u mac_filter_discard %u "
4337 "xxovrflow_discard %u brb_truncate_discard %u "
4338 "ttl0_discard %u\n",
4339 le32_to_cpu(old_tclient->checksum_discard),
4340 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4341 bnx2x_hilo(&qstats->no_buff_discard_hi),
4342 estats->mac_discard, estats->mac_filter_discard,
4343 estats->xxoverflow_discard, estats->brb_truncate_discard,
4344 le32_to_cpu(old_tclient->ttl0_discard));
4345 4703
4346 for_each_queue(bp, i) { 4704 for_each_queue(bp, i) {
4347 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i, 4705 struct bnx2x_fastpath *fp = &bp->fp[i];
4348 bnx2x_fp(bp, i, tx_pkt), 4706 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4349 bnx2x_fp(bp, i, rx_pkt), 4707
4350 bnx2x_fp(bp, i, rx_calls)); 4708 printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
4709 " rx pkt(%lu) rx calls(%lu %lu)\n",
4710 fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4711 fp->rx_comp_cons),
4712 le16_to_cpu(*fp->rx_cons_sb),
4713 bnx2x_hilo(&qstats->
4714 total_unicast_packets_received_hi),
4715 fp->rx_calls, fp->rx_pkt);
4716 }
4717
4718 for_each_queue(bp, i) {
4719 struct bnx2x_fastpath *fp = &bp->fp[i];
4720 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4721 struct netdev_queue *txq =
4722 netdev_get_tx_queue(bp->dev, i);
4723
4724 printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
4725 " tx pkt(%lu) tx calls (%lu)"
4726 " %s (Xoff events %u)\n",
4727 fp->name, bnx2x_tx_avail(fp),
4728 le16_to_cpu(*fp->tx_cons_sb),
4729 bnx2x_hilo(&qstats->
4730 total_unicast_packets_transmitted_hi),
4731 fp->tx_pkt,
4732 (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4733 qstats->driver_xoff);
4351 } 4734 }
4352 } 4735 }
4353 4736
@@ -4468,6 +4851,9 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4468{ 4851{
4469 enum bnx2x_stats_state state = bp->stats_state; 4852 enum bnx2x_stats_state state = bp->stats_state;
4470 4853
4854 if (unlikely(bp->panic))
4855 return;
4856
4471 bnx2x_stats_stm[state][event].action(bp); 4857 bnx2x_stats_stm[state][event].action(bp);
4472 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 4858 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4473 4859
@@ -4940,9 +5326,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4940 } 5326 }
4941 5327
4942 if (fp->tpa_state[i] == BNX2X_TPA_START) 5328 if (fp->tpa_state[i] == BNX2X_TPA_START)
4943 pci_unmap_single(bp->pdev, 5329 dma_unmap_single(&bp->pdev->dev,
4944 pci_unmap_addr(rx_buf, mapping), 5330 dma_unmap_addr(rx_buf, mapping),
4945 bp->rx_buf_size, PCI_DMA_FROMDEVICE); 5331 bp->rx_buf_size, DMA_FROM_DEVICE);
4946 5332
4947 dev_kfree_skb(skb); 5333 dev_kfree_skb(skb);
4948 rx_buf->skb = NULL; 5334 rx_buf->skb = NULL;
@@ -4978,7 +5364,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
4978 fp->disable_tpa = 1; 5364 fp->disable_tpa = 1;
4979 break; 5365 break;
4980 } 5366 }
4981 pci_unmap_addr_set((struct sw_rx_bd *) 5367 dma_unmap_addr_set((struct sw_rx_bd *)
4982 &bp->fp->tpa_pool[i], 5368 &bp->fp->tpa_pool[i],
4983 mapping, 0); 5369 mapping, 0);
4984 fp->tpa_state[i] = BNX2X_TPA_STOP; 5370 fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -5072,8 +5458,8 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
5072 5458
5073 fp->rx_bd_prod = ring_prod; 5459 fp->rx_bd_prod = ring_prod;
5074 /* must not have more available CQEs than BDs */ 5460 /* must not have more available CQEs than BDs */
5075 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT), 5461 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5076 cqe_ring_prod); 5462 cqe_ring_prod);
5077 fp->rx_pkt = fp->rx_calls = 0; 5463 fp->rx_pkt = fp->rx_calls = 0;
5078 5464
5079 /* Warning! 5465 /* Warning!
@@ -5179,8 +5565,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
5179 context->ustorm_st_context.common.flags |= 5565 context->ustorm_st_context.common.flags |=
5180 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA; 5566 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5181 context->ustorm_st_context.common.sge_buff_size = 5567 context->ustorm_st_context.common.sge_buff_size =
5182 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE, 5568 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5183 (u32)0xffff); 5569 0xffff);
5184 context->ustorm_st_context.common.sge_page_base_hi = 5570 context->ustorm_st_context.common.sge_page_base_hi =
5185 U64_HI(fp->rx_sge_mapping); 5571 U64_HI(fp->rx_sge_mapping);
5186 context->ustorm_st_context.common.sge_page_base_lo = 5572 context->ustorm_st_context.common.sge_page_base_lo =
@@ -5369,10 +5755,10 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5369 u32 offset; 5755 u32 offset;
5370 u16 max_agg_size; 5756 u16 max_agg_size;
5371 5757
5372 if (is_multi(bp)) { 5758 tstorm_config.config_flags = RSS_FLAGS(bp);
5373 tstorm_config.config_flags = MULTI_FLAGS(bp); 5759
5760 if (is_multi(bp))
5374 tstorm_config.rss_result_mask = MULTI_MASK; 5761 tstorm_config.rss_result_mask = MULTI_MASK;
5375 }
5376 5762
5377 /* Enable TPA if needed */ 5763 /* Enable TPA if needed */
5378 if (bp->flags & TPA_ENABLE_FLAG) 5764 if (bp->flags & TPA_ENABLE_FLAG)
@@ -5477,10 +5863,8 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5477 } 5863 }
5478 5864
5479 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */ 5865 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5480 max_agg_size = 5866 max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5481 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) * 5867 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5482 SGE_PAGE_SIZE * PAGES_PER_SGE),
5483 (u32)0xffff);
5484 for_each_queue(bp, i) { 5868 for_each_queue(bp, i) {
5485 struct bnx2x_fastpath *fp = &bp->fp[i]; 5869 struct bnx2x_fastpath *fp = &bp->fp[i];
5486 5870
@@ -5566,7 +5950,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5566 } 5950 }
5567 5951
5568 5952
5569 /* Store it to internal memory */ 5953 /* Store cmng structures to internal memory */
5570 if (bp->port.pmf) 5954 if (bp->port.pmf)
5571 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++) 5955 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5572 REG_WR(bp, BAR_XSTRORM_INTMEM + 5956 REG_WR(bp, BAR_XSTRORM_INTMEM +
@@ -5658,8 +6042,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5658 6042
5659static int bnx2x_gunzip_init(struct bnx2x *bp) 6043static int bnx2x_gunzip_init(struct bnx2x *bp)
5660{ 6044{
5661 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE, 6045 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
5662 &bp->gunzip_mapping); 6046 &bp->gunzip_mapping, GFP_KERNEL);
5663 if (bp->gunzip_buf == NULL) 6047 if (bp->gunzip_buf == NULL)
5664 goto gunzip_nomem1; 6048 goto gunzip_nomem1;
5665 6049
@@ -5679,12 +6063,13 @@ gunzip_nomem3:
5679 bp->strm = NULL; 6063 bp->strm = NULL;
5680 6064
5681gunzip_nomem2: 6065gunzip_nomem2:
5682 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf, 6066 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5683 bp->gunzip_mapping); 6067 bp->gunzip_mapping);
5684 bp->gunzip_buf = NULL; 6068 bp->gunzip_buf = NULL;
5685 6069
5686gunzip_nomem1: 6070gunzip_nomem1:
5687 netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n"); 6071 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6072 " un-compression\n");
5688 return -ENOMEM; 6073 return -ENOMEM;
5689} 6074}
5690 6075
@@ -5696,8 +6081,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
5696 bp->strm = NULL; 6081 bp->strm = NULL;
5697 6082
5698 if (bp->gunzip_buf) { 6083 if (bp->gunzip_buf) {
5699 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf, 6084 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
5700 bp->gunzip_mapping); 6085 bp->gunzip_mapping);
5701 bp->gunzip_buf = NULL; 6086 bp->gunzip_buf = NULL;
5702 } 6087 }
5703} 6088}
@@ -5735,8 +6120,9 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5735 6120
5736 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out); 6121 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5737 if (bp->gunzip_outlen & 0x3) 6122 if (bp->gunzip_outlen & 0x3)
5738 netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n", 6123 netdev_err(bp->dev, "Firmware decompression error:"
5739 bp->gunzip_outlen); 6124 " gunzip_outlen (%d) not aligned\n",
6125 bp->gunzip_outlen);
5740 bp->gunzip_outlen >>= 2; 6126 bp->gunzip_outlen >>= 2;
5741 6127
5742 zlib_inflateEnd(bp->strm); 6128 zlib_inflateEnd(bp->strm);
@@ -5962,6 +6348,50 @@ static void enable_blocks_attention(struct bnx2x *bp)
5962 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ 6348 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5963} 6349}
5964 6350
6351static const struct {
6352 u32 addr;
6353 u32 mask;
6354} bnx2x_parity_mask[] = {
6355 {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6356 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6357 {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6358 {HC_REG_HC_PRTY_MASK, 0xffffffff},
6359 {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6360 {QM_REG_QM_PRTY_MASK, 0x0},
6361 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6362 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6363 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6364 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6365 {CDU_REG_CDU_PRTY_MASK, 0x0},
6366 {CFC_REG_CFC_PRTY_MASK, 0x0},
6367 {DBG_REG_DBG_PRTY_MASK, 0x0},
6368 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6369 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6370 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6371 {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6372 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6373 {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6374 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6375 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6376 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6377 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6378 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6379 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6380 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6381 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6382 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6383};
6384
6385static void enable_blocks_parity(struct bnx2x *bp)
6386{
6387 int i, mask_arr_len =
6388 sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
6389
6390 for (i = 0; i < mask_arr_len; i++)
6391 REG_WR(bp, bnx2x_parity_mask[i].addr,
6392 bnx2x_parity_mask[i].mask);
6393}
6394
5965 6395
5966static void bnx2x_reset_common(struct bnx2x *bp) 6396static void bnx2x_reset_common(struct bnx2x *bp)
5967{ 6397{
@@ -5992,10 +6422,14 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
5992 6422
5993static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp) 6423static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5994{ 6424{
6425 int is_required;
5995 u32 val; 6426 u32 val;
5996 u8 port; 6427 int port;
5997 u8 is_required = 0; 6428
6429 if (BP_NOMCP(bp))
6430 return;
5998 6431
6432 is_required = 0;
5999 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) & 6433 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6000 SHARED_HW_CFG_FAN_FAILURE_MASK; 6434 SHARED_HW_CFG_FAN_FAILURE_MASK;
6001 6435
@@ -6034,7 +6468,7 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6034 /* set to active low mode */ 6468 /* set to active low mode */
6035 val = REG_RD(bp, MISC_REG_SPIO_INT); 6469 val = REG_RD(bp, MISC_REG_SPIO_INT);
6036 val |= ((1 << MISC_REGISTERS_SPIO_5) << 6470 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6037 MISC_REGISTERS_SPIO_INT_OLD_SET_POS); 6471 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6038 REG_WR(bp, MISC_REG_SPIO_INT, val); 6472 REG_WR(bp, MISC_REG_SPIO_INT, val);
6039 6473
6040 /* enable interrupt to signal the IGU */ 6474 /* enable interrupt to signal the IGU */
@@ -6200,10 +6634,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
6200 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); 6634 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6201 6635
6202 REG_WR(bp, SRC_REG_SOFT_RST, 1); 6636 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6203 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) { 6637 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6204 REG_WR(bp, i, 0xc0cac01a); 6638 REG_WR(bp, i, random32());
6205 /* TODO: replace with something meaningful */
6206 }
6207 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); 6639 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6208#ifdef BCM_CNIC 6640#ifdef BCM_CNIC
6209 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672); 6641 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
@@ -6221,7 +6653,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
6221 6653
6222 if (sizeof(union cdu_context) != 1024) 6654 if (sizeof(union cdu_context) != 1024)
6223 /* we currently assume that a context is 1024 bytes */ 6655 /* we currently assume that a context is 1024 bytes */
6224 pr_alert("please adjust the size of cdu_context(%ld)\n", 6656 dev_alert(&bp->pdev->dev, "please adjust the size "
6657 "of cdu_context(%ld)\n",
6225 (long)sizeof(union cdu_context)); 6658 (long)sizeof(union cdu_context));
6226 6659
6227 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE); 6660 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
@@ -6305,6 +6738,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
6305 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 6738 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6306 6739
6307 enable_blocks_attention(bp); 6740 enable_blocks_attention(bp);
6741 if (CHIP_PARITY_SUPPORTED(bp))
6742 enable_blocks_parity(bp);
6308 6743
6309 if (!BP_NOMCP(bp)) { 6744 if (!BP_NOMCP(bp)) {
6310 bnx2x_acquire_phy_lock(bp); 6745 bnx2x_acquire_phy_lock(bp);
@@ -6323,7 +6758,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
6323 u32 low, high; 6758 u32 low, high;
6324 u32 val; 6759 u32 val;
6325 6760
6326 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port); 6761 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
6327 6762
6328 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0); 6763 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6329 6764
@@ -6342,6 +6777,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
6342 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20); 6777 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6343 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31); 6778 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6344#endif 6779#endif
6780
6345 bnx2x_init_block(bp, DQ_BLOCK, init_stage); 6781 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6346 6782
6347 bnx2x_init_block(bp, BRB1_BLOCK, init_stage); 6783 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
@@ -6534,7 +6970,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
6534 u32 addr, val; 6970 u32 addr, val;
6535 int i; 6971 int i;
6536 6972
6537 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func); 6973 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
6538 6974
6539 /* set MSI reconfigure capability */ 6975 /* set MSI reconfigure capability */
6540 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0); 6976 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
@@ -6692,7 +7128,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6692#define BNX2X_PCI_FREE(x, y, size) \ 7128#define BNX2X_PCI_FREE(x, y, size) \
6693 do { \ 7129 do { \
6694 if (x) { \ 7130 if (x) { \
6695 pci_free_consistent(bp->pdev, size, x, y); \ 7131 dma_free_coherent(&bp->pdev->dev, size, x, y); \
6696 x = NULL; \ 7132 x = NULL; \
6697 y = 0; \ 7133 y = 0; \
6698 } \ 7134 } \
@@ -6773,7 +7209,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6773 7209
6774#define BNX2X_PCI_ALLOC(x, y, size) \ 7210#define BNX2X_PCI_ALLOC(x, y, size) \
6775 do { \ 7211 do { \
6776 x = pci_alloc_consistent(bp->pdev, size, y); \ 7212 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
6777 if (x == NULL) \ 7213 if (x == NULL) \
6778 goto alloc_mem_err; \ 7214 goto alloc_mem_err; \
6779 memset(x, 0, size); \ 7215 memset(x, 0, size); \
@@ -6906,9 +7342,9 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6906 if (skb == NULL) 7342 if (skb == NULL)
6907 continue; 7343 continue;
6908 7344
6909 pci_unmap_single(bp->pdev, 7345 dma_unmap_single(&bp->pdev->dev,
6910 pci_unmap_addr(rx_buf, mapping), 7346 dma_unmap_addr(rx_buf, mapping),
6911 bp->rx_buf_size, PCI_DMA_FROMDEVICE); 7347 bp->rx_buf_size, DMA_FROM_DEVICE);
6912 7348
6913 rx_buf->skb = NULL; 7349 rx_buf->skb = NULL;
6914 dev_kfree_skb(skb); 7350 dev_kfree_skb(skb);
@@ -6987,7 +7423,31 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
6987 7423
6988 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 7424 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6989 BNX2X_NUM_QUEUES(bp) + offset); 7425 BNX2X_NUM_QUEUES(bp) + offset);
6990 if (rc) { 7426
7427 /*
7428 * reconfigure number of tx/rx queues according to available
7429 * MSI-X vectors
7430 */
7431 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7432 /* vectors available for FP */
7433 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7434
7435 DP(NETIF_MSG_IFUP,
7436 "Trying to use less MSI-X vectors: %d\n", rc);
7437
7438 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7439
7440 if (rc) {
7441 DP(NETIF_MSG_IFUP,
7442 "MSI-X is not attainable rc %d\n", rc);
7443 return rc;
7444 }
7445
7446 bp->num_queues = min(bp->num_queues, fp_vec);
7447
7448 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7449 bp->num_queues);
7450 } else if (rc) {
6991 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); 7451 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6992 return rc; 7452 return rc;
6993 } 7453 }
@@ -7028,10 +7488,11 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7028 } 7488 }
7029 7489
7030 i = BNX2X_NUM_QUEUES(bp); 7490 i = BNX2X_NUM_QUEUES(bp);
7031 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", 7491 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7032 bp->msix_table[0].vector, 7492 " ... fp[%d] %d\n",
7033 0, bp->msix_table[offset].vector, 7493 bp->msix_table[0].vector,
7034 i - 1, bp->msix_table[offset + i - 1].vector); 7494 0, bp->msix_table[offset].vector,
7495 i - 1, bp->msix_table[offset + i - 1].vector);
7035 7496
7036 return 0; 7497 return 0;
7037} 7498}
@@ -7409,8 +7870,6 @@ static int bnx2x_set_num_queues(struct bnx2x *bp)
7409 bp->num_queues = 1; 7870 bp->num_queues = 1;
7410 DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); 7871 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7411 break; 7872 break;
7412
7413 case INT_MODE_MSIX:
7414 default: 7873 default:
7415 /* Set number of queues according to bp->multi_mode value */ 7874 /* Set number of queues according to bp->multi_mode value */
7416 bnx2x_set_num_queues_msix(bp); 7875 bnx2x_set_num_queues_msix(bp);
@@ -7656,6 +8115,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7656 if (bp->state == BNX2X_STATE_OPEN) 8115 if (bp->state == BNX2X_STATE_OPEN)
7657 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 8116 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7658#endif 8117#endif
8118 bnx2x_inc_load_cnt(bp);
7659 8119
7660 return 0; 8120 return 0;
7661 8121
@@ -7843,33 +8303,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7843 } 8303 }
7844} 8304}
7845 8305
7846/* must be called with rtnl_lock */ 8306static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7847static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7848{ 8307{
7849 int port = BP_PORT(bp); 8308 int port = BP_PORT(bp);
7850 u32 reset_code = 0; 8309 u32 reset_code = 0;
7851 int i, cnt, rc; 8310 int i, cnt, rc;
7852 8311
7853#ifdef BCM_CNIC
7854 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7855#endif
7856 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7857
7858 /* Set "drop all" */
7859 bp->rx_mode = BNX2X_RX_MODE_NONE;
7860 bnx2x_set_storm_rx_mode(bp);
7861
7862 /* Disable HW interrupts, NAPI and Tx */
7863 bnx2x_netif_stop(bp, 1);
7864
7865 del_timer_sync(&bp->timer);
7866 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7867 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7868 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7869
7870 /* Release IRQs */
7871 bnx2x_free_irq(bp, false);
7872
7873 /* Wait until tx fastpath tasks complete */ 8312 /* Wait until tx fastpath tasks complete */
7874 for_each_queue(bp, i) { 8313 for_each_queue(bp, i) {
7875 struct bnx2x_fastpath *fp = &bp->fp[i]; 8314 struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -8010,6 +8449,70 @@ unload_error:
8010 if (!BP_NOMCP(bp)) 8449 if (!BP_NOMCP(bp))
8011 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 8450 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8012 8451
8452}
8453
8454static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8455{
8456 u32 val;
8457
8458 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8459
8460 if (CHIP_IS_E1(bp)) {
8461 int port = BP_PORT(bp);
8462 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8463 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8464
8465 val = REG_RD(bp, addr);
8466 val &= ~(0x300);
8467 REG_WR(bp, addr, val);
8468 } else if (CHIP_IS_E1H(bp)) {
8469 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8470 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8471 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8472 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8473 }
8474}
8475
8476/* must be called with rtnl_lock */
8477static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8478{
8479 int i;
8480
8481 if (bp->state == BNX2X_STATE_CLOSED) {
8482 /* Interface has been removed - nothing to recover */
8483 bp->recovery_state = BNX2X_RECOVERY_DONE;
8484 bp->is_leader = 0;
8485 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8486 smp_wmb();
8487
8488 return -EINVAL;
8489 }
8490
8491#ifdef BCM_CNIC
8492 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8493#endif
8494 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8495
8496 /* Set "drop all" */
8497 bp->rx_mode = BNX2X_RX_MODE_NONE;
8498 bnx2x_set_storm_rx_mode(bp);
8499
8500 /* Disable HW interrupts, NAPI and Tx */
8501 bnx2x_netif_stop(bp, 1);
8502 netif_carrier_off(bp->dev);
8503
8504 del_timer_sync(&bp->timer);
8505 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8506 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8507 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8508
8509 /* Release IRQs */
8510 bnx2x_free_irq(bp, false);
8511
8512 /* Cleanup the chip if needed */
8513 if (unload_mode != UNLOAD_RECOVERY)
8514 bnx2x_chip_cleanup(bp, unload_mode);
8515
8013 bp->port.pmf = 0; 8516 bp->port.pmf = 0;
8014 8517
8015 /* Free SKBs, SGEs, TPA pool and driver internals */ 8518 /* Free SKBs, SGEs, TPA pool and driver internals */
@@ -8022,19 +8525,448 @@ unload_error:
8022 8525
8023 bp->state = BNX2X_STATE_CLOSED; 8526 bp->state = BNX2X_STATE_CLOSED;
8024 8527
8025 netif_carrier_off(bp->dev); 8528 /* The last driver must disable a "close the gate" if there is no
8529 * parity attention or "process kill" pending.
8530 */
8531 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8532 bnx2x_reset_is_done(bp))
8533 bnx2x_disable_close_the_gate(bp);
8534
8535 /* Reset MCP mail box sequence if there is on going recovery */
8536 if (unload_mode == UNLOAD_RECOVERY)
8537 bp->fw_seq = 0;
8538
8539 return 0;
8540}
8541
8542/* Close gates #2, #3 and #4: */
8543static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8544{
8545 u32 val, addr;
8546
8547 /* Gates #2 and #4a are closed/opened for "not E1" only */
8548 if (!CHIP_IS_E1(bp)) {
8549 /* #4 */
8550 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8551 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8552 close ? (val | 0x1) : (val & (~(u32)1)));
8553 /* #2 */
8554 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8555 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8556 close ? (val | 0x1) : (val & (~(u32)1)));
8557 }
8558
8559 /* #3 */
8560 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8561 val = REG_RD(bp, addr);
8562 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8563
8564 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8565 close ? "closing" : "opening");
8566 mmiowb();
8567}
8568
8569#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8570
8571static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8572{
8573 /* Do some magic... */
8574 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8575 *magic_val = val & SHARED_MF_CLP_MAGIC;
8576 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8577}
8578
8579/* Restore the value of the `magic' bit.
8580 *
8581 * @param pdev Device handle.
8582 * @param magic_val Old value of the `magic' bit.
8583 */
8584static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8585{
8586 /* Restore the `magic' bit value... */
8587 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8588 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8589 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8590 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8591 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8592 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8593}
8594
8595/* Prepares for MCP reset: takes care of CLP configurations.
8596 *
8597 * @param bp
8598 * @param magic_val Old value of 'magic' bit.
8599 */
8600static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8601{
8602 u32 shmem;
8603 u32 validity_offset;
8604
8605 DP(NETIF_MSG_HW, "Starting\n");
8606
8607 /* Set `magic' bit in order to save MF config */
8608 if (!CHIP_IS_E1(bp))
8609 bnx2x_clp_reset_prep(bp, magic_val);
8610
8611 /* Get shmem offset */
8612 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8613 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8614
8615 /* Clear validity map flags */
8616 if (shmem > 0)
8617 REG_WR(bp, shmem + validity_offset, 0);
8618}
8619
8620#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8621#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8622
8623/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8624 * depending on the HW type.
8625 *
8626 * @param bp
8627 */
8628static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8629{
8630 /* special handling for emulation and FPGA,
8631 wait 10 times longer */
8632 if (CHIP_REV_IS_SLOW(bp))
8633 msleep(MCP_ONE_TIMEOUT*10);
8634 else
8635 msleep(MCP_ONE_TIMEOUT);
8636}
8637
8638static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8639{
8640 u32 shmem, cnt, validity_offset, val;
8641 int rc = 0;
8642
8643 msleep(100);
8644
8645 /* Get shmem offset */
8646 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8647 if (shmem == 0) {
8648 BNX2X_ERR("Shmem 0 return failure\n");
8649 rc = -ENOTTY;
8650 goto exit_lbl;
8651 }
8652
8653 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8654
8655 /* Wait for MCP to come up */
8656 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8657 /* TBD: its best to check validity map of last port.
8658 * currently checks on port 0.
8659 */
8660 val = REG_RD(bp, shmem + validity_offset);
8661 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8662 shmem + validity_offset, val);
8663
8664 /* check that shared memory is valid. */
8665 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8666 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8667 break;
8668
8669 bnx2x_mcp_wait_one(bp);
8670 }
8671
8672 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8673
8674 /* Check that shared memory is valid. This indicates that MCP is up. */
8675 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8676 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8677 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8678 rc = -ENOTTY;
8679 goto exit_lbl;
8680 }
8681
8682exit_lbl:
8683 /* Restore the `magic' bit value */
8684 if (!CHIP_IS_E1(bp))
8685 bnx2x_clp_reset_done(bp, magic_val);
8686
8687 return rc;
8688}
8689
8690static void bnx2x_pxp_prep(struct bnx2x *bp)
8691{
8692 if (!CHIP_IS_E1(bp)) {
8693 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8694 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8695 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8696 mmiowb();
8697 }
8698}
8699
8700/*
8701 * Reset the whole chip except for:
8702 * - PCIE core
8703 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8704 * one reset bit)
8705 * - IGU
8706 * - MISC (including AEU)
8707 * - GRC
8708 * - RBCN, RBCP
8709 */
8710static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8711{
8712 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8713
8714 not_reset_mask1 =
8715 MISC_REGISTERS_RESET_REG_1_RST_HC |
8716 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8717 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8718
8719 not_reset_mask2 =
8720 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8721 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8722 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8723 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8724 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8725 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8726 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8727 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8728
8729 reset_mask1 = 0xffffffff;
8730
8731 if (CHIP_IS_E1(bp))
8732 reset_mask2 = 0xffff;
8733 else
8734 reset_mask2 = 0x1ffff;
8735
8736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8737 reset_mask1 & (~not_reset_mask1));
8738 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8739 reset_mask2 & (~not_reset_mask2));
8740
8741 barrier();
8742 mmiowb();
8743
8744 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8745 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8746 mmiowb();
8747}
8748
8749static int bnx2x_process_kill(struct bnx2x *bp)
8750{
8751 int cnt = 1000;
8752 u32 val = 0;
8753 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8754
8755
8756 /* Empty the Tetris buffer, wait for 1s */
8757 do {
8758 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8759 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8760 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8761 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8762 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8763 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8764 ((port_is_idle_0 & 0x1) == 0x1) &&
8765 ((port_is_idle_1 & 0x1) == 0x1) &&
8766 (pgl_exp_rom2 == 0xffffffff))
8767 break;
8768 msleep(1);
8769 } while (cnt-- > 0);
8770
8771 if (cnt <= 0) {
8772 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8773 " are still"
8774 " outstanding read requests after 1s!\n");
8775 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8776 " port_is_idle_0=0x%08x,"
8777 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8778 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8779 pgl_exp_rom2);
8780 return -EAGAIN;
8781 }
8782
8783 barrier();
8784
8785 /* Close gates #2, #3 and #4 */
8786 bnx2x_set_234_gates(bp, true);
8787
8788 /* TBD: Indicate that "process kill" is in progress to MCP */
8789
8790 /* Clear "unprepared" bit */
8791 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8792 barrier();
8793
8794 /* Make sure all is written to the chip before the reset */
8795 mmiowb();
8796
8797 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8798 * PSWHST, GRC and PSWRD Tetris buffer.
8799 */
8800 msleep(1);
8801
8802 /* Prepare to chip reset: */
8803 /* MCP */
8804 bnx2x_reset_mcp_prep(bp, &val);
8805
8806 /* PXP */
8807 bnx2x_pxp_prep(bp);
8808 barrier();
8809
8810 /* reset the chip */
8811 bnx2x_process_kill_chip_reset(bp);
8812 barrier();
8813
8814 /* Recover after reset: */
8815 /* MCP */
8816 if (bnx2x_reset_mcp_comp(bp, val))
8817 return -EAGAIN;
8818
8819 /* PXP */
8820 bnx2x_pxp_prep(bp);
8821
8822 /* Open the gates #2, #3 and #4 */
8823 bnx2x_set_234_gates(bp, false);
8824
8825 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
8826 * reset state, re-enable attentions. */
8026 8827
8027 return 0; 8828 return 0;
8028} 8829}
8029 8830
8831static int bnx2x_leader_reset(struct bnx2x *bp)
8832{
8833 int rc = 0;
8834 /* Try to recover after the failure */
8835 if (bnx2x_process_kill(bp)) {
8836 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
8837 bp->dev->name);
8838 rc = -EAGAIN;
8839 goto exit_leader_reset;
8840 }
8841
8842 /* Clear "reset is in progress" bit and update the driver state */
8843 bnx2x_set_reset_done(bp);
8844 bp->recovery_state = BNX2X_RECOVERY_DONE;
8845
8846exit_leader_reset:
8847 bp->is_leader = 0;
8848 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8849 smp_wmb();
8850 return rc;
8851}
8852
8853static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8854
8855/* Assumption: runs under rtnl lock. This together with the fact
8856 * that it's called only from bnx2x_reset_task() ensure that it
8857 * will never be called when netif_running(bp->dev) is false.
8858 */
8859static void bnx2x_parity_recover(struct bnx2x *bp)
8860{
8861 DP(NETIF_MSG_HW, "Handling parity\n");
8862 while (1) {
8863 switch (bp->recovery_state) {
8864 case BNX2X_RECOVERY_INIT:
8865 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8866 /* Try to get a LEADER_LOCK HW lock */
8867 if (bnx2x_trylock_hw_lock(bp,
8868 HW_LOCK_RESOURCE_RESERVED_08))
8869 bp->is_leader = 1;
8870
8871 /* Stop the driver */
8872 /* If interface has been removed - break */
8873 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8874 return;
8875
8876 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8877 /* Ensure "is_leader" and "recovery_state"
8878 * update values are seen on other CPUs
8879 */
8880 smp_wmb();
8881 break;
8882
8883 case BNX2X_RECOVERY_WAIT:
8884 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8885 if (bp->is_leader) {
8886 u32 load_counter = bnx2x_get_load_cnt(bp);
8887 if (load_counter) {
8888 /* Wait until all other functions get
8889 * down.
8890 */
8891 schedule_delayed_work(&bp->reset_task,
8892 HZ/10);
8893 return;
8894 } else {
8895 /* If all other functions got down -
8896 * try to bring the chip back to
8897 * normal. In any case it's an exit
8898 * point for a leader.
8899 */
8900 if (bnx2x_leader_reset(bp) ||
8901 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8902 printk(KERN_ERR"%s: Recovery "
8903 "has failed. Power cycle is "
8904 "needed.\n", bp->dev->name);
8905 /* Disconnect this device */
8906 netif_device_detach(bp->dev);
8907 /* Block ifup for all function
8908 * of this ASIC until
8909 * "process kill" or power
8910 * cycle.
8911 */
8912 bnx2x_set_reset_in_progress(bp);
8913 /* Shut down the power */
8914 bnx2x_set_power_state(bp,
8915 PCI_D3hot);
8916 return;
8917 }
8918
8919 return;
8920 }
8921 } else { /* non-leader */
8922 if (!bnx2x_reset_is_done(bp)) {
8923 /* Try to get a LEADER_LOCK HW lock as
8924 * long as a former leader may have
8925 * been unloaded by the user or
8926 * released a leadership by another
8927 * reason.
8928 */
8929 if (bnx2x_trylock_hw_lock(bp,
8930 HW_LOCK_RESOURCE_RESERVED_08)) {
8931 /* I'm a leader now! Restart a
8932 * switch case.
8933 */
8934 bp->is_leader = 1;
8935 break;
8936 }
8937
8938 schedule_delayed_work(&bp->reset_task,
8939 HZ/10);
8940 return;
8941
8942 } else { /* A leader has completed
8943 * the "process kill". It's an exit
8944 * point for a non-leader.
8945 */
8946 bnx2x_nic_load(bp, LOAD_NORMAL);
8947 bp->recovery_state =
8948 BNX2X_RECOVERY_DONE;
8949 smp_wmb();
8950 return;
8951 }
8952 }
8953 default:
8954 return;
8955 }
8956 }
8957}
8958
8959/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
8960 * scheduled on a general queue in order to prevent a dead lock.
8961 */
8030static void bnx2x_reset_task(struct work_struct *work) 8962static void bnx2x_reset_task(struct work_struct *work)
8031{ 8963{
8032 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task); 8964 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8033 8965
8034#ifdef BNX2X_STOP_ON_ERROR 8966#ifdef BNX2X_STOP_ON_ERROR
8035 BNX2X_ERR("reset task called but STOP_ON_ERROR defined" 8967 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8036 " so reset not done to allow debug dump,\n" 8968 " so reset not done to allow debug dump,\n"
8037 " you will need to reboot when done\n"); 8969 KERN_ERR " you will need to reboot when done\n");
8038 return; 8970 return;
8039#endif 8971#endif
8040 8972
@@ -8043,8 +8975,12 @@ static void bnx2x_reset_task(struct work_struct *work)
8043 if (!netif_running(bp->dev)) 8975 if (!netif_running(bp->dev))
8044 goto reset_task_exit; 8976 goto reset_task_exit;
8045 8977
8046 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 8978 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8047 bnx2x_nic_load(bp, LOAD_NORMAL); 8979 bnx2x_parity_recover(bp);
8980 else {
8981 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8982 bnx2x_nic_load(bp, LOAD_NORMAL);
8983 }
8048 8984
8049reset_task_exit: 8985reset_task_exit:
8050 rtnl_unlock(); 8986 rtnl_unlock();
@@ -8264,7 +9200,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8264 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9200 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8265 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 9201 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8266 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 9202 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8267 BNX2X_ERR("BAD MCP validity signature\n"); 9203 BNX2X_ERROR("BAD MCP validity signature\n");
8268 9204
8269 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 9205 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8270 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 9206 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
@@ -8288,8 +9224,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8288 if (val < BNX2X_BC_VER) { 9224 if (val < BNX2X_BC_VER) {
8289 /* for now only warn 9225 /* for now only warn
8290 * later we might need to enforce this */ 9226 * later we might need to enforce this */
8291 BNX2X_ERR("This driver needs bc_ver %X but found %X," 9227 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
8292 " please upgrade BC\n", BNX2X_BC_VER, val); 9228 "please upgrade BC\n", BNX2X_BC_VER, val);
8293 } 9229 }
8294 bp->link_params.feature_config_flags |= 9230 bp->link_params.feature_config_flags |=
8295 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ? 9231 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
@@ -8310,7 +9246,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8310 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); 9246 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8311 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); 9247 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8312 9248
8313 pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4); 9249 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9250 val, val2, val3, val4);
8314} 9251}
8315 9252
8316static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 9253static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
@@ -8588,11 +9525,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8588 bp->port.advertising = (ADVERTISED_10baseT_Full | 9525 bp->port.advertising = (ADVERTISED_10baseT_Full |
8589 ADVERTISED_TP); 9526 ADVERTISED_TP);
8590 } else { 9527 } else {
8591 BNX2X_ERR("NVRAM config error. " 9528 BNX2X_ERROR("NVRAM config error. "
8592 "Invalid link_config 0x%x" 9529 "Invalid link_config 0x%x"
8593 " speed_cap_mask 0x%x\n", 9530 " speed_cap_mask 0x%x\n",
8594 bp->port.link_config, 9531 bp->port.link_config,
8595 bp->link_params.speed_cap_mask); 9532 bp->link_params.speed_cap_mask);
8596 return; 9533 return;
8597 } 9534 }
8598 break; 9535 break;
@@ -8604,11 +9541,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8604 bp->port.advertising = (ADVERTISED_10baseT_Half | 9541 bp->port.advertising = (ADVERTISED_10baseT_Half |
8605 ADVERTISED_TP); 9542 ADVERTISED_TP);
8606 } else { 9543 } else {
8607 BNX2X_ERR("NVRAM config error. " 9544 BNX2X_ERROR("NVRAM config error. "
8608 "Invalid link_config 0x%x" 9545 "Invalid link_config 0x%x"
8609 " speed_cap_mask 0x%x\n", 9546 " speed_cap_mask 0x%x\n",
8610 bp->port.link_config, 9547 bp->port.link_config,
8611 bp->link_params.speed_cap_mask); 9548 bp->link_params.speed_cap_mask);
8612 return; 9549 return;
8613 } 9550 }
8614 break; 9551 break;
@@ -8619,11 +9556,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8619 bp->port.advertising = (ADVERTISED_100baseT_Full | 9556 bp->port.advertising = (ADVERTISED_100baseT_Full |
8620 ADVERTISED_TP); 9557 ADVERTISED_TP);
8621 } else { 9558 } else {
8622 BNX2X_ERR("NVRAM config error. " 9559 BNX2X_ERROR("NVRAM config error. "
8623 "Invalid link_config 0x%x" 9560 "Invalid link_config 0x%x"
8624 " speed_cap_mask 0x%x\n", 9561 " speed_cap_mask 0x%x\n",
8625 bp->port.link_config, 9562 bp->port.link_config,
8626 bp->link_params.speed_cap_mask); 9563 bp->link_params.speed_cap_mask);
8627 return; 9564 return;
8628 } 9565 }
8629 break; 9566 break;
@@ -8635,11 +9572,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8635 bp->port.advertising = (ADVERTISED_100baseT_Half | 9572 bp->port.advertising = (ADVERTISED_100baseT_Half |
8636 ADVERTISED_TP); 9573 ADVERTISED_TP);
8637 } else { 9574 } else {
8638 BNX2X_ERR("NVRAM config error. " 9575 BNX2X_ERROR("NVRAM config error. "
8639 "Invalid link_config 0x%x" 9576 "Invalid link_config 0x%x"
8640 " speed_cap_mask 0x%x\n", 9577 " speed_cap_mask 0x%x\n",
8641 bp->port.link_config, 9578 bp->port.link_config,
8642 bp->link_params.speed_cap_mask); 9579 bp->link_params.speed_cap_mask);
8643 return; 9580 return;
8644 } 9581 }
8645 break; 9582 break;
@@ -8650,11 +9587,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8650 bp->port.advertising = (ADVERTISED_1000baseT_Full | 9587 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8651 ADVERTISED_TP); 9588 ADVERTISED_TP);
8652 } else { 9589 } else {
8653 BNX2X_ERR("NVRAM config error. " 9590 BNX2X_ERROR("NVRAM config error. "
8654 "Invalid link_config 0x%x" 9591 "Invalid link_config 0x%x"
8655 " speed_cap_mask 0x%x\n", 9592 " speed_cap_mask 0x%x\n",
8656 bp->port.link_config, 9593 bp->port.link_config,
8657 bp->link_params.speed_cap_mask); 9594 bp->link_params.speed_cap_mask);
8658 return; 9595 return;
8659 } 9596 }
8660 break; 9597 break;
@@ -8665,11 +9602,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8665 bp->port.advertising = (ADVERTISED_2500baseX_Full | 9602 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8666 ADVERTISED_TP); 9603 ADVERTISED_TP);
8667 } else { 9604 } else {
8668 BNX2X_ERR("NVRAM config error. " 9605 BNX2X_ERROR("NVRAM config error. "
8669 "Invalid link_config 0x%x" 9606 "Invalid link_config 0x%x"
8670 " speed_cap_mask 0x%x\n", 9607 " speed_cap_mask 0x%x\n",
8671 bp->port.link_config, 9608 bp->port.link_config,
8672 bp->link_params.speed_cap_mask); 9609 bp->link_params.speed_cap_mask);
8673 return; 9610 return;
8674 } 9611 }
8675 break; 9612 break;
@@ -8682,19 +9619,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8682 bp->port.advertising = (ADVERTISED_10000baseT_Full | 9619 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8683 ADVERTISED_FIBRE); 9620 ADVERTISED_FIBRE);
8684 } else { 9621 } else {
8685 BNX2X_ERR("NVRAM config error. " 9622 BNX2X_ERROR("NVRAM config error. "
8686 "Invalid link_config 0x%x" 9623 "Invalid link_config 0x%x"
8687 " speed_cap_mask 0x%x\n", 9624 " speed_cap_mask 0x%x\n",
8688 bp->port.link_config, 9625 bp->port.link_config,
8689 bp->link_params.speed_cap_mask); 9626 bp->link_params.speed_cap_mask);
8690 return; 9627 return;
8691 } 9628 }
8692 break; 9629 break;
8693 9630
8694 default: 9631 default:
8695 BNX2X_ERR("NVRAM config error. " 9632 BNX2X_ERROR("NVRAM config error. "
8696 "BAD link speed link_config 0x%x\n", 9633 "BAD link speed link_config 0x%x\n",
8697 bp->port.link_config); 9634 bp->port.link_config);
8698 bp->link_params.req_line_speed = SPEED_AUTO_NEG; 9635 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8699 bp->port.advertising = bp->port.supported; 9636 bp->port.advertising = bp->port.supported;
8700 break; 9637 break;
@@ -8823,7 +9760,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8823 9760
8824 bp->e1hov = 0; 9761 bp->e1hov = 0;
8825 bp->e1hmf = 0; 9762 bp->e1hmf = 0;
8826 if (CHIP_IS_E1H(bp)) { 9763 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
8827 bp->mf_config = 9764 bp->mf_config =
8828 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 9765 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8829 9766
@@ -8844,14 +9781,14 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8844 "(0x%04x)\n", 9781 "(0x%04x)\n",
8845 func, bp->e1hov, bp->e1hov); 9782 func, bp->e1hov, bp->e1hov);
8846 } else { 9783 } else {
8847 BNX2X_ERR("!!! No valid E1HOV for func %d," 9784 BNX2X_ERROR("No valid E1HOV for func %d,"
8848 " aborting\n", func); 9785 " aborting\n", func);
8849 rc = -EPERM; 9786 rc = -EPERM;
8850 } 9787 }
8851 } else { 9788 } else {
8852 if (BP_E1HVN(bp)) { 9789 if (BP_E1HVN(bp)) {
8853 BNX2X_ERR("!!! VN %d in single function mode," 9790 BNX2X_ERROR("VN %d in single function mode,"
8854 " aborting\n", BP_E1HVN(bp)); 9791 " aborting\n", BP_E1HVN(bp));
8855 rc = -EPERM; 9792 rc = -EPERM;
8856 } 9793 }
8857 } 9794 }
@@ -8887,7 +9824,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8887 9824
8888 if (BP_NOMCP(bp)) { 9825 if (BP_NOMCP(bp)) {
8889 /* only supposed to happen on emulation/FPGA */ 9826 /* only supposed to happen on emulation/FPGA */
8890 BNX2X_ERR("warning random MAC workaround active\n"); 9827 BNX2X_ERROR("warning: random MAC workaround active\n");
8891 random_ether_addr(bp->dev->dev_addr); 9828 random_ether_addr(bp->dev->dev_addr);
8892 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 9829 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8893 } 9830 }
@@ -8895,6 +9832,70 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8895 return rc; 9832 return rc;
8896} 9833}
8897 9834
9835static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9836{
9837 int cnt, i, block_end, rodi;
9838 char vpd_data[BNX2X_VPD_LEN+1];
9839 char str_id_reg[VENDOR_ID_LEN+1];
9840 char str_id_cap[VENDOR_ID_LEN+1];
9841 u8 len;
9842
9843 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9844 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9845
9846 if (cnt < BNX2X_VPD_LEN)
9847 goto out_not_found;
9848
9849 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9850 PCI_VPD_LRDT_RO_DATA);
9851 if (i < 0)
9852 goto out_not_found;
9853
9854
9855 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9856 pci_vpd_lrdt_size(&vpd_data[i]);
9857
9858 i += PCI_VPD_LRDT_TAG_SIZE;
9859
9860 if (block_end > BNX2X_VPD_LEN)
9861 goto out_not_found;
9862
9863 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9864 PCI_VPD_RO_KEYWORD_MFR_ID);
9865 if (rodi < 0)
9866 goto out_not_found;
9867
9868 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9869
9870 if (len != VENDOR_ID_LEN)
9871 goto out_not_found;
9872
9873 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9874
9875 /* vendor specific info */
9876 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9877 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9878 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9879 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9880
9881 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9882 PCI_VPD_RO_KEYWORD_VENDOR0);
9883 if (rodi >= 0) {
9884 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9885
9886 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9887
9888 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9889 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9890 bp->fw_ver[len] = ' ';
9891 }
9892 }
9893 return;
9894 }
9895out_not_found:
9896 return;
9897}
9898
8898static int __devinit bnx2x_init_bp(struct bnx2x *bp) 9899static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8899{ 9900{
8900 int func = BP_FUNC(bp); 9901 int func = BP_FUNC(bp);
@@ -8912,29 +9913,34 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8912#endif 9913#endif
8913 9914
8914 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 9915 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8915 INIT_WORK(&bp->reset_task, bnx2x_reset_task); 9916 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8916 9917
8917 rc = bnx2x_get_hwinfo(bp); 9918 rc = bnx2x_get_hwinfo(bp);
8918 9919
9920 bnx2x_read_fwinfo(bp);
8919 /* need to reset chip if undi was active */ 9921 /* need to reset chip if undi was active */
8920 if (!BP_NOMCP(bp)) 9922 if (!BP_NOMCP(bp))
8921 bnx2x_undi_unload(bp); 9923 bnx2x_undi_unload(bp);
8922 9924
8923 if (CHIP_REV_IS_FPGA(bp)) 9925 if (CHIP_REV_IS_FPGA(bp))
8924 pr_err("FPGA detected\n"); 9926 dev_err(&bp->pdev->dev, "FPGA detected\n");
8925 9927
8926 if (BP_NOMCP(bp) && (func == 0)) 9928 if (BP_NOMCP(bp) && (func == 0))
8927 pr_err("MCP disabled, must load devices in order!\n"); 9929 dev_err(&bp->pdev->dev, "MCP disabled, "
9930 "must load devices in order!\n");
8928 9931
8929 /* Set multi queue mode */ 9932 /* Set multi queue mode */
8930 if ((multi_mode != ETH_RSS_MODE_DISABLED) && 9933 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8931 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) { 9934 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8932 pr_err("Multi disabled since int_mode requested is not MSI-X\n"); 9935 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9936 "requested is not MSI-X\n");
8933 multi_mode = ETH_RSS_MODE_DISABLED; 9937 multi_mode = ETH_RSS_MODE_DISABLED;
8934 } 9938 }
8935 bp->multi_mode = multi_mode; 9939 bp->multi_mode = multi_mode;
8936 9940
8937 9941
9942 bp->dev->features |= NETIF_F_GRO;
9943
8938 /* Set TPA flags */ 9944 /* Set TPA flags */
8939 if (disable_tpa) { 9945 if (disable_tpa) {
8940 bp->flags &= ~TPA_ENABLE_FLAG; 9946 bp->flags &= ~TPA_ENABLE_FLAG;
@@ -9304,11 +10310,13 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
9304 bnx2x_release_phy_lock(bp); 10310 bnx2x_release_phy_lock(bp);
9305 } 10311 }
9306 10312
9307 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s", 10313 strncpy(info->fw_version, bp->fw_ver, 32);
10314 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10315 "bc %d.%d.%d%s%s",
9308 (bp->common.bc_ver & 0xff0000) >> 16, 10316 (bp->common.bc_ver & 0xff0000) >> 16,
9309 (bp->common.bc_ver & 0xff00) >> 8, 10317 (bp->common.bc_ver & 0xff00) >> 8,
9310 (bp->common.bc_ver & 0xff), 10318 (bp->common.bc_ver & 0xff),
9311 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver); 10319 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
9312 strcpy(info->bus_info, pci_name(bp->pdev)); 10320 strcpy(info->bus_info, pci_name(bp->pdev));
9313 info->n_stats = BNX2X_NUM_STATS; 10321 info->n_stats = BNX2X_NUM_STATS;
9314 info->testinfo_len = BNX2X_NUM_TESTS; 10322 info->testinfo_len = BNX2X_NUM_TESTS;
@@ -9842,19 +10850,18 @@ static int bnx2x_get_coalesce(struct net_device *dev,
9842 return 0; 10850 return 0;
9843} 10851}
9844 10852
9845#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9846static int bnx2x_set_coalesce(struct net_device *dev, 10853static int bnx2x_set_coalesce(struct net_device *dev,
9847 struct ethtool_coalesce *coal) 10854 struct ethtool_coalesce *coal)
9848{ 10855{
9849 struct bnx2x *bp = netdev_priv(dev); 10856 struct bnx2x *bp = netdev_priv(dev);
9850 10857
9851 bp->rx_ticks = (u16) coal->rx_coalesce_usecs; 10858 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
9852 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT) 10859 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9853 bp->rx_ticks = BNX2X_MAX_COALES_TOUT; 10860 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9854 10861
9855 bp->tx_ticks = (u16) coal->tx_coalesce_usecs; 10862 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
9856 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT) 10863 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9857 bp->tx_ticks = BNX2X_MAX_COALES_TOUT; 10864 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9858 10865
9859 if (netif_running(dev)) 10866 if (netif_running(dev))
9860 bnx2x_update_coalesce(bp); 10867 bnx2x_update_coalesce(bp);
@@ -9885,6 +10892,11 @@ static int bnx2x_set_ringparam(struct net_device *dev,
9885 struct bnx2x *bp = netdev_priv(dev); 10892 struct bnx2x *bp = netdev_priv(dev);
9886 int rc = 0; 10893 int rc = 0;
9887 10894
10895 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10896 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10897 return -EAGAIN;
10898 }
10899
9888 if ((ering->rx_pending > MAX_RX_AVAIL) || 10900 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9889 (ering->tx_pending > MAX_TX_AVAIL) || 10901 (ering->tx_pending > MAX_TX_AVAIL) ||
9890 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) 10902 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
@@ -9970,6 +10982,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
9970 int changed = 0; 10982 int changed = 0;
9971 int rc = 0; 10983 int rc = 0;
9972 10984
10985 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10986 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10987 return -EAGAIN;
10988 }
10989
9973 /* TPA requires Rx CSUM offloading */ 10990 /* TPA requires Rx CSUM offloading */
9974 if ((data & ETH_FLAG_LRO) && bp->rx_csum) { 10991 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9975 if (!disable_tpa) { 10992 if (!disable_tpa) {
@@ -9986,6 +11003,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
9986 changed = 1; 11003 changed = 1;
9987 } 11004 }
9988 11005
11006 if (data & ETH_FLAG_RXHASH)
11007 dev->features |= NETIF_F_RXHASH;
11008 else
11009 dev->features &= ~NETIF_F_RXHASH;
11010
9989 if (changed && netif_running(dev)) { 11011 if (changed && netif_running(dev)) {
9990 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 11012 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9991 rc = bnx2x_nic_load(bp, LOAD_NORMAL); 11013 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
@@ -10006,6 +11028,11 @@ static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10006 struct bnx2x *bp = netdev_priv(dev); 11028 struct bnx2x *bp = netdev_priv(dev);
10007 int rc = 0; 11029 int rc = 0;
10008 11030
11031 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11032 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11033 return -EAGAIN;
11034 }
11035
10009 bp->rx_csum = data; 11036 bp->rx_csum = data;
10010 11037
10011 /* Disable TPA, when Rx CSUM is disabled. Otherwise all 11038 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
@@ -10050,9 +11077,9 @@ static int bnx2x_test_registers(struct bnx2x *bp)
10050 u32 wr_val = 0; 11077 u32 wr_val = 0;
10051 int port = BP_PORT(bp); 11078 int port = BP_PORT(bp);
10052 static const struct { 11079 static const struct {
10053 u32 offset0; 11080 u32 offset0;
10054 u32 offset1; 11081 u32 offset1;
10055 u32 mask; 11082 u32 mask;
10056 } reg_tbl[] = { 11083 } reg_tbl[] = {
10057/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, 11084/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10058 { DORQ_REG_DB_ADDR0, 4, 0xffffffff }, 11085 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
@@ -10119,15 +11146,19 @@ static int bnx2x_test_registers(struct bnx2x *bp)
10119 11146
10120 save_val = REG_RD(bp, offset); 11147 save_val = REG_RD(bp, offset);
10121 11148
10122 REG_WR(bp, offset, wr_val); 11149 REG_WR(bp, offset, (wr_val & mask));
10123 val = REG_RD(bp, offset); 11150 val = REG_RD(bp, offset);
10124 11151
10125 /* Restore the original register's value */ 11152 /* Restore the original register's value */
10126 REG_WR(bp, offset, save_val); 11153 REG_WR(bp, offset, save_val);
10127 11154
10128 /* verify that value is as expected value */ 11155 /* verify value is as expected */
10129 if ((val & mask) != (wr_val & mask)) 11156 if ((val & mask) != (wr_val & mask)) {
11157 DP(NETIF_MSG_PROBE,
11158 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11159 offset, val, wr_val, mask);
10130 goto test_reg_exit; 11160 goto test_reg_exit;
11161 }
10131 } 11162 }
10132 } 11163 }
10133 11164
@@ -10267,8 +11298,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10267 11298
10268 bd_prod = TX_BD(fp_tx->tx_bd_prod); 11299 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10269 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; 11300 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10270 mapping = pci_map_single(bp->pdev, skb->data, 11301 mapping = dma_map_single(&bp->pdev->dev, skb->data,
10271 skb_headlen(skb), PCI_DMA_TODEVICE); 11302 skb_headlen(skb), DMA_TO_DEVICE);
10272 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 11303 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10273 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 11304 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10274 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ 11305 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -10344,6 +11375,9 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10344{ 11375{
10345 int rc = 0, res; 11376 int rc = 0, res;
10346 11377
11378 if (BP_NOMCP(bp))
11379 return rc;
11380
10347 if (!netif_running(bp->dev)) 11381 if (!netif_running(bp->dev))
10348 return BNX2X_LOOPBACK_FAILED; 11382 return BNX2X_LOOPBACK_FAILED;
10349 11383
@@ -10391,6 +11425,9 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
10391 int i, rc; 11425 int i, rc;
10392 u32 magic, crc; 11426 u32 magic, crc;
10393 11427
11428 if (BP_NOMCP(bp))
11429 return 0;
11430
10394 rc = bnx2x_nvram_read(bp, 0, data, 4); 11431 rc = bnx2x_nvram_read(bp, 0, data, 4);
10395 if (rc) { 11432 if (rc) {
10396 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc); 11433 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
@@ -10468,6 +11505,12 @@ static void bnx2x_self_test(struct net_device *dev,
10468{ 11505{
10469 struct bnx2x *bp = netdev_priv(dev); 11506 struct bnx2x *bp = netdev_priv(dev);
10470 11507
11508 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11509 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11510 etest->flags |= ETH_TEST_FL_FAILED;
11511 return;
11512 }
11513
10471 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); 11514 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10472 11515
10473 if (!netif_running(dev)) 11516 if (!netif_running(dev))
@@ -10556,7 +11599,11 @@ static const struct {
10556 11599
10557/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" }, 11600/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10558 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 11601 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10559 8, "[%d]: tx_packets" } 11602 8, "[%d]: tx_ucast_packets" },
11603 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11604 8, "[%d]: tx_mcast_packets" },
11605 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11606 8, "[%d]: tx_bcast_packets" }
10560}; 11607};
10561 11608
10562static const struct { 11609static const struct {
@@ -10618,16 +11665,20 @@ static const struct {
10618 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 11665 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10619 8, STATS_FLAGS_PORT, "tx_error_bytes" }, 11666 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10620 { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 11667 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10621 8, STATS_FLAGS_BOTH, "tx_packets" }, 11668 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11669 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11670 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11671 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11672 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
10622 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 11673 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10623 8, STATS_FLAGS_PORT, "tx_mac_errors" }, 11674 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10624 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 11675 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10625 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, 11676 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10626 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 11677/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10627 8, STATS_FLAGS_PORT, "tx_single_collisions" }, 11678 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10628 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 11679 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10629 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, 11680 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10630/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 11681 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10631 8, STATS_FLAGS_PORT, "tx_deferred" }, 11682 8, STATS_FLAGS_PORT, "tx_deferred" },
10632 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 11683 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10633 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, 11684 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
@@ -10643,11 +11694,11 @@ static const struct {
10643 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, 11694 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10644 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 11695 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10645 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, 11696 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10646 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 11697/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10647 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, 11698 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10648 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 11699 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10649 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, 11700 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10650/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi), 11701 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
10651 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, 11702 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10652 { STATS_OFFSET32(pause_frames_sent_hi), 11703 { STATS_OFFSET32(pause_frames_sent_hi),
10653 8, STATS_FLAGS_PORT, "tx_pause_frames" } 11704 8, STATS_FLAGS_PORT, "tx_pause_frames" }
@@ -10664,7 +11715,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10664 struct bnx2x *bp = netdev_priv(dev); 11715 struct bnx2x *bp = netdev_priv(dev);
10665 int i, num_stats; 11716 int i, num_stats;
10666 11717
10667 switch(stringset) { 11718 switch (stringset) {
10668 case ETH_SS_STATS: 11719 case ETH_SS_STATS:
10669 if (is_multi(bp)) { 11720 if (is_multi(bp)) {
10670 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; 11721 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
@@ -10893,6 +11944,14 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10893 break; 11944 break;
10894 11945
10895 case PCI_D3hot: 11946 case PCI_D3hot:
11947 /* If there are other clients above don't
11948 shut down the power */
11949 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11950 return 0;
11951 /* Don't shut down the power for emulation and FPGA */
11952 if (CHIP_REV_IS_SLOW(bp))
11953 return 0;
11954
10896 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 11955 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10897 pmcsr |= 3; 11956 pmcsr |= 3;
10898 11957
@@ -11182,6 +12241,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11182 int i; 12241 int i;
11183 u8 hlen = 0; 12242 u8 hlen = 0;
11184 __le16 pkt_size = 0; 12243 __le16 pkt_size = 0;
12244 struct ethhdr *eth;
12245 u8 mac_type = UNICAST_ADDRESS;
11185 12246
11186#ifdef BNX2X_STOP_ON_ERROR 12247#ifdef BNX2X_STOP_ON_ERROR
11187 if (unlikely(bp->panic)) 12248 if (unlikely(bp->panic))
@@ -11205,6 +12266,16 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11205 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 12266 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11206 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 12267 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11207 12268
12269 eth = (struct ethhdr *)skb->data;
12270
12271 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
12272 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12273 if (is_broadcast_ether_addr(eth->h_dest))
12274 mac_type = BROADCAST_ADDRESS;
12275 else
12276 mac_type = MULTICAST_ADDRESS;
12277 }
12278
11208#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) 12279#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11209 /* First, check if we need to linearize the skb (due to FW 12280 /* First, check if we need to linearize the skb (due to FW
11210 restrictions). No need to check fragmentation if page size > 8K 12281 restrictions). No need to check fragmentation if page size > 8K
@@ -11238,8 +12309,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11238 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; 12309 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11239 12310
11240 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 12311 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11241 tx_start_bd->general_data = (UNICAST_ADDRESS << 12312 tx_start_bd->general_data = (mac_type <<
11242 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); 12313 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11243 /* header nbd */ 12314 /* header nbd */
11244 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 12315 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11245 12316
@@ -11314,8 +12385,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11314 } 12385 }
11315 } 12386 }
11316 12387
11317 mapping = pci_map_single(bp->pdev, skb->data, 12388 mapping = dma_map_single(&bp->pdev->dev, skb->data,
11318 skb_headlen(skb), PCI_DMA_TODEVICE); 12389 skb_headlen(skb), DMA_TO_DEVICE);
11319 12390
11320 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 12391 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11321 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 12392 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11372,8 +12443,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11372 if (total_pkt_bd == NULL) 12443 if (total_pkt_bd == NULL)
11373 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; 12444 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11374 12445
11375 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset, 12446 mapping = dma_map_page(&bp->pdev->dev, frag->page,
11376 frag->size, PCI_DMA_TODEVICE); 12447 frag->page_offset,
12448 frag->size, DMA_TO_DEVICE);
11377 12449
11378 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 12450 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11379 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 12451 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11452,6 +12524,40 @@ static int bnx2x_open(struct net_device *dev)
11452 12524
11453 bnx2x_set_power_state(bp, PCI_D0); 12525 bnx2x_set_power_state(bp, PCI_D0);
11454 12526
12527 if (!bnx2x_reset_is_done(bp)) {
12528 do {
12529 /* Reset MCP mail box sequence if there is on going
12530 * recovery
12531 */
12532 bp->fw_seq = 0;
12533
12534 /* If it's the first function to load and reset done
12535 * is still not cleared it may mean that. We don't
12536 * check the attention state here because it may have
12537 * already been cleared by a "common" reset but we
12538 * shell proceed with "process kill" anyway.
12539 */
12540 if ((bnx2x_get_load_cnt(bp) == 0) &&
12541 bnx2x_trylock_hw_lock(bp,
12542 HW_LOCK_RESOURCE_RESERVED_08) &&
12543 (!bnx2x_leader_reset(bp))) {
12544 DP(NETIF_MSG_HW, "Recovered in open\n");
12545 break;
12546 }
12547
12548 bnx2x_set_power_state(bp, PCI_D3hot);
12549
12550 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
12551 " completed yet. Try again later. If u still see this"
12552 " message after a few retries then power cycle is"
12553 " required.\n", bp->dev->name);
12554
12555 return -EAGAIN;
12556 } while (0);
12557 }
12558
12559 bp->recovery_state = BNX2X_RECOVERY_DONE;
12560
11455 return bnx2x_nic_load(bp, LOAD_OPEN); 12561 return bnx2x_nic_load(bp, LOAD_OPEN);
11456} 12562}
11457 12563
@@ -11462,9 +12568,7 @@ static int bnx2x_close(struct net_device *dev)
11462 12568
11463 /* Unload the driver, release IRQs */ 12569 /* Unload the driver, release IRQs */
11464 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 12570 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11465 if (atomic_read(&bp->pdev->enable_cnt) == 1) 12571 bnx2x_set_power_state(bp, PCI_D3hot);
11466 if (!CHIP_REV_IS_SLOW(bp))
11467 bnx2x_set_power_state(bp, PCI_D3hot);
11468 12572
11469 return 0; 12573 return 0;
11470} 12574}
@@ -11494,21 +12598,21 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
11494 else { /* some multicasts */ 12598 else { /* some multicasts */
11495 if (CHIP_IS_E1(bp)) { 12599 if (CHIP_IS_E1(bp)) {
11496 int i, old, offset; 12600 int i, old, offset;
11497 struct dev_mc_list *mclist; 12601 struct netdev_hw_addr *ha;
11498 struct mac_configuration_cmd *config = 12602 struct mac_configuration_cmd *config =
11499 bnx2x_sp(bp, mcast_config); 12603 bnx2x_sp(bp, mcast_config);
11500 12604
11501 i = 0; 12605 i = 0;
11502 netdev_for_each_mc_addr(mclist, dev) { 12606 netdev_for_each_mc_addr(ha, dev) {
11503 config->config_table[i]. 12607 config->config_table[i].
11504 cam_entry.msb_mac_addr = 12608 cam_entry.msb_mac_addr =
11505 swab16(*(u16 *)&mclist->dmi_addr[0]); 12609 swab16(*(u16 *)&ha->addr[0]);
11506 config->config_table[i]. 12610 config->config_table[i].
11507 cam_entry.middle_mac_addr = 12611 cam_entry.middle_mac_addr =
11508 swab16(*(u16 *)&mclist->dmi_addr[2]); 12612 swab16(*(u16 *)&ha->addr[2]);
11509 config->config_table[i]. 12613 config->config_table[i].
11510 cam_entry.lsb_mac_addr = 12614 cam_entry.lsb_mac_addr =
11511 swab16(*(u16 *)&mclist->dmi_addr[4]); 12615 swab16(*(u16 *)&ha->addr[4]);
11512 config->config_table[i].cam_entry.flags = 12616 config->config_table[i].cam_entry.flags =
11513 cpu_to_le16(port); 12617 cpu_to_le16(port);
11514 config->config_table[i]. 12618 config->config_table[i].
@@ -11562,18 +12666,18 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
11562 0); 12666 0);
11563 } else { /* E1H */ 12667 } else { /* E1H */
11564 /* Accept one or more multicasts */ 12668 /* Accept one or more multicasts */
11565 struct dev_mc_list *mclist; 12669 struct netdev_hw_addr *ha;
11566 u32 mc_filter[MC_HASH_SIZE]; 12670 u32 mc_filter[MC_HASH_SIZE];
11567 u32 crc, bit, regidx; 12671 u32 crc, bit, regidx;
11568 int i; 12672 int i;
11569 12673
11570 memset(mc_filter, 0, 4 * MC_HASH_SIZE); 12674 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11571 12675
11572 netdev_for_each_mc_addr(mclist, dev) { 12676 netdev_for_each_mc_addr(ha, dev) {
11573 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", 12677 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11574 mclist->dmi_addr); 12678 ha->addr);
11575 12679
11576 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN); 12680 crc = crc32c_le(0, ha->addr, ETH_ALEN);
11577 bit = (crc >> 24) & 0xff; 12681 bit = (crc >> 24) & 0xff;
11578 regidx = bit >> 5; 12682 regidx = bit >> 5;
11579 bit &= 0x1f; 12683 bit &= 0x1f;
@@ -11690,6 +12794,11 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11690 struct bnx2x *bp = netdev_priv(dev); 12794 struct bnx2x *bp = netdev_priv(dev);
11691 int rc = 0; 12795 int rc = 0;
11692 12796
12797 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12798 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12799 return -EAGAIN;
12800 }
12801
11693 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || 12802 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11694 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) 12803 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11695 return -EINVAL; 12804 return -EINVAL;
@@ -11717,7 +12826,7 @@ static void bnx2x_tx_timeout(struct net_device *dev)
11717 bnx2x_panic(); 12826 bnx2x_panic();
11718#endif 12827#endif
11719 /* This allows the netif to be shutdown gracefully before resetting */ 12828 /* This allows the netif to be shutdown gracefully before resetting */
11720 schedule_work(&bp->reset_task); 12829 schedule_delayed_work(&bp->reset_task, 0);
11721} 12830}
11722 12831
11723#ifdef BCM_VLAN 12832#ifdef BCM_VLAN
@@ -11789,18 +12898,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11789 12898
11790 rc = pci_enable_device(pdev); 12899 rc = pci_enable_device(pdev);
11791 if (rc) { 12900 if (rc) {
11792 pr_err("Cannot enable PCI device, aborting\n"); 12901 dev_err(&bp->pdev->dev,
12902 "Cannot enable PCI device, aborting\n");
11793 goto err_out; 12903 goto err_out;
11794 } 12904 }
11795 12905
11796 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 12906 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11797 pr_err("Cannot find PCI device base address, aborting\n"); 12907 dev_err(&bp->pdev->dev,
12908 "Cannot find PCI device base address, aborting\n");
11798 rc = -ENODEV; 12909 rc = -ENODEV;
11799 goto err_out_disable; 12910 goto err_out_disable;
11800 } 12911 }
11801 12912
11802 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 12913 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11803 pr_err("Cannot find second PCI device base address, aborting\n"); 12914 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12915 " base address, aborting\n");
11804 rc = -ENODEV; 12916 rc = -ENODEV;
11805 goto err_out_disable; 12917 goto err_out_disable;
11806 } 12918 }
@@ -11808,7 +12920,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11808 if (atomic_read(&pdev->enable_cnt) == 1) { 12920 if (atomic_read(&pdev->enable_cnt) == 1) {
11809 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 12921 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11810 if (rc) { 12922 if (rc) {
11811 pr_err("Cannot obtain PCI resources, aborting\n"); 12923 dev_err(&bp->pdev->dev,
12924 "Cannot obtain PCI resources, aborting\n");
11812 goto err_out_disable; 12925 goto err_out_disable;
11813 } 12926 }
11814 12927
@@ -11818,28 +12931,32 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11818 12931
11819 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 12932 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11820 if (bp->pm_cap == 0) { 12933 if (bp->pm_cap == 0) {
11821 pr_err("Cannot find power management capability, aborting\n"); 12934 dev_err(&bp->pdev->dev,
12935 "Cannot find power management capability, aborting\n");
11822 rc = -EIO; 12936 rc = -EIO;
11823 goto err_out_release; 12937 goto err_out_release;
11824 } 12938 }
11825 12939
11826 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); 12940 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11827 if (bp->pcie_cap == 0) { 12941 if (bp->pcie_cap == 0) {
11828 pr_err("Cannot find PCI Express capability, aborting\n"); 12942 dev_err(&bp->pdev->dev,
12943 "Cannot find PCI Express capability, aborting\n");
11829 rc = -EIO; 12944 rc = -EIO;
11830 goto err_out_release; 12945 goto err_out_release;
11831 } 12946 }
11832 12947
11833 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { 12948 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
11834 bp->flags |= USING_DAC_FLAG; 12949 bp->flags |= USING_DAC_FLAG;
11835 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 12950 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
11836 pr_err("pci_set_consistent_dma_mask failed, aborting\n"); 12951 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12952 " failed, aborting\n");
11837 rc = -EIO; 12953 rc = -EIO;
11838 goto err_out_release; 12954 goto err_out_release;
11839 } 12955 }
11840 12956
11841 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 12957 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
11842 pr_err("System does not support DMA, aborting\n"); 12958 dev_err(&bp->pdev->dev,
12959 "System does not support DMA, aborting\n");
11843 rc = -EIO; 12960 rc = -EIO;
11844 goto err_out_release; 12961 goto err_out_release;
11845 } 12962 }
@@ -11852,7 +12969,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11852 12969
11853 bp->regview = pci_ioremap_bar(pdev, 0); 12970 bp->regview = pci_ioremap_bar(pdev, 0);
11854 if (!bp->regview) { 12971 if (!bp->regview) {
11855 pr_err("Cannot map register space, aborting\n"); 12972 dev_err(&bp->pdev->dev,
12973 "Cannot map register space, aborting\n");
11856 rc = -ENOMEM; 12974 rc = -ENOMEM;
11857 goto err_out_release; 12975 goto err_out_release;
11858 } 12976 }
@@ -11861,7 +12979,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11861 min_t(u64, BNX2X_DB_SIZE, 12979 min_t(u64, BNX2X_DB_SIZE,
11862 pci_resource_len(pdev, 2))); 12980 pci_resource_len(pdev, 2)));
11863 if (!bp->doorbells) { 12981 if (!bp->doorbells) {
11864 pr_err("Cannot map doorbell space, aborting\n"); 12982 dev_err(&bp->pdev->dev,
12983 "Cannot map doorbell space, aborting\n");
11865 rc = -ENOMEM; 12984 rc = -ENOMEM;
11866 goto err_out_unmap; 12985 goto err_out_unmap;
11867 } 12986 }
@@ -11876,6 +12995,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11876 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); 12995 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11877 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); 12996 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11878 12997
12998 /* Reset the load counter */
12999 bnx2x_clear_load_cnt(bp);
13000
11879 dev->watchdog_timeo = TX_TIMEOUT; 13001 dev->watchdog_timeo = TX_TIMEOUT;
11880 13002
11881 dev->netdev_ops = &bnx2x_netdev_ops; 13003 dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11963,7 +13085,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11963 offset = be32_to_cpu(sections[i].offset); 13085 offset = be32_to_cpu(sections[i].offset);
11964 len = be32_to_cpu(sections[i].len); 13086 len = be32_to_cpu(sections[i].len);
11965 if (offset + len > firmware->size) { 13087 if (offset + len > firmware->size) {
11966 pr_err("Section %d length is out of bounds\n", i); 13088 dev_err(&bp->pdev->dev,
13089 "Section %d length is out of bounds\n", i);
11967 return -EINVAL; 13090 return -EINVAL;
11968 } 13091 }
11969 } 13092 }
@@ -11975,7 +13098,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11975 13098
11976 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { 13099 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11977 if (be16_to_cpu(ops_offsets[i]) > num_ops) { 13100 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11978 pr_err("Section offset %d is out of bounds\n", i); 13101 dev_err(&bp->pdev->dev,
13102 "Section offset %d is out of bounds\n", i);
11979 return -EINVAL; 13103 return -EINVAL;
11980 } 13104 }
11981 } 13105 }
@@ -11987,7 +13111,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11987 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || 13111 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11988 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || 13112 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11989 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { 13113 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11990 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n", 13114 dev_err(&bp->pdev->dev,
13115 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
11991 fw_ver[0], fw_ver[1], fw_ver[2], 13116 fw_ver[0], fw_ver[1], fw_ver[2],
11992 fw_ver[3], BCM_5710_FW_MAJOR_VERSION, 13117 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11993 BCM_5710_FW_MINOR_VERSION, 13118 BCM_5710_FW_MINOR_VERSION,
@@ -12022,8 +13147,8 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12022 for (i = 0, j = 0; i < n/8; i++, j += 2) { 13147 for (i = 0, j = 0; i < n/8; i++, j += 2) {
12023 tmp = be32_to_cpu(source[j]); 13148 tmp = be32_to_cpu(source[j]);
12024 target[i].op = (tmp >> 24) & 0xff; 13149 target[i].op = (tmp >> 24) & 0xff;
12025 target[i].offset = tmp & 0xffffff; 13150 target[i].offset = tmp & 0xffffff;
12026 target[i].raw_data = be32_to_cpu(source[j+1]); 13151 target[i].raw_data = be32_to_cpu(source[j + 1]);
12027 } 13152 }
12028} 13153}
12029 13154
@@ -12057,20 +13182,24 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12057 13182
12058 if (CHIP_IS_E1(bp)) 13183 if (CHIP_IS_E1(bp))
12059 fw_file_name = FW_FILE_NAME_E1; 13184 fw_file_name = FW_FILE_NAME_E1;
12060 else 13185 else if (CHIP_IS_E1H(bp))
12061 fw_file_name = FW_FILE_NAME_E1H; 13186 fw_file_name = FW_FILE_NAME_E1H;
13187 else {
13188 dev_err(dev, "Unsupported chip revision\n");
13189 return -EINVAL;
13190 }
12062 13191
12063 pr_info("Loading %s\n", fw_file_name); 13192 dev_info(dev, "Loading %s\n", fw_file_name);
12064 13193
12065 rc = request_firmware(&bp->firmware, fw_file_name, dev); 13194 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12066 if (rc) { 13195 if (rc) {
12067 pr_err("Can't load firmware file %s\n", fw_file_name); 13196 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
12068 goto request_firmware_exit; 13197 goto request_firmware_exit;
12069 } 13198 }
12070 13199
12071 rc = bnx2x_check_firmware(bp); 13200 rc = bnx2x_check_firmware(bp);
12072 if (rc) { 13201 if (rc) {
12073 pr_err("Corrupt firmware file %s\n", fw_file_name); 13202 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
12074 goto request_firmware_exit; 13203 goto request_firmware_exit;
12075 } 13204 }
12076 13205
@@ -12129,7 +13258,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12129 /* dev zeroed in init_etherdev */ 13258 /* dev zeroed in init_etherdev */
12130 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT); 13259 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12131 if (!dev) { 13260 if (!dev) {
12132 pr_err("Cannot allocate net device\n"); 13261 dev_err(&pdev->dev, "Cannot allocate net device\n");
12133 return -ENOMEM; 13262 return -ENOMEM;
12134 } 13263 }
12135 13264
@@ -12151,7 +13280,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12151 /* Set init arrays */ 13280 /* Set init arrays */
12152 rc = bnx2x_init_firmware(bp, &pdev->dev); 13281 rc = bnx2x_init_firmware(bp, &pdev->dev);
12153 if (rc) { 13282 if (rc) {
12154 pr_err("Error loading firmware\n"); 13283 dev_err(&pdev->dev, "Error loading firmware\n");
12155 goto init_one_exit; 13284 goto init_one_exit;
12156 } 13285 }
12157 13286
@@ -12162,11 +13291,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12162 } 13291 }
12163 13292
12164 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 13293 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12165 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", 13294 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
12166 board_info[ent->driver_data].name, 13295 " IRQ %d, ", board_info[ent->driver_data].name,
12167 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 13296 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12168 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz", 13297 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12169 dev->base_addr, bp->pdev->irq, dev->dev_addr); 13298 dev->base_addr, bp->pdev->irq);
13299 pr_cont("node addr %pM\n", dev->dev_addr);
12170 13300
12171 return 0; 13301 return 0;
12172 13302
@@ -12194,13 +13324,16 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12194 struct bnx2x *bp; 13324 struct bnx2x *bp;
12195 13325
12196 if (!dev) { 13326 if (!dev) {
12197 pr_err("BAD net device from bnx2x_init_one\n"); 13327 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12198 return; 13328 return;
12199 } 13329 }
12200 bp = netdev_priv(dev); 13330 bp = netdev_priv(dev);
12201 13331
12202 unregister_netdev(dev); 13332 unregister_netdev(dev);
12203 13333
13334 /* Make sure RESET task is not scheduled before continuing */
13335 cancel_delayed_work_sync(&bp->reset_task);
13336
12204 kfree(bp->init_ops_offsets); 13337 kfree(bp->init_ops_offsets);
12205 kfree(bp->init_ops); 13338 kfree(bp->init_ops);
12206 kfree(bp->init_data); 13339 kfree(bp->init_data);
@@ -12227,7 +13360,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12227 struct bnx2x *bp; 13360 struct bnx2x *bp;
12228 13361
12229 if (!dev) { 13362 if (!dev) {
12230 pr_err("BAD net device from bnx2x_init_one\n"); 13363 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12231 return -ENODEV; 13364 return -ENODEV;
12232 } 13365 }
12233 bp = netdev_priv(dev); 13366 bp = netdev_priv(dev);
@@ -12259,11 +13392,16 @@ static int bnx2x_resume(struct pci_dev *pdev)
12259 int rc; 13392 int rc;
12260 13393
12261 if (!dev) { 13394 if (!dev) {
12262 pr_err("BAD net device from bnx2x_init_one\n"); 13395 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12263 return -ENODEV; 13396 return -ENODEV;
12264 } 13397 }
12265 bp = netdev_priv(dev); 13398 bp = netdev_priv(dev);
12266 13399
13400 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13401 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13402 return -EAGAIN;
13403 }
13404
12267 rtnl_lock(); 13405 rtnl_lock();
12268 13406
12269 pci_restore_state(pdev); 13407 pci_restore_state(pdev);
@@ -12292,6 +13430,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12292 bp->rx_mode = BNX2X_RX_MODE_NONE; 13430 bp->rx_mode = BNX2X_RX_MODE_NONE;
12293 13431
12294 bnx2x_netif_stop(bp, 0); 13432 bnx2x_netif_stop(bp, 0);
13433 netif_carrier_off(bp->dev);
12295 13434
12296 del_timer_sync(&bp->timer); 13435 del_timer_sync(&bp->timer);
12297 bp->stats_state = STATS_STATE_DISABLED; 13436 bp->stats_state = STATS_STATE_DISABLED;
@@ -12318,8 +13457,6 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12318 13457
12319 bp->state = BNX2X_STATE_CLOSED; 13458 bp->state = BNX2X_STATE_CLOSED;
12320 13459
12321 netif_carrier_off(bp->dev);
12322
12323 return 0; 13460 return 0;
12324} 13461}
12325 13462
@@ -12430,6 +13567,11 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
12430 struct net_device *dev = pci_get_drvdata(pdev); 13567 struct net_device *dev = pci_get_drvdata(pdev);
12431 struct bnx2x *bp = netdev_priv(dev); 13568 struct bnx2x *bp = netdev_priv(dev);
12432 13569
13570 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13571 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13572 return;
13573 }
13574
12433 rtnl_lock(); 13575 rtnl_lock();
12434 13576
12435 bnx2x_eeh_recover(bp); 13577 bnx2x_eeh_recover(bp);
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 944964e78c81..a1f3bf0cd630 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -766,6 +766,8 @@
766#define MCP_REG_MCPR_NVM_SW_ARB 0x86420 766#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
767#define MCP_REG_MCPR_NVM_WRITE 0x86408 767#define MCP_REG_MCPR_NVM_WRITE 0x86408
768#define MCP_REG_MCPR_SCRATCH 0xa0000 768#define MCP_REG_MCPR_SCRATCH 0xa0000
769#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
770#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
769/* [R 32] read first 32 bit after inversion of function 0. mapped as 771/* [R 32] read first 32 bit after inversion of function 0. mapped as
770 follows: [0] NIG attention for function0; [1] NIG attention for 772 follows: [0] NIG attention for function0; [1] NIG attention for
771 function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; 773 function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
@@ -1249,6 +1251,8 @@
1249#define MISC_REG_E1HMF_MODE 0xa5f8 1251#define MISC_REG_E1HMF_MODE 0xa5f8
1250/* [RW 32] Debug only: spare RW register reset by core reset */ 1252/* [RW 32] Debug only: spare RW register reset by core reset */
1251#define MISC_REG_GENERIC_CR_0 0xa460 1253#define MISC_REG_GENERIC_CR_0 0xa460
1254/* [RW 32] Debug only: spare RW register reset by por reset */
1255#define MISC_REG_GENERIC_POR_1 0xa474
1252/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of 1256/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
1253 these bits is written as a '1'; the corresponding SPIO bit will turn off 1257 these bits is written as a '1'; the corresponding SPIO bit will turn off
1254 it's drivers and become an input. This is the reset state of all GPIO 1258 it's drivers and become an input. This is the reset state of all GPIO
@@ -1438,7 +1442,7 @@
1438 (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */ 1442 (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
1439#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc 1443#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
1440/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses 1444/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
1441 in this register. addres 0 - timer 1; address - timer 2�address 7 - 1445 in this register. addres 0 - timer 1; address 1 - timer 2, ... address 7 -
1442 timer 8 */ 1446 timer 8 */
1443#define MISC_REG_SW_TIMER_VAL 0xa5c0 1447#define MISC_REG_SW_TIMER_VAL 0xa5c0
1444/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are 1448/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
@@ -2407,10 +2411,16 @@
2407/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means 2411/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
2408 this client is waiting for the arbiter. */ 2412 this client is waiting for the arbiter. */
2409#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008 2413#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
2414/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue
2415 block. Should be used for close the gates. */
2416#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
2410/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit 2417/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
2411 should update accoring to 'hst_discard_doorbells' register when the state 2418 should update accoring to 'hst_discard_doorbells' register when the state
2412 machine is idle */ 2419 machine is idle */
2413#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0 2420#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
2421/* [RW 1] When 1; new internal writes arriving to the block are discarded.
2422 Should be used for close the gates. */
2423#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
2414/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1' 2424/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
2415 means this PSWHST is discarding inputs from this client. Each bit should 2425 means this PSWHST is discarding inputs from this client. Each bit should
2416 update accoring to 'hst_discard_internal_writes' register when the state 2426 update accoring to 'hst_discard_internal_writes' register when the state
@@ -4422,11 +4432,21 @@
4422#define MISC_REGISTERS_GPIO_PORT_SHIFT 4 4432#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
4423#define MISC_REGISTERS_GPIO_SET_POS 8 4433#define MISC_REGISTERS_GPIO_SET_POS 8
4424#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 4434#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
4435#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
4425#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) 4436#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
4437#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
4438#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
4426#define MISC_REGISTERS_RESET_REG_1_SET 0x584 4439#define MISC_REGISTERS_RESET_REG_1_SET 0x584
4427#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 4440#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
4428#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) 4441#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
4429#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) 4442#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
4443#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
4444#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
4445#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
4446#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
4447#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
4448#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
4449#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
4430#define MISC_REGISTERS_RESET_REG_2_SET 0x594 4450#define MISC_REGISTERS_RESET_REG_2_SET 0x594
4431#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 4451#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
4432#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1) 4452#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
@@ -4454,6 +4474,7 @@
4454#define HW_LOCK_RESOURCE_GPIO 1 4474#define HW_LOCK_RESOURCE_GPIO 1
4455#define HW_LOCK_RESOURCE_MDIO 0 4475#define HW_LOCK_RESOURCE_MDIO 0
4456#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 4476#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
4477#define HW_LOCK_RESOURCE_RESERVED_08 8
4457#define HW_LOCK_RESOURCE_SPIO 2 4478#define HW_LOCK_RESOURCE_SPIO 2
4458#define HW_LOCK_RESOURCE_UNDI 5 4479#define HW_LOCK_RESOURCE_UNDI 5
4459#define PRS_FLAG_OVERETH_IPV4 1 4480#define PRS_FLAG_OVERETH_IPV4 1
@@ -4474,6 +4495,10 @@
4474#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5) 4495#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5)
4475#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9) 4496#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9)
4476#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12) 4497#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12)
4498#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (1<<28)
4499#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (1<<31)
4500#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (1<<29)
4501#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (1<<30)
4477#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15) 4502#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15)
4478#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14) 4503#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14)
4479#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20) 4504#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 6dd64cf3cb76..969ffed86b9f 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -37,7 +37,6 @@
37static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr) 37static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
38{ 38{
39 struct inet6_dev *idev; 39 struct inet6_dev *idev;
40 struct inet6_ifaddr *ifa;
41 40
42 if (!dev) 41 if (!dev)
43 return; 42 return;
@@ -47,10 +46,12 @@ static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
47 return; 46 return;
48 47
49 read_lock_bh(&idev->lock); 48 read_lock_bh(&idev->lock);
50 ifa = idev->addr_list; 49 if (!list_empty(&idev->addr_list)) {
51 if (ifa) 50 struct inet6_ifaddr *ifa
51 = list_first_entry(&idev->addr_list,
52 struct inet6_ifaddr, if_list);
52 ipv6_addr_copy(addr, &ifa->addr); 53 ipv6_addr_copy(addr, &ifa->addr);
53 else 54 } else
54 ipv6_addr_set(addr, 0, 0, 0, 0); 55 ipv6_addr_set(addr, 0, 0, 0, 0);
55 56
56 read_unlock_bh(&idev->lock); 57 read_unlock_bh(&idev->lock);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0075514bf32f..5e12462a9d5e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -59,6 +59,7 @@
59#include <linux/uaccess.h> 59#include <linux/uaccess.h>
60#include <linux/errno.h> 60#include <linux/errno.h>
61#include <linux/netdevice.h> 61#include <linux/netdevice.h>
62#include <linux/netpoll.h>
62#include <linux/inetdevice.h> 63#include <linux/inetdevice.h>
63#include <linux/igmp.h> 64#include <linux/igmp.h>
64#include <linux/etherdevice.h> 65#include <linux/etherdevice.h>
@@ -430,7 +431,18 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
430 } 431 }
431 432
432 skb->priority = 1; 433 skb->priority = 1;
433 dev_queue_xmit(skb); 434#ifdef CONFIG_NET_POLL_CONTROLLER
435 if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
436 struct netpoll *np = bond->dev->npinfo->netpoll;
437 slave_dev->npinfo = bond->dev->npinfo;
438 np->real_dev = np->dev = skb->dev;
439 slave_dev->priv_flags |= IFF_IN_NETPOLL;
440 netpoll_send_skb(np, skb);
441 slave_dev->priv_flags &= ~IFF_IN_NETPOLL;
442 np->dev = bond->dev;
443 } else
444#endif
445 dev_queue_xmit(skb);
434 446
435 return 0; 447 return 0;
436} 448}
@@ -762,32 +774,6 @@ static int bond_check_dev_link(struct bonding *bond,
762/*----------------------------- Multicast list ------------------------------*/ 774/*----------------------------- Multicast list ------------------------------*/
763 775
764/* 776/*
765 * Returns 0 if dmi1 and dmi2 are the same, non-0 otherwise
766 */
767static inline int bond_is_dmi_same(const struct dev_mc_list *dmi1,
768 const struct dev_mc_list *dmi2)
769{
770 return memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0 &&
771 dmi1->dmi_addrlen == dmi2->dmi_addrlen;
772}
773
774/*
775 * returns dmi entry if found, NULL otherwise
776 */
777static struct dev_mc_list *bond_mc_list_find_dmi(struct dev_mc_list *dmi,
778 struct dev_mc_list *mc_list)
779{
780 struct dev_mc_list *idmi;
781
782 for (idmi = mc_list; idmi; idmi = idmi->next) {
783 if (bond_is_dmi_same(dmi, idmi))
784 return idmi;
785 }
786
787 return NULL;
788}
789
790/*
791 * Push the promiscuity flag down to appropriate slaves 777 * Push the promiscuity flag down to appropriate slaves
792 */ 778 */
793static int bond_set_promiscuity(struct bonding *bond, int inc) 779static int bond_set_promiscuity(struct bonding *bond, int inc)
@@ -839,18 +825,18 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
839 * Add a Multicast address to slaves 825 * Add a Multicast address to slaves
840 * according to mode 826 * according to mode
841 */ 827 */
842static void bond_mc_add(struct bonding *bond, void *addr, int alen) 828static void bond_mc_add(struct bonding *bond, void *addr)
843{ 829{
844 if (USES_PRIMARY(bond->params.mode)) { 830 if (USES_PRIMARY(bond->params.mode)) {
845 /* write lock already acquired */ 831 /* write lock already acquired */
846 if (bond->curr_active_slave) 832 if (bond->curr_active_slave)
847 dev_mc_add(bond->curr_active_slave->dev, addr, alen, 0); 833 dev_mc_add(bond->curr_active_slave->dev, addr);
848 } else { 834 } else {
849 struct slave *slave; 835 struct slave *slave;
850 int i; 836 int i;
851 837
852 bond_for_each_slave(bond, slave, i) 838 bond_for_each_slave(bond, slave, i)
853 dev_mc_add(slave->dev, addr, alen, 0); 839 dev_mc_add(slave->dev, addr);
854 } 840 }
855} 841}
856 842
@@ -858,18 +844,17 @@ static void bond_mc_add(struct bonding *bond, void *addr, int alen)
858 * Remove a multicast address from slave 844 * Remove a multicast address from slave
859 * according to mode 845 * according to mode
860 */ 846 */
861static void bond_mc_delete(struct bonding *bond, void *addr, int alen) 847static void bond_mc_del(struct bonding *bond, void *addr)
862{ 848{
863 if (USES_PRIMARY(bond->params.mode)) { 849 if (USES_PRIMARY(bond->params.mode)) {
864 /* write lock already acquired */ 850 /* write lock already acquired */
865 if (bond->curr_active_slave) 851 if (bond->curr_active_slave)
866 dev_mc_delete(bond->curr_active_slave->dev, addr, 852 dev_mc_del(bond->curr_active_slave->dev, addr);
867 alen, 0);
868 } else { 853 } else {
869 struct slave *slave; 854 struct slave *slave;
870 int i; 855 int i;
871 bond_for_each_slave(bond, slave, i) { 856 bond_for_each_slave(bond, slave, i) {
872 dev_mc_delete(slave->dev, addr, alen, 0); 857 dev_mc_del(slave->dev, addr);
873 } 858 }
874 } 859 }
875} 860}
@@ -896,66 +881,22 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
896} 881}
897 882
898/* 883/*
899 * Totally destroys the mc_list in bond
900 */
901static void bond_mc_list_destroy(struct bonding *bond)
902{
903 struct dev_mc_list *dmi;
904
905 dmi = bond->mc_list;
906 while (dmi) {
907 bond->mc_list = dmi->next;
908 kfree(dmi);
909 dmi = bond->mc_list;
910 }
911
912 bond->mc_list = NULL;
913}
914
915/*
916 * Copy all the Multicast addresses from src to the bonding device dst
917 */
918static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
919 gfp_t gfp_flag)
920{
921 struct dev_mc_list *dmi, *new_dmi;
922
923 for (dmi = mc_list; dmi; dmi = dmi->next) {
924 new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag);
925
926 if (!new_dmi) {
927 /* FIXME: Potential memory leak !!! */
928 return -ENOMEM;
929 }
930
931 new_dmi->next = bond->mc_list;
932 bond->mc_list = new_dmi;
933 new_dmi->dmi_addrlen = dmi->dmi_addrlen;
934 memcpy(new_dmi->dmi_addr, dmi->dmi_addr, dmi->dmi_addrlen);
935 new_dmi->dmi_users = dmi->dmi_users;
936 new_dmi->dmi_gusers = dmi->dmi_gusers;
937 }
938
939 return 0;
940}
941
942/*
943 * flush all members of flush->mc_list from device dev->mc_list 884 * flush all members of flush->mc_list from device dev->mc_list
944 */ 885 */
945static void bond_mc_list_flush(struct net_device *bond_dev, 886static void bond_mc_list_flush(struct net_device *bond_dev,
946 struct net_device *slave_dev) 887 struct net_device *slave_dev)
947{ 888{
948 struct bonding *bond = netdev_priv(bond_dev); 889 struct bonding *bond = netdev_priv(bond_dev);
949 struct dev_mc_list *dmi; 890 struct netdev_hw_addr *ha;
950 891
951 for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) 892 netdev_for_each_mc_addr(ha, bond_dev)
952 dev_mc_delete(slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); 893 dev_mc_del(slave_dev, ha->addr);
953 894
954 if (bond->params.mode == BOND_MODE_8023AD) { 895 if (bond->params.mode == BOND_MODE_8023AD) {
955 /* del lacpdu mc addr from mc list */ 896 /* del lacpdu mc addr from mc list */
956 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; 897 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
957 898
958 dev_mc_delete(slave_dev, lacpdu_multicast, ETH_ALEN, 0); 899 dev_mc_del(slave_dev, lacpdu_multicast);
959 } 900 }
960} 901}
961 902
@@ -969,7 +910,7 @@ static void bond_mc_list_flush(struct net_device *bond_dev,
969static void bond_mc_swap(struct bonding *bond, struct slave *new_active, 910static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
970 struct slave *old_active) 911 struct slave *old_active)
971{ 912{
972 struct dev_mc_list *dmi; 913 struct netdev_hw_addr *ha;
973 914
974 if (!USES_PRIMARY(bond->params.mode)) 915 if (!USES_PRIMARY(bond->params.mode))
975 /* nothing to do - mc list is already up-to-date on 916 /* nothing to do - mc list is already up-to-date on
@@ -984,9 +925,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
984 if (bond->dev->flags & IFF_ALLMULTI) 925 if (bond->dev->flags & IFF_ALLMULTI)
985 dev_set_allmulti(old_active->dev, -1); 926 dev_set_allmulti(old_active->dev, -1);
986 927
987 for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) 928 netdev_for_each_mc_addr(ha, bond->dev)
988 dev_mc_delete(old_active->dev, dmi->dmi_addr, 929 dev_mc_del(old_active->dev, ha->addr);
989 dmi->dmi_addrlen, 0);
990 } 930 }
991 931
992 if (new_active) { 932 if (new_active) {
@@ -997,9 +937,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
997 if (bond->dev->flags & IFF_ALLMULTI) 937 if (bond->dev->flags & IFF_ALLMULTI)
998 dev_set_allmulti(new_active->dev, 1); 938 dev_set_allmulti(new_active->dev, 1);
999 939
1000 for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) 940 netdev_for_each_mc_addr(ha, bond->dev)
1001 dev_mc_add(new_active->dev, dmi->dmi_addr, 941 dev_mc_add(new_active->dev, ha->addr);
1002 dmi->dmi_addrlen, 0);
1003 bond_resend_igmp_join_requests(bond); 942 bond_resend_igmp_join_requests(bond);
1004 } 943 }
1005} 944}
@@ -1329,6 +1268,61 @@ static void bond_detach_slave(struct bonding *bond, struct slave *slave)
1329 bond->slave_cnt--; 1268 bond->slave_cnt--;
1330} 1269}
1331 1270
1271#ifdef CONFIG_NET_POLL_CONTROLLER
1272/*
1273 * You must hold read lock on bond->lock before calling this.
1274 */
1275static bool slaves_support_netpoll(struct net_device *bond_dev)
1276{
1277 struct bonding *bond = netdev_priv(bond_dev);
1278 struct slave *slave;
1279 int i = 0;
1280 bool ret = true;
1281
1282 bond_for_each_slave(bond, slave, i) {
1283 if ((slave->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
1284 !slave->dev->netdev_ops->ndo_poll_controller)
1285 ret = false;
1286 }
1287 return i != 0 && ret;
1288}
1289
1290static void bond_poll_controller(struct net_device *bond_dev)
1291{
1292 struct net_device *dev = bond_dev->npinfo->netpoll->real_dev;
1293 if (dev != bond_dev)
1294 netpoll_poll_dev(dev);
1295}
1296
1297static void bond_netpoll_cleanup(struct net_device *bond_dev)
1298{
1299 struct bonding *bond = netdev_priv(bond_dev);
1300 struct slave *slave;
1301 const struct net_device_ops *ops;
1302 int i;
1303
1304 read_lock(&bond->lock);
1305 bond_dev->npinfo = NULL;
1306 bond_for_each_slave(bond, slave, i) {
1307 if (slave->dev) {
1308 ops = slave->dev->netdev_ops;
1309 if (ops->ndo_netpoll_cleanup)
1310 ops->ndo_netpoll_cleanup(slave->dev);
1311 else
1312 slave->dev->npinfo = NULL;
1313 }
1314 }
1315 read_unlock(&bond->lock);
1316}
1317
1318#else
1319
1320static void bond_netpoll_cleanup(struct net_device *bond_dev)
1321{
1322}
1323
1324#endif
1325
1332/*---------------------------------- IOCTL ----------------------------------*/ 1326/*---------------------------------- IOCTL ----------------------------------*/
1333 1327
1334static int bond_sethwaddr(struct net_device *bond_dev, 1328static int bond_sethwaddr(struct net_device *bond_dev,
@@ -1411,7 +1405,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1411 struct bonding *bond = netdev_priv(bond_dev); 1405 struct bonding *bond = netdev_priv(bond_dev);
1412 const struct net_device_ops *slave_ops = slave_dev->netdev_ops; 1406 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1413 struct slave *new_slave = NULL; 1407 struct slave *new_slave = NULL;
1414 struct dev_mc_list *dmi; 1408 struct netdev_hw_addr *ha;
1415 struct sockaddr addr; 1409 struct sockaddr addr;
1416 int link_reporting; 1410 int link_reporting;
1417 int old_features = bond_dev->features; 1411 int old_features = bond_dev->features;
@@ -1485,14 +1479,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1485 bond_dev->name, 1479 bond_dev->name,
1486 bond_dev->type, slave_dev->type); 1480 bond_dev->type, slave_dev->type);
1487 1481
1488 netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE); 1482 res = netdev_bonding_change(bond_dev,
1483 NETDEV_PRE_TYPE_CHANGE);
1484 res = notifier_to_errno(res);
1485 if (res) {
1486 pr_err("%s: refused to change device type\n",
1487 bond_dev->name);
1488 res = -EBUSY;
1489 goto err_undo_flags;
1490 }
1491
1492 /* Flush unicast and multicast addresses */
1493 dev_uc_flush(bond_dev);
1494 dev_mc_flush(bond_dev);
1489 1495
1490 if (slave_dev->type != ARPHRD_ETHER) 1496 if (slave_dev->type != ARPHRD_ETHER)
1491 bond_setup_by_slave(bond_dev, slave_dev); 1497 bond_setup_by_slave(bond_dev, slave_dev);
1492 else 1498 else
1493 ether_setup(bond_dev); 1499 ether_setup(bond_dev);
1494 1500
1495 netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE); 1501 netdev_bonding_change(bond_dev,
1502 NETDEV_POST_TYPE_CHANGE);
1496 } 1503 }
1497 } else if (bond_dev->type != slave_dev->type) { 1504 } else if (bond_dev->type != slave_dev->type) {
1498 pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n", 1505 pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1593,9 +1600,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1593 1600
1594 netif_addr_lock_bh(bond_dev); 1601 netif_addr_lock_bh(bond_dev);
1595 /* upload master's mc_list to new slave */ 1602 /* upload master's mc_list to new slave */
1596 for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) 1603 netdev_for_each_mc_addr(ha, bond_dev)
1597 dev_mc_add(slave_dev, dmi->dmi_addr, 1604 dev_mc_add(slave_dev, ha->addr);
1598 dmi->dmi_addrlen, 0);
1599 netif_addr_unlock_bh(bond_dev); 1605 netif_addr_unlock_bh(bond_dev);
1600 } 1606 }
1601 1607
@@ -1603,7 +1609,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1603 /* add lacpdu mc addr to mc list */ 1609 /* add lacpdu mc addr to mc list */
1604 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; 1610 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1605 1611
1606 dev_mc_add(slave_dev, lacpdu_multicast, ETH_ALEN, 0); 1612 dev_mc_add(slave_dev, lacpdu_multicast);
1607 } 1613 }
1608 1614
1609 bond_add_vlans_on_slave(bond, slave_dev); 1615 bond_add_vlans_on_slave(bond, slave_dev);
@@ -1735,6 +1741,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1735 1741
1736 bond_set_carrier(bond); 1742 bond_set_carrier(bond);
1737 1743
1744#ifdef CONFIG_NET_POLL_CONTROLLER
1745 if (slaves_support_netpoll(bond_dev)) {
1746 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
1747 if (bond_dev->npinfo)
1748 slave_dev->npinfo = bond_dev->npinfo;
1749 } else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
1750 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
1751 pr_info("New slave device %s does not support netpoll\n",
1752 slave_dev->name);
1753 pr_info("Disabling netpoll support for %s\n", bond_dev->name);
1754 }
1755#endif
1738 read_unlock(&bond->lock); 1756 read_unlock(&bond->lock);
1739 1757
1740 res = bond_create_slave_symlinks(bond_dev, slave_dev); 1758 res = bond_create_slave_symlinks(bond_dev, slave_dev);
@@ -1801,6 +1819,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1801 return -EINVAL; 1819 return -EINVAL;
1802 } 1820 }
1803 1821
1822 netdev_bonding_change(bond_dev, NETDEV_BONDING_DESLAVE);
1804 write_lock_bh(&bond->lock); 1823 write_lock_bh(&bond->lock);
1805 1824
1806 slave = bond_get_slave_by_dev(bond, slave_dev); 1825 slave = bond_get_slave_by_dev(bond, slave_dev);
@@ -1929,6 +1948,17 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1929 1948
1930 netdev_set_master(slave_dev, NULL); 1949 netdev_set_master(slave_dev, NULL);
1931 1950
1951#ifdef CONFIG_NET_POLL_CONTROLLER
1952 read_lock_bh(&bond->lock);
1953 if (slaves_support_netpoll(bond_dev))
1954 bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
1955 read_unlock_bh(&bond->lock);
1956 if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
1957 slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
1958 else
1959 slave_dev->npinfo = NULL;
1960#endif
1961
1932 /* close slave before restoring its mac address */ 1962 /* close slave before restoring its mac address */
1933 dev_close(slave_dev); 1963 dev_close(slave_dev);
1934 1964
@@ -3905,10 +3935,24 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3905 return res; 3935 return res;
3906} 3936}
3907 3937
3938static bool bond_addr_in_mc_list(unsigned char *addr,
3939 struct netdev_hw_addr_list *list,
3940 int addrlen)
3941{
3942 struct netdev_hw_addr *ha;
3943
3944 netdev_hw_addr_list_for_each(ha, list)
3945 if (!memcmp(ha->addr, addr, addrlen))
3946 return true;
3947
3948 return false;
3949}
3950
3908static void bond_set_multicast_list(struct net_device *bond_dev) 3951static void bond_set_multicast_list(struct net_device *bond_dev)
3909{ 3952{
3910 struct bonding *bond = netdev_priv(bond_dev); 3953 struct bonding *bond = netdev_priv(bond_dev);
3911 struct dev_mc_list *dmi; 3954 struct netdev_hw_addr *ha;
3955 bool found;
3912 3956
3913 /* 3957 /*
3914 * Do promisc before checking multicast_mode 3958 * Do promisc before checking multicast_mode
@@ -3943,20 +3987,25 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
3943 bond->flags = bond_dev->flags; 3987 bond->flags = bond_dev->flags;
3944 3988
3945 /* looking for addresses to add to slaves' mc list */ 3989 /* looking for addresses to add to slaves' mc list */
3946 for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) { 3990 netdev_for_each_mc_addr(ha, bond_dev) {
3947 if (!bond_mc_list_find_dmi(dmi, bond->mc_list)) 3991 found = bond_addr_in_mc_list(ha->addr, &bond->mc_list,
3948 bond_mc_add(bond, dmi->dmi_addr, dmi->dmi_addrlen); 3992 bond_dev->addr_len);
3993 if (!found)
3994 bond_mc_add(bond, ha->addr);
3949 } 3995 }
3950 3996
3951 /* looking for addresses to delete from slaves' list */ 3997 /* looking for addresses to delete from slaves' list */
3952 for (dmi = bond->mc_list; dmi; dmi = dmi->next) { 3998 netdev_hw_addr_list_for_each(ha, &bond->mc_list) {
3953 if (!bond_mc_list_find_dmi(dmi, bond_dev->mc_list)) 3999 found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc,
3954 bond_mc_delete(bond, dmi->dmi_addr, dmi->dmi_addrlen); 4000 bond_dev->addr_len);
4001 if (!found)
4002 bond_mc_del(bond, ha->addr);
3955 } 4003 }
3956 4004
3957 /* save master's multicast list */ 4005 /* save master's multicast list */
3958 bond_mc_list_destroy(bond); 4006 __hw_addr_flush(&bond->mc_list);
3959 bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC); 4007 __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc,
4008 bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST);
3960 4009
3961 read_unlock(&bond->lock); 4010 read_unlock(&bond->lock);
3962} 4011}
@@ -4448,6 +4497,10 @@ static const struct net_device_ops bond_netdev_ops = {
4448 .ndo_vlan_rx_register = bond_vlan_rx_register, 4497 .ndo_vlan_rx_register = bond_vlan_rx_register,
4449 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid, 4498 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
4450 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid, 4499 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
4500#ifdef CONFIG_NET_POLL_CONTROLLER
4501 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
4502 .ndo_poll_controller = bond_poll_controller,
4503#endif
4451}; 4504};
4452 4505
4453static void bond_destructor(struct net_device *bond_dev) 4506static void bond_destructor(struct net_device *bond_dev)
@@ -4541,6 +4594,8 @@ static void bond_uninit(struct net_device *bond_dev)
4541{ 4594{
4542 struct bonding *bond = netdev_priv(bond_dev); 4595 struct bonding *bond = netdev_priv(bond_dev);
4543 4596
4597 bond_netpoll_cleanup(bond_dev);
4598
4544 /* Release the bonded slaves */ 4599 /* Release the bonded slaves */
4545 bond_release_all(bond_dev); 4600 bond_release_all(bond_dev);
4546 4601
@@ -4550,9 +4605,7 @@ static void bond_uninit(struct net_device *bond_dev)
4550 4605
4551 bond_remove_proc_entry(bond); 4606 bond_remove_proc_entry(bond);
4552 4607
4553 netif_addr_lock_bh(bond_dev); 4608 __hw_addr_flush(&bond->mc_list);
4554 bond_mc_list_destroy(bond);
4555 netif_addr_unlock_bh(bond_dev);
4556} 4609}
4557 4610
4558/*------------------------- Module initialization ---------------------------*/ 4611/*------------------------- Module initialization ---------------------------*/
@@ -4683,13 +4736,13 @@ static int bond_check_params(struct bond_params *params)
4683 } 4736 }
4684 4737
4685 if (num_grat_arp < 0 || num_grat_arp > 255) { 4738 if (num_grat_arp < 0 || num_grat_arp > 255) {
4686 pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1 \n", 4739 pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1\n",
4687 num_grat_arp); 4740 num_grat_arp);
4688 num_grat_arp = 1; 4741 num_grat_arp = 1;
4689 } 4742 }
4690 4743
4691 if (num_unsol_na < 0 || num_unsol_na > 255) { 4744 if (num_unsol_na < 0 || num_unsol_na > 255) {
4692 pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1 \n", 4745 pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
4693 num_unsol_na); 4746 num_unsol_na);
4694 num_unsol_na = 1; 4747 num_unsol_na = 1;
4695 } 4748 }
@@ -4924,6 +4977,8 @@ static int bond_init(struct net_device *bond_dev)
4924 list_add_tail(&bond->bond_list, &bn->dev_list); 4977 list_add_tail(&bond->bond_list, &bn->dev_list);
4925 4978
4926 bond_prepare_sysfs_group(bond); 4979 bond_prepare_sysfs_group(bond);
4980
4981 __hw_addr_init(&bond->mc_list);
4927 return 0; 4982 return 0;
4928} 4983}
4929 4984
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 257a7a4dfce9..2aa336720591 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -202,7 +202,7 @@ struct bonding {
202 char proc_file_name[IFNAMSIZ]; 202 char proc_file_name[IFNAMSIZ];
203#endif /* CONFIG_PROC_FS */ 203#endif /* CONFIG_PROC_FS */
204 struct list_head bond_list; 204 struct list_head bond_list;
205 struct dev_mc_list *mc_list; 205 struct netdev_hw_addr_list mc_list;
206 int (*xmit_hash_policy)(struct sk_buff *, int); 206 int (*xmit_hash_policy)(struct sk_buff *, int);
207 __be32 master_ip; 207 __be32 master_ip;
208 u16 flags; 208 u16 flags;
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
new file mode 100644
index 000000000000..0b28e0107697
--- /dev/null
+++ b/drivers/net/caif/Kconfig
@@ -0,0 +1,17 @@
1#
2# CAIF physical drivers
3#
4
5if CAIF
6
7comment "CAIF transport drivers"
8
9config CAIF_TTY
10 tristate "CAIF TTY transport driver"
11 default n
12 ---help---
13 The CAIF TTY transport driver is a Line Discipline (ldisc)
14 identified as N_CAIF. When this ldisc is opened from user space
15 it will redirect the TTY's traffic into the CAIF stack.
16
17endif # CAIF
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
new file mode 100644
index 000000000000..52b6d1f826f8
--- /dev/null
+++ b/drivers/net/caif/Makefile
@@ -0,0 +1,12 @@
1ifeq ($(CONFIG_CAIF_DEBUG),1)
2CAIF_DBG_FLAGS := -DDEBUG
3endif
4
5KBUILD_EXTRA_SYMBOLS=net/caif/Module.symvers
6
7ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
8clean-dirs:= .tmp_versions
9clean-files:= Module.symvers modules.order *.cmd *~ \
10
11# Serial interface
12obj-$(CONFIG_CAIF_TTY) += caif_serial.o
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
new file mode 100644
index 000000000000..09257ca8f563
--- /dev/null
+++ b/drivers/net/caif/caif_serial.c
@@ -0,0 +1,449 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/init.h>
8#include <linux/version.h>
9#include <linux/module.h>
10#include <linux/device.h>
11#include <linux/types.h>
12#include <linux/skbuff.h>
13#include <linux/netdevice.h>
14#include <linux/rtnetlink.h>
15#include <linux/tty.h>
16#include <linux/file.h>
17#include <linux/if_arp.h>
18#include <net/caif/caif_device.h>
19#include <net/caif/cfcnfg.h>
20#include <linux/err.h>
21#include <linux/debugfs.h>
22
23MODULE_LICENSE("GPL");
24MODULE_AUTHOR("Sjur Brendeland<sjur.brandeland@stericsson.com>");
25MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
26MODULE_LICENSE("GPL");
27MODULE_ALIAS_LDISC(N_CAIF);
28
29#define SEND_QUEUE_LOW 10
30#define SEND_QUEUE_HIGH 100
31#define CAIF_SENDING 1 /* Bit 1 = 0x02*/
32#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
33#define MAX_WRITE_CHUNK 4096
34#define ON 1
35#define OFF 0
36#define CAIF_MAX_MTU 4096
37
38/*This list is protected by the rtnl lock. */
39static LIST_HEAD(ser_list);
40
41static int ser_loop;
42module_param(ser_loop, bool, S_IRUGO);
43MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
44
45static int ser_use_stx = 1;
46module_param(ser_use_stx, bool, S_IRUGO);
47MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
48
49static int ser_use_fcs = 1;
50
51module_param(ser_use_fcs, bool, S_IRUGO);
52MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
53
54static int ser_write_chunk = MAX_WRITE_CHUNK;
55module_param(ser_write_chunk, int, S_IRUGO);
56
57MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");
58
59static struct dentry *debugfsdir;
60
61static int caif_net_open(struct net_device *dev);
62static int caif_net_close(struct net_device *dev);
63
64struct ser_device {
65 struct caif_dev_common common;
66 struct list_head node;
67 struct net_device *dev;
68 struct sk_buff_head head;
69 struct tty_struct *tty;
70 bool tx_started;
71 unsigned long state;
72 char *tty_name;
73#ifdef CONFIG_DEBUG_FS
74 struct dentry *debugfs_tty_dir;
75 struct debugfs_blob_wrapper tx_blob;
76 struct debugfs_blob_wrapper rx_blob;
77 u8 rx_data[128];
78 u8 tx_data[128];
79 u8 tty_status;
80
81#endif
82};
83
84static void caifdev_setup(struct net_device *dev);
85static void ldisc_tx_wakeup(struct tty_struct *tty);
86#ifdef CONFIG_DEBUG_FS
87static inline void update_tty_status(struct ser_device *ser)
88{
89 ser->tty_status =
90 ser->tty->stopped << 5 |
91 ser->tty->hw_stopped << 4 |
92 ser->tty->flow_stopped << 3 |
93 ser->tty->packet << 2 |
94 ser->tty->low_latency << 1 |
95 ser->tty->warned;
96}
97static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
98{
99 ser->debugfs_tty_dir =
100 debugfs_create_dir(tty->name, debugfsdir);
101 if (!IS_ERR(ser->debugfs_tty_dir)) {
102 debugfs_create_blob("last_tx_msg", S_IRUSR,
103 ser->debugfs_tty_dir,
104 &ser->tx_blob);
105
106 debugfs_create_blob("last_rx_msg", S_IRUSR,
107 ser->debugfs_tty_dir,
108 &ser->rx_blob);
109
110 debugfs_create_x32("ser_state", S_IRUSR,
111 ser->debugfs_tty_dir,
112 (u32 *)&ser->state);
113
114 debugfs_create_x8("tty_status", S_IRUSR,
115 ser->debugfs_tty_dir,
116 &ser->tty_status);
117
118 }
119 ser->tx_blob.data = ser->tx_data;
120 ser->tx_blob.size = 0;
121 ser->rx_blob.data = ser->rx_data;
122 ser->rx_blob.size = 0;
123}
124
125static inline void debugfs_deinit(struct ser_device *ser)
126{
127 debugfs_remove_recursive(ser->debugfs_tty_dir);
128}
129
130static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
131{
132 if (size > sizeof(ser->rx_data))
133 size = sizeof(ser->rx_data);
134 memcpy(ser->rx_data, data, size);
135 ser->rx_blob.data = ser->rx_data;
136 ser->rx_blob.size = size;
137}
138
139static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
140{
141 if (size > sizeof(ser->tx_data))
142 size = sizeof(ser->tx_data);
143 memcpy(ser->tx_data, data, size);
144 ser->tx_blob.data = ser->tx_data;
145 ser->tx_blob.size = size;
146}
147#else
148static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
149{
150}
151
152static inline void debugfs_deinit(struct ser_device *ser)
153{
154}
155
156static inline void update_tty_status(struct ser_device *ser)
157{
158}
159
160static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
161{
162}
163
164static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
165{
166}
167
168#endif
169
170static void ldisc_receive(struct tty_struct *tty, const u8 *data,
171 char *flags, int count)
172{
173 struct sk_buff *skb = NULL;
174 struct ser_device *ser;
175 int ret;
176 u8 *p;
177 ser = tty->disc_data;
178
179 /*
180 * NOTE: flags may contain information about break or overrun.
181 * This is not yet handled.
182 */
183
184
185 /*
186 * Workaround for garbage at start of transmission,
187 * only enable if STX handling is not enabled.
188 */
189 if (!ser->common.use_stx && !ser->tx_started) {
190 dev_info(&ser->dev->dev,
191 "Bytes received before initial transmission -"
192 "bytes discarded.\n");
193 return;
194 }
195
196 BUG_ON(ser->dev == NULL);
197
198 /* Get a suitable caif packet and copy in data. */
199 skb = netdev_alloc_skb(ser->dev, count+1);
200 if (skb == NULL)
201 return;
202 p = skb_put(skb, count);
203 memcpy(p, data, count);
204
205 skb->protocol = htons(ETH_P_CAIF);
206 skb_reset_mac_header(skb);
207 skb->dev = ser->dev;
208 debugfs_rx(ser, data, count);
209 /* Push received packet up the stack. */
210 ret = netif_rx_ni(skb);
211 if (!ret) {
212 ser->dev->stats.rx_packets++;
213 ser->dev->stats.rx_bytes += count;
214 } else
215 ++ser->dev->stats.rx_dropped;
216 update_tty_status(ser);
217}
218
219static int handle_tx(struct ser_device *ser)
220{
221 struct tty_struct *tty;
222 struct sk_buff *skb;
223 int tty_wr, len, room;
224 tty = ser->tty;
225 ser->tx_started = true;
226
227 /* Enter critical section */
228 if (test_and_set_bit(CAIF_SENDING, &ser->state))
229 return 0;
230
231 /* skb_peek is safe because handle_tx is called after skb_queue_tail */
232 while ((skb = skb_peek(&ser->head)) != NULL) {
233
234 /* Make sure you don't write too much */
235 len = skb->len;
236 room = tty_write_room(tty);
237 if (!room)
238 break;
239 if (room > ser_write_chunk)
240 room = ser_write_chunk;
241 if (len > room)
242 len = room;
243
244 /* Write to tty or loopback */
245 if (!ser_loop) {
246 tty_wr = tty->ops->write(tty, skb->data, len);
247 update_tty_status(ser);
248 } else {
249 tty_wr = len;
250 ldisc_receive(tty, skb->data, NULL, len);
251 }
252 ser->dev->stats.tx_packets++;
253 ser->dev->stats.tx_bytes += tty_wr;
254
255 /* Error on TTY ?! */
256 if (tty_wr < 0)
257 goto error;
258 /* Reduce buffer written, and discard if empty */
259 skb_pull(skb, tty_wr);
260 if (skb->len == 0) {
261 struct sk_buff *tmp = skb_dequeue(&ser->head);
262 BUG_ON(tmp != skb);
263 if (in_interrupt())
264 dev_kfree_skb_irq(skb);
265 else
266 kfree_skb(skb);
267 }
268 }
269 /* Send flow off if queue is empty */
270 if (ser->head.qlen <= SEND_QUEUE_LOW &&
271 test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
272 ser->common.flowctrl != NULL)
273 ser->common.flowctrl(ser->dev, ON);
274 clear_bit(CAIF_SENDING, &ser->state);
275 return 0;
276error:
277 clear_bit(CAIF_SENDING, &ser->state);
278 return tty_wr;
279}
280
281static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
282{
283 struct ser_device *ser;
284 BUG_ON(dev == NULL);
285 ser = netdev_priv(dev);
286
287 /* Send flow off once, on high water mark */
288 if (ser->head.qlen > SEND_QUEUE_HIGH &&
289 !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
290 ser->common.flowctrl != NULL)
291
292 ser->common.flowctrl(ser->dev, OFF);
293
294 skb_queue_tail(&ser->head, skb);
295 return handle_tx(ser);
296}
297
298
299static void ldisc_tx_wakeup(struct tty_struct *tty)
300{
301 struct ser_device *ser;
302 ser = tty->disc_data;
303 BUG_ON(ser == NULL);
304 BUG_ON(ser->tty != tty);
305 handle_tx(ser);
306}
307
308
309static int ldisc_open(struct tty_struct *tty)
310{
311 struct ser_device *ser;
312 struct net_device *dev;
313 char name[64];
314 int result;
315
316 /* No write no play */
317 if (tty->ops->write == NULL)
318 return -EOPNOTSUPP;
319 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
320 return -EPERM;
321
322 sprintf(name, "cf%s", tty->name);
323 dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
324 ser = netdev_priv(dev);
325 ser->tty = tty_kref_get(tty);
326 ser->dev = dev;
327 debugfs_init(ser, tty);
328 tty->receive_room = N_TTY_BUF_SIZE;
329 tty->disc_data = ser;
330 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
331 rtnl_lock();
332 result = register_netdevice(dev);
333 if (result) {
334 rtnl_unlock();
335 free_netdev(dev);
336 return -ENODEV;
337 }
338
339 list_add(&ser->node, &ser_list);
340 rtnl_unlock();
341 netif_stop_queue(dev);
342 update_tty_status(ser);
343 return 0;
344}
345
346static void ldisc_close(struct tty_struct *tty)
347{
348 struct ser_device *ser = tty->disc_data;
349 /* Remove may be called inside or outside of rtnl_lock */
350 int islocked = rtnl_is_locked();
351 if (!islocked)
352 rtnl_lock();
353 /* device is freed automagically by net-sysfs */
354 dev_close(ser->dev);
355 unregister_netdevice(ser->dev);
356 list_del(&ser->node);
357 debugfs_deinit(ser);
358 tty_kref_put(ser->tty);
359 if (!islocked)
360 rtnl_unlock();
361}
362
363/* The line discipline structure. */
364static struct tty_ldisc_ops caif_ldisc = {
365 .owner = THIS_MODULE,
366 .magic = TTY_LDISC_MAGIC,
367 .name = "n_caif",
368 .open = ldisc_open,
369 .close = ldisc_close,
370 .receive_buf = ldisc_receive,
371 .write_wakeup = ldisc_tx_wakeup
372};
373
374static int register_ldisc(void)
375{
376 int result;
377 result = tty_register_ldisc(N_CAIF, &caif_ldisc);
378 if (result < 0) {
379 pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
380 result);
381 return result;
382 }
383 return result;
384}
385static const struct net_device_ops netdev_ops = {
386 .ndo_open = caif_net_open,
387 .ndo_stop = caif_net_close,
388 .ndo_start_xmit = caif_xmit
389};
390
391static void caifdev_setup(struct net_device *dev)
392{
393 struct ser_device *serdev = netdev_priv(dev);
394 dev->features = 0;
395 dev->netdev_ops = &netdev_ops;
396 dev->type = ARPHRD_CAIF;
397 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
398 dev->mtu = CAIF_MAX_MTU;
399 dev->hard_header_len = CAIF_NEEDED_HEADROOM;
400 dev->tx_queue_len = 0;
401 dev->destructor = free_netdev;
402 skb_queue_head_init(&serdev->head);
403 serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
404 serdev->common.use_frag = true;
405 serdev->common.use_stx = ser_use_stx;
406 serdev->common.use_fcs = ser_use_fcs;
407 serdev->dev = dev;
408}
409
410
411static int caif_net_open(struct net_device *dev)
412{
413 struct ser_device *ser;
414 ser = netdev_priv(dev);
415 netif_wake_queue(dev);
416 return 0;
417}
418
419static int caif_net_close(struct net_device *dev)
420{
421 netif_stop_queue(dev);
422 return 0;
423}
424
425static int __init caif_ser_init(void)
426{
427 int ret;
428 ret = register_ldisc();
429 debugfsdir = debugfs_create_dir("caif_serial", NULL);
430 return ret;
431}
432
433static void __exit caif_ser_exit(void)
434{
435 struct ser_device *ser = NULL;
436 struct list_head *node;
437 struct list_head *_tmp;
438 list_for_each_safe(node, _tmp, &ser_list) {
439 ser = list_entry(node, struct ser_device, node);
440 dev_close(ser->dev);
441 unregister_netdevice(ser->dev);
442 list_del(node);
443 }
444 tty_unregister_ldisc(N_CAIF);
445 debugfs_remove_recursive(debugfsdir);
446}
447
448module_init(caif_ser_init);
449module_exit(caif_ser_exit);
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index a2f29a38798a..2d8bd86bc5e2 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -35,7 +35,6 @@
35#include <linux/string.h> 35#include <linux/string.h>
36#include <linux/types.h> 36#include <linux/types.h>
37 37
38#include <linux/can.h>
39#include <linux/can/dev.h> 38#include <linux/can/dev.h>
40#include <linux/can/error.h> 39#include <linux/can/error.h>
41 40
@@ -376,7 +375,6 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
376 at91_write(priv, AT91_MCR(mb), reg_mcr); 375 at91_write(priv, AT91_MCR(mb), reg_mcr);
377 376
378 stats->tx_bytes += cf->can_dlc; 377 stats->tx_bytes += cf->can_dlc;
379 dev->trans_start = jiffies;
380 378
381 /* _NOTE_: substract AT91_MB_TX_FIRST offset from mb! */ 379 /* _NOTE_: substract AT91_MB_TX_FIRST offset from mb! */
382 can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST); 380 can_put_echo_skb(skb, dev, mb - AT91_MB_TX_FIRST);
@@ -662,7 +660,6 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
662 at91_poll_err_frame(dev, cf, reg_sr); 660 at91_poll_err_frame(dev, cf, reg_sr);
663 netif_receive_skb(skb); 661 netif_receive_skb(skb);
664 662
665 dev->last_rx = jiffies;
666 dev->stats.rx_packets++; 663 dev->stats.rx_packets++;
667 dev->stats.rx_bytes += cf->can_dlc; 664 dev->stats.rx_bytes += cf->can_dlc;
668 665
@@ -899,7 +896,6 @@ static void at91_irq_err(struct net_device *dev)
899 at91_irq_err_state(dev, cf, new_state); 896 at91_irq_err_state(dev, cf, new_state);
900 netif_rx(skb); 897 netif_rx(skb);
901 898
902 dev->last_rx = jiffies;
903 dev->stats.rx_packets++; 899 dev->stats.rx_packets++;
904 dev->stats.rx_bytes += cf->can_dlc; 900 dev->stats.rx_bytes += cf->can_dlc;
905 901
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 03489864376d..b6e890d28366 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -18,7 +18,6 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20 20
21#include <linux/can.h>
22#include <linux/can/dev.h> 21#include <linux/can/dev.h>
23#include <linux/can/error.h> 22#include <linux/can/error.h>
24 23
@@ -270,8 +269,6 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
270 /* fill data length code */ 269 /* fill data length code */
271 bfin_write16(&reg->chl[TRANSMIT_CHL].dlc, dlc); 270 bfin_write16(&reg->chl[TRANSMIT_CHL].dlc, dlc);
272 271
273 dev->trans_start = jiffies;
274
275 can_put_echo_skb(skb, dev, 0); 272 can_put_echo_skb(skb, dev, 0);
276 273
277 /* set transmit request */ 274 /* set transmit request */
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index b39b108318b4..b11a0cb5ed81 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -58,7 +58,6 @@
58 * 58 *
59 */ 59 */
60 60
61#include <linux/can.h>
62#include <linux/can/core.h> 61#include <linux/can/core.h>
63#include <linux/can/dev.h> 62#include <linux/can/dev.h>
64#include <linux/can/platform/mcp251x.h> 63#include <linux/can/platform/mcp251x.h>
@@ -476,7 +475,6 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
476 475
477 netif_stop_queue(net); 476 netif_stop_queue(net);
478 priv->tx_skb = skb; 477 priv->tx_skb = skb;
479 net->trans_start = jiffies;
480 queue_work(priv->wq, &priv->tx_work); 478 queue_work(priv->wq, &priv->tx_work);
481 479
482 return NETDEV_TX_OK; 480 return NETDEV_TX_OK;
@@ -923,12 +921,16 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
923 struct net_device *net; 921 struct net_device *net;
924 struct mcp251x_priv *priv; 922 struct mcp251x_priv *priv;
925 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 923 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
924 int model = spi_get_device_id(spi)->driver_data;
926 int ret = -ENODEV; 925 int ret = -ENODEV;
927 926
928 if (!pdata) 927 if (!pdata)
929 /* Platform data is required for osc freq */ 928 /* Platform data is required for osc freq */
930 goto error_out; 929 goto error_out;
931 930
931 if (model)
932 pdata->model = model;
933
932 /* Allocate can/net device */ 934 /* Allocate can/net device */
933 net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX); 935 net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
934 if (!net) { 936 if (!net) {
@@ -1118,6 +1120,15 @@ static int mcp251x_can_resume(struct spi_device *spi)
1118#define mcp251x_can_resume NULL 1120#define mcp251x_can_resume NULL
1119#endif 1121#endif
1120 1122
1123static struct spi_device_id mcp251x_id_table[] = {
1124 { "mcp251x", 0 /* Use pdata.model */ },
1125 { "mcp2510", CAN_MCP251X_MCP2510 },
1126 { "mcp2515", CAN_MCP251X_MCP2515 },
1127 { },
1128};
1129
1130MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
1131
1121static struct spi_driver mcp251x_can_driver = { 1132static struct spi_driver mcp251x_can_driver = {
1122 .driver = { 1133 .driver = {
1123 .name = DEVICE_NAME, 1134 .name = DEVICE_NAME,
@@ -1125,6 +1136,7 @@ static struct spi_driver mcp251x_can_driver = {
1125 .owner = THIS_MODULE, 1136 .owner = THIS_MODULE,
1126 }, 1137 },
1127 1138
1139 .id_table = mcp251x_id_table,
1128 .probe = mcp251x_can_probe, 1140 .probe = mcp251x_can_probe,
1129 .remove = __devexit_p(mcp251x_can_remove), 1141 .remove = __devexit_p(mcp251x_can_remove),
1130 .suspend = mcp251x_can_suspend, 1142 .suspend = mcp251x_can_suspend,
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 2120784f8db4..8af8442c694a 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -25,7 +25,6 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/netdevice.h> 27#include <linux/netdevice.h>
28#include <linux/can.h>
29#include <linux/can/dev.h> 28#include <linux/can/dev.h>
30#include <linux/of_platform.h> 29#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h> 30#include <sysdev/fsl_soc.h>
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 6b7dd578d417..64c378cd0c34 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -28,7 +28,6 @@
28#include <linux/if_arp.h> 28#include <linux/if_arp.h>
29#include <linux/if_ether.h> 29#include <linux/if_ether.h>
30#include <linux/list.h> 30#include <linux/list.h>
31#include <linux/can.h>
32#include <linux/can/dev.h> 31#include <linux/can/dev.h>
33#include <linux/can/error.h> 32#include <linux/can/error.h>
34#include <linux/io.h> 33#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 9e277d64a318..ae3505afd682 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -53,7 +53,9 @@ config CAN_PLX_PCI
53 Driver supports now: 53 Driver supports now:
54 - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/) 54 - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
55 - Adlink PCI-7841/cPCI-7841 SE card 55 - Adlink PCI-7841/cPCI-7841 SE card
56 - esd CAN-PCI/CPCI/PCI104/200 (http://www.esd.eu/)
57 - esd CAN-PCI/PMC/266
58 - esd CAN-PCIe/2000
56 - Marathon CAN-bus-PCI card (http://www.marathon.ru/) 59 - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
57 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/) 60 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
58
59endif 61endif
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 5f53da0bc40c..36f4f9780c30 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -24,7 +24,6 @@
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/can.h>
28#include <linux/can/dev.h> 27#include <linux/can/dev.h>
29#include <linux/io.h> 28#include <linux/io.h>
30 29
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 441e776a7f59..ed004cebd31f 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -36,7 +36,6 @@
36#include <linux/netdevice.h> 36#include <linux/netdevice.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/can.h>
40#include <linux/can/dev.h> 39#include <linux/can/dev.h>
41#include <linux/io.h> 40#include <linux/io.h>
42 41
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 4aff4070db96..437b5c716a24 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -27,7 +27,6 @@
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/pci.h> 29#include <linux/pci.h>
30#include <linux/can.h>
31#include <linux/can/dev.h> 30#include <linux/can/dev.h>
32#include <linux/io.h> 31#include <linux/io.h>
33 32
@@ -41,7 +40,10 @@ MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
41MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, " 40MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
42 "Adlink PCI-7841/cPCI-7841 SE, " 41 "Adlink PCI-7841/cPCI-7841 SE, "
43 "Marathon CAN-bus-PCI, " 42 "Marathon CAN-bus-PCI, "
44 "TEWS TECHNOLOGIES TPMC810"); 43 "TEWS TECHNOLOGIES TPMC810, "
44 "esd CAN-PCI/CPCI/PCI104/200, "
45 "esd CAN-PCI/PMC/266, "
46 "esd CAN-PCIe/2000")
45MODULE_LICENSE("GPL v2"); 47MODULE_LICENSE("GPL v2");
46 48
47#define PLX_PCI_MAX_CHAN 2 49#define PLX_PCI_MAX_CHAN 2
@@ -50,11 +52,14 @@ struct plx_pci_card {
50 int channels; /* detected channels count */ 52 int channels; /* detected channels count */
51 struct net_device *net_dev[PLX_PCI_MAX_CHAN]; 53 struct net_device *net_dev[PLX_PCI_MAX_CHAN];
52 void __iomem *conf_addr; 54 void __iomem *conf_addr;
55
56 /* Pointer to device-dependent reset function */
57 void (*reset_func)(struct pci_dev *pdev);
53}; 58};
54 59
55#define PLX_PCI_CAN_CLOCK (16000000 / 2) 60#define PLX_PCI_CAN_CLOCK (16000000 / 2)
56 61
57/* PLX90xx registers */ 62/* PLX9030/9050/9052 registers */
58#define PLX_INTCSR 0x4c /* Interrupt Control/Status */ 63#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
59#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response, 64#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
60 * Serial EEPROM, and Initialization 65 * Serial EEPROM, and Initialization
@@ -66,6 +71,14 @@ struct plx_pci_card {
66#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */ 71#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
67#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */ 72#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
68 73
74/* PLX9056 registers */
75#define PLX9056_INTCSR 0x68 /* Interrupt Control/Status */
76#define PLX9056_CNTRL 0x6c /* Control / Software Reset */
77
78#define PLX9056_LINTI (1 << 11)
79#define PLX9056_PCI_INT_EN (1 << 8)
80#define PLX9056_PCI_RCR (1 << 29) /* Read Configuration Registers */
81
69/* 82/*
70 * The board configuration is probably following: 83 * The board configuration is probably following:
71 * RX1 is connected to ground. 84 * RX1 is connected to ground.
@@ -101,6 +114,13 @@ struct plx_pci_card {
101#define ADLINK_PCI_VENDOR_ID 0x144A 114#define ADLINK_PCI_VENDOR_ID 0x144A
102#define ADLINK_PCI_DEVICE_ID 0x7841 115#define ADLINK_PCI_DEVICE_ID 0x7841
103 116
117#define ESD_PCI_SUB_SYS_ID_PCI200 0x0004
118#define ESD_PCI_SUB_SYS_ID_PCI266 0x0009
119#define ESD_PCI_SUB_SYS_ID_PMC266 0x000e
120#define ESD_PCI_SUB_SYS_ID_CPCI200 0x010b
121#define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200
122#define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501
123
104#define MARATHON_PCI_DEVICE_ID 0x2715 124#define MARATHON_PCI_DEVICE_ID 0x2715
105 125
106#define TEWS_PCI_VENDOR_ID 0x1498 126#define TEWS_PCI_VENDOR_ID 0x1498
@@ -108,6 +128,7 @@ struct plx_pci_card {
108 128
109static void plx_pci_reset_common(struct pci_dev *pdev); 129static void plx_pci_reset_common(struct pci_dev *pdev);
110static void plx_pci_reset_marathon(struct pci_dev *pdev); 130static void plx_pci_reset_marathon(struct pci_dev *pdev);
131static void plx9056_pci_reset_common(struct pci_dev *pdev);
111 132
112struct plx_pci_channel_map { 133struct plx_pci_channel_map {
113 u32 bar; 134 u32 bar;
@@ -148,6 +169,30 @@ static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
148 /* based on PLX9052 */ 169 /* based on PLX9052 */
149}; 170};
150 171
172static struct plx_pci_card_info plx_pci_card_info_esd200 __devinitdata = {
173 "esd CAN-PCI/CPCI/PCI104/200", 2,
174 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
175 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
176 &plx_pci_reset_common
177 /* based on PLX9030/9050 */
178};
179
180static struct plx_pci_card_info plx_pci_card_info_esd266 __devinitdata = {
181 "esd CAN-PCI/PMC/266", 2,
182 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
183 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
184 &plx9056_pci_reset_common
185 /* based on PLX9056 */
186};
187
188static struct plx_pci_card_info plx_pci_card_info_esd2000 __devinitdata = {
189 "esd CAN-PCIe/2000", 2,
190 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
191 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
192 &plx9056_pci_reset_common
193 /* based on PEX8311 */
194};
195
151static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = { 196static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
152 "Marathon CAN-bus-PCI", 2, 197 "Marathon CAN-bus-PCI", 2,
153 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, 198 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
@@ -180,6 +225,48 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
180 (kernel_ulong_t)&plx_pci_card_info_adlink_se 225 (kernel_ulong_t)&plx_pci_card_info_adlink_se
181 }, 226 },
182 { 227 {
228 /* esd CAN-PCI/200 */
229 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
230 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200,
231 0, 0,
232 (kernel_ulong_t)&plx_pci_card_info_esd200
233 },
234 {
235 /* esd CAN-CPCI/200 */
236 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
237 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200,
238 0, 0,
239 (kernel_ulong_t)&plx_pci_card_info_esd200
240 },
241 {
242 /* esd CAN-PCI104/200 */
243 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
244 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200,
245 0, 0,
246 (kernel_ulong_t)&plx_pci_card_info_esd200
247 },
248 {
249 /* esd CAN-PCI/266 */
250 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
251 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266,
252 0, 0,
253 (kernel_ulong_t)&plx_pci_card_info_esd266
254 },
255 {
256 /* esd CAN-PMC/266 */
257 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
258 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266,
259 0, 0,
260 (kernel_ulong_t)&plx_pci_card_info_esd266
261 },
262 {
263 /* esd CAN-PCIE/2000 */
264 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
265 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000,
266 0, 0,
267 (kernel_ulong_t)&plx_pci_card_info_esd2000
268 },
269 {
183 /* Marathon CAN-bus-PCI card */ 270 /* Marathon CAN-bus-PCI card */
184 PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID, 271 PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
185 PCI_ANY_ID, PCI_ANY_ID, 272 PCI_ANY_ID, PCI_ANY_ID,
@@ -242,7 +329,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
242} 329}
243 330
244/* 331/*
245 * PLX90xx software reset 332 * PLX9030/50/52 software reset
246 * Also LRESET# asserts and brings to reset device on the Local Bus (if wired). 333 * Also LRESET# asserts and brings to reset device on the Local Bus (if wired).
247 * For most cards it's enough for reset the SJA1000 chips. 334 * For most cards it's enough for reset the SJA1000 chips.
248 */ 335 */
@@ -259,6 +346,38 @@ static void plx_pci_reset_common(struct pci_dev *pdev)
259 iowrite32(cntrl, card->conf_addr + PLX_CNTRL); 346 iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
260}; 347};
261 348
349/*
350 * PLX9056 software reset
351 * Assert LRESET# and reset device(s) on the Local Bus (if wired).
352 */
353static void plx9056_pci_reset_common(struct pci_dev *pdev)
354{
355 struct plx_pci_card *card = pci_get_drvdata(pdev);
356 u32 cntrl;
357
358 /* issue a local bus reset */
359 cntrl = ioread32(card->conf_addr + PLX9056_CNTRL);
360 cntrl |= PLX_PCI_RESET;
361 iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
362 udelay(100);
363 cntrl ^= PLX_PCI_RESET;
364 iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
365
366 /* reload local configuration from EEPROM */
367 cntrl |= PLX9056_PCI_RCR;
368 iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
369
370 /*
371 * There is no safe way to poll for the end
372 * of reconfiguration process. Waiting for 10ms
373 * is safe.
374 */
375 mdelay(10);
376
377 cntrl ^= PLX9056_PCI_RCR;
378 iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
379};
380
262/* Special reset function for Marathon card */ 381/* Special reset function for Marathon card */
263static void plx_pci_reset_marathon(struct pci_dev *pdev) 382static void plx_pci_reset_marathon(struct pci_dev *pdev)
264{ 383{
@@ -302,13 +421,16 @@ static void plx_pci_del_card(struct pci_dev *pdev)
302 free_sja1000dev(dev); 421 free_sja1000dev(dev);
303 } 422 }
304 423
305 plx_pci_reset_common(pdev); 424 card->reset_func(pdev);
306 425
307 /* 426 /*
308 * Disable interrupts from PCI-card (PLX90xx) and disable Local_1, 427 * Disable interrupts from PCI-card and disable local
309 * Local_2 interrupts 428 * interrupts
310 */ 429 */
311 iowrite32(0x0, card->conf_addr + PLX_INTCSR); 430 if (pdev->device != PCI_DEVICE_ID_PLX_9056)
431 iowrite32(0x0, card->conf_addr + PLX_INTCSR);
432 else
433 iowrite32(0x0, card->conf_addr + PLX9056_INTCSR);
312 434
313 if (card->conf_addr) 435 if (card->conf_addr)
314 pci_iounmap(pdev, card->conf_addr); 436 pci_iounmap(pdev, card->conf_addr);
@@ -367,6 +489,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
367 card->conf_addr = addr + ci->conf_map.offset; 489 card->conf_addr = addr + ci->conf_map.offset;
368 490
369 ci->reset_func(pdev); 491 ci->reset_func(pdev);
492 card->reset_func = ci->reset_func;
370 493
371 /* Detect available channels */ 494 /* Detect available channels */
372 for (i = 0; i < ci->channel_count; i++) { 495 for (i = 0; i < ci->channel_count; i++) {
@@ -438,10 +561,17 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
438 * Enable interrupts from PCI-card (PLX90xx) and enable Local_1, 561 * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
439 * Local_2 interrupts from the SJA1000 chips 562 * Local_2 interrupts from the SJA1000 chips
440 */ 563 */
441 val = ioread32(card->conf_addr + PLX_INTCSR); 564 if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
442 val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN; 565 val = ioread32(card->conf_addr + PLX_INTCSR);
443 iowrite32(val, card->conf_addr + PLX_INTCSR); 566 if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
444 567 val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
568 else
569 val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
570 iowrite32(val, card->conf_addr + PLX_INTCSR);
571 } else {
572 iowrite32(PLX9056_LINTI | PLX9056_PCI_INT_EN,
573 card->conf_addr + PLX9056_INTCSR);
574 }
445 return 0; 575 return 0;
446 576
447failure_cleanup: 577failure_cleanup:
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 145b1a731a53..85f7cbfe8e5f 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -60,7 +60,6 @@
60#include <linux/skbuff.h> 60#include <linux/skbuff.h>
61#include <linux/delay.h> 61#include <linux/delay.h>
62 62
63#include <linux/can.h>
64#include <linux/can/dev.h> 63#include <linux/can/dev.h>
65#include <linux/can/error.h> 64#include <linux/can/error.h>
66 65
@@ -84,6 +83,20 @@ static struct can_bittiming_const sja1000_bittiming_const = {
84 .brp_inc = 1, 83 .brp_inc = 1,
85}; 84};
86 85
86static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
87{
88 unsigned long flags;
89
90 /*
91 * The command register needs some locking and time to settle
92 * the write_reg() operation - especially on SMP systems.
93 */
94 spin_lock_irqsave(&priv->cmdreg_lock, flags);
95 priv->write_reg(priv, REG_CMR, val);
96 priv->read_reg(priv, REG_SR);
97 spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
98}
99
87static int sja1000_probe_chip(struct net_device *dev) 100static int sja1000_probe_chip(struct net_device *dev)
88{ 101{
89 struct sja1000_priv *priv = netdev_priv(dev); 102 struct sja1000_priv *priv = netdev_priv(dev);
@@ -293,11 +306,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
293 for (i = 0; i < dlc; i++) 306 for (i = 0; i < dlc; i++)
294 priv->write_reg(priv, dreg++, cf->data[i]); 307 priv->write_reg(priv, dreg++, cf->data[i]);
295 308
296 dev->trans_start = jiffies;
297
298 can_put_echo_skb(skb, dev, 0); 309 can_put_echo_skb(skb, dev, 0);
299 310
300 priv->write_reg(priv, REG_CMR, CMD_TR); 311 sja1000_write_cmdreg(priv, CMD_TR);
301 312
302 return NETDEV_TX_OK; 313 return NETDEV_TX_OK;
303} 314}
@@ -346,7 +357,7 @@ static void sja1000_rx(struct net_device *dev)
346 cf->can_id = id; 357 cf->can_id = id;
347 358
348 /* release receive buffer */ 359 /* release receive buffer */
349 priv->write_reg(priv, REG_CMR, CMD_RRB); 360 sja1000_write_cmdreg(priv, CMD_RRB);
350 361
351 netif_rx(skb); 362 netif_rx(skb);
352 363
@@ -374,7 +385,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
374 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 385 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
375 stats->rx_over_errors++; 386 stats->rx_over_errors++;
376 stats->rx_errors++; 387 stats->rx_errors++;
377 priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */ 388 sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */
378 } 389 }
379 390
380 if (isrc & IRQ_EI) { 391 if (isrc & IRQ_EI) {
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 97a622b9302f..de8e778f6832 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -167,6 +167,7 @@ struct sja1000_priv {
167 167
168 void __iomem *reg_base; /* ioremap'ed address to registers */ 168 void __iomem *reg_base; /* ioremap'ed address to registers */
169 unsigned long irq_flags; /* for request_irq() */ 169 unsigned long irq_flags; /* for request_irq() */
170 spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */
170 171
171 u16 flags; /* custom mode flags */ 172 u16 flags; /* custom mode flags */
172 u8 ocr; /* output control register */ 173 u8 ocr; /* output control register */
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index a6a51f155962..496223e9e2fc 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -23,7 +23,6 @@
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/can.h>
27#include <linux/can/dev.h> 26#include <linux/can/dev.h>
28#include <linux/can/platform/sja1000.h> 27#include <linux/can/platform/sja1000.h>
29 28
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 158b76ebf3ea..ac1a83d7c204 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -38,7 +38,6 @@
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/netdevice.h> 39#include <linux/netdevice.h>
40#include <linux/delay.h> 40#include <linux/delay.h>
41#include <linux/can.h>
42#include <linux/can/dev.h> 41#include <linux/can/dev.h>
43 42
44#include <linux/of_platform.h> 43#include <linux/of_platform.h>
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 628374c2a05f..d9fadc489b32 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -24,7 +24,6 @@
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/irq.h> 26#include <linux/irq.h>
27#include <linux/can.h>
28#include <linux/can/dev.h> 27#include <linux/can/dev.h>
29#include <linux/can/platform/sja1000.h> 28#include <linux/can/platform/sja1000.h>
30#include <linux/io.h> 29#include <linux/io.h>
@@ -37,16 +36,36 @@ MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
37MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus"); 36MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
38MODULE_LICENSE("GPL v2"); 37MODULE_LICENSE("GPL v2");
39 38
40static u8 sp_read_reg(const struct sja1000_priv *priv, int reg) 39static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg)
41{ 40{
42 return ioread8(priv->reg_base + reg); 41 return ioread8(priv->reg_base + reg);
43} 42}
44 43
45static void sp_write_reg(const struct sja1000_priv *priv, int reg, u8 val) 44static void sp_write_reg8(const struct sja1000_priv *priv, int reg, u8 val)
46{ 45{
47 iowrite8(val, priv->reg_base + reg); 46 iowrite8(val, priv->reg_base + reg);
48} 47}
49 48
49static u8 sp_read_reg16(const struct sja1000_priv *priv, int reg)
50{
51 return ioread8(priv->reg_base + reg * 2);
52}
53
54static void sp_write_reg16(const struct sja1000_priv *priv, int reg, u8 val)
55{
56 iowrite8(val, priv->reg_base + reg * 2);
57}
58
59static u8 sp_read_reg32(const struct sja1000_priv *priv, int reg)
60{
61 return ioread8(priv->reg_base + reg * 4);
62}
63
64static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
65{
66 iowrite8(val, priv->reg_base + reg * 4);
67}
68
50static int sp_probe(struct platform_device *pdev) 69static int sp_probe(struct platform_device *pdev)
51{ 70{
52 int err; 71 int err;
@@ -90,14 +109,29 @@ static int sp_probe(struct platform_device *pdev)
90 priv = netdev_priv(dev); 109 priv = netdev_priv(dev);
91 110
92 dev->irq = res_irq->start; 111 dev->irq = res_irq->start;
93 priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; 112 priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
94 priv->reg_base = addr; 113 priv->reg_base = addr;
95 priv->read_reg = sp_read_reg; 114 /* The CAN clock frequency is half the oscillator clock frequency */
96 priv->write_reg = sp_write_reg; 115 priv->can.clock.freq = pdata->osc_freq / 2;
97 priv->can.clock.freq = pdata->clock;
98 priv->ocr = pdata->ocr; 116 priv->ocr = pdata->ocr;
99 priv->cdr = pdata->cdr; 117 priv->cdr = pdata->cdr;
100 118
119 switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) {
120 case IORESOURCE_MEM_32BIT:
121 priv->read_reg = sp_read_reg32;
122 priv->write_reg = sp_write_reg32;
123 break;
124 case IORESOURCE_MEM_16BIT:
125 priv->read_reg = sp_read_reg16;
126 priv->write_reg = sp_write_reg16;
127 break;
128 case IORESOURCE_MEM_8BIT:
129 default:
130 priv->read_reg = sp_read_reg8;
131 priv->write_reg = sp_write_reg8;
132 break;
133 }
134
101 dev_set_drvdata(&pdev->dev, dev); 135 dev_set_drvdata(&pdev->dev, dev);
102 SET_NETDEV_DEV(dev, &pdev->dev); 136 SET_NETDEV_DEV(dev, &pdev->dev);
103 137
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 0c3d2ba0d178..4d07f1ee7168 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -47,7 +47,6 @@
47#include <linux/platform_device.h> 47#include <linux/platform_device.h>
48#include <linux/clk.h> 48#include <linux/clk.h>
49 49
50#include <linux/can.h>
51#include <linux/can/dev.h> 50#include <linux/can/dev.h>
52#include <linux/can/error.h> 51#include <linux/can/error.h>
53#include <linux/can/platform/ti_hecc.h> 52#include <linux/can/platform/ti_hecc.h>
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 33451092b8e8..1fc0871d2ef7 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -300,8 +300,6 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
300 else if (err) 300 else if (err)
301 dev_err(netdev->dev.parent, 301 dev_err(netdev->dev.parent,
302 "failed resubmitting intr urb: %d\n", err); 302 "failed resubmitting intr urb: %d\n", err);
303
304 return;
305} 303}
306 304
307static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) 305static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -497,8 +495,6 @@ resubmit_urb:
497 else if (retval) 495 else if (retval)
498 dev_err(netdev->dev.parent, 496 dev_err(netdev->dev.parent,
499 "failed resubmitting read bulk urb: %d\n", retval); 497 "failed resubmitting read bulk urb: %d\n", retval);
500
501 return;
502} 498}
503 499
504/* 500/*
@@ -516,8 +512,8 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
516 netdev = dev->netdev; 512 netdev = dev->netdev;
517 513
518 /* free up our allocated buffer */ 514 /* free up our allocated buffer */
519 usb_buffer_free(urb->dev, urb->transfer_buffer_length, 515 usb_free_coherent(urb->dev, urb->transfer_buffer_length,
520 urb->transfer_buffer, urb->transfer_dma); 516 urb->transfer_buffer, urb->transfer_dma);
521 517
522 atomic_dec(&dev->active_tx_urbs); 518 atomic_dec(&dev->active_tx_urbs);
523 519
@@ -614,8 +610,8 @@ static int ems_usb_start(struct ems_usb *dev)
614 return -ENOMEM; 610 return -ENOMEM;
615 } 611 }
616 612
617 buf = usb_buffer_alloc(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, 613 buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
618 &urb->transfer_dma); 614 &urb->transfer_dma);
619 if (!buf) { 615 if (!buf) {
620 dev_err(netdev->dev.parent, 616 dev_err(netdev->dev.parent,
621 "No memory left for USB buffer\n"); 617 "No memory left for USB buffer\n");
@@ -635,8 +631,8 @@ static int ems_usb_start(struct ems_usb *dev)
635 netif_device_detach(dev->netdev); 631 netif_device_detach(dev->netdev);
636 632
637 usb_unanchor_urb(urb); 633 usb_unanchor_urb(urb);
638 usb_buffer_free(dev->udev, RX_BUFFER_SIZE, buf, 634 usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
639 urb->transfer_dma); 635 urb->transfer_dma);
640 break; 636 break;
641 } 637 }
642 638
@@ -777,7 +773,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
777 goto nomem; 773 goto nomem;
778 } 774 }
779 775
780 buf = usb_buffer_alloc(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma); 776 buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
781 if (!buf) { 777 if (!buf) {
782 dev_err(netdev->dev.parent, "No memory left for USB buffer\n"); 778 dev_err(netdev->dev.parent, "No memory left for USB buffer\n");
783 usb_free_urb(urb); 779 usb_free_urb(urb);
@@ -820,7 +816,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
820 */ 816 */
821 if (!context) { 817 if (!context) {
822 usb_unanchor_urb(urb); 818 usb_unanchor_urb(urb);
823 usb_buffer_free(dev->udev, size, buf, urb->transfer_dma); 819 usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
824 820
825 dev_warn(netdev->dev.parent, "couldn't find free context\n"); 821 dev_warn(netdev->dev.parent, "couldn't find free context\n");
826 822
@@ -845,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
845 can_free_echo_skb(netdev, context->echo_index); 841 can_free_echo_skb(netdev, context->echo_index);
846 842
847 usb_unanchor_urb(urb); 843 usb_unanchor_urb(urb);
848 usb_buffer_free(dev->udev, size, buf, urb->transfer_dma); 844 usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
849 dev_kfree_skb(skb); 845 dev_kfree_skb(skb);
850 846
851 atomic_dec(&dev->active_tx_urbs); 847 atomic_dec(&dev->active_tx_urbs);
@@ -1006,7 +1002,7 @@ static int ems_usb_probe(struct usb_interface *intf,
1006 1002
1007 netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS); 1003 netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS);
1008 if (!netdev) { 1004 if (!netdev) {
1009 dev_err(netdev->dev.parent, "Couldn't alloc candev\n"); 1005 dev_err(&intf->dev, "ems_usb: Couldn't alloc candev\n");
1010 return -ENOMEM; 1006 return -ENOMEM;
1011 } 1007 }
1012 1008
@@ -1036,20 +1032,20 @@ static int ems_usb_probe(struct usb_interface *intf,
1036 1032
1037 dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); 1033 dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
1038 if (!dev->intr_urb) { 1034 if (!dev->intr_urb) {
1039 dev_err(netdev->dev.parent, "Couldn't alloc intr URB\n"); 1035 dev_err(&intf->dev, "Couldn't alloc intr URB\n");
1040 goto cleanup_candev; 1036 goto cleanup_candev;
1041 } 1037 }
1042 1038
1043 dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); 1039 dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL);
1044 if (!dev->intr_in_buffer) { 1040 if (!dev->intr_in_buffer) {
1045 dev_err(netdev->dev.parent, "Couldn't alloc Intr buffer\n"); 1041 dev_err(&intf->dev, "Couldn't alloc Intr buffer\n");
1046 goto cleanup_intr_urb; 1042 goto cleanup_intr_urb;
1047 } 1043 }
1048 1044
1049 dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE + 1045 dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE +
1050 sizeof(struct ems_cpc_msg), GFP_KERNEL); 1046 sizeof(struct ems_cpc_msg), GFP_KERNEL);
1051 if (!dev->tx_msg_buffer) { 1047 if (!dev->tx_msg_buffer) {
1052 dev_err(netdev->dev.parent, "Couldn't alloc Tx buffer\n"); 1048 dev_err(&intf->dev, "Couldn't alloc Tx buffer\n");
1053 goto cleanup_intr_in_buffer; 1049 goto cleanup_intr_in_buffer;
1054 } 1050 }
1055 1051
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 9bd155e4111c..04a03f7003a0 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2889,7 +2889,6 @@ static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
2889 */ 2889 */
2890 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb)) 2890 if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
2891 return NETDEV_TX_BUSY; 2891 return NETDEV_TX_BUSY;
2892 dev->trans_start = jiffies;
2893 return NETDEV_TX_OK; 2892 return NETDEV_TX_OK;
2894} 2893}
2895 2894
@@ -2957,20 +2956,20 @@ static void cas_process_mc_list(struct cas *cp)
2957{ 2956{
2958 u16 hash_table[16]; 2957 u16 hash_table[16];
2959 u32 crc; 2958 u32 crc;
2960 struct dev_mc_list *dmi; 2959 struct netdev_hw_addr *ha;
2961 int i = 1; 2960 int i = 1;
2962 2961
2963 memset(hash_table, 0, sizeof(hash_table)); 2962 memset(hash_table, 0, sizeof(hash_table));
2964 netdev_for_each_mc_addr(dmi, cp->dev) { 2963 netdev_for_each_mc_addr(ha, cp->dev) {
2965 if (i <= CAS_MC_EXACT_MATCH_SIZE) { 2964 if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2966 /* use the alternate mac address registers for the 2965 /* use the alternate mac address registers for the
2967 * first 15 multicast addresses 2966 * first 15 multicast addresses
2968 */ 2967 */
2969 writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5], 2968 writel((ha->addr[4] << 8) | ha->addr[5],
2970 cp->regs + REG_MAC_ADDRN(i*3 + 0)); 2969 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2971 writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3], 2970 writel((ha->addr[2] << 8) | ha->addr[3],
2972 cp->regs + REG_MAC_ADDRN(i*3 + 1)); 2971 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2973 writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1], 2972 writel((ha->addr[0] << 8) | ha->addr[1],
2974 cp->regs + REG_MAC_ADDRN(i*3 + 2)); 2973 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2975 i++; 2974 i++;
2976 } 2975 }
@@ -2978,7 +2977,7 @@ static void cas_process_mc_list(struct cas *cp)
2978 /* use hw hash table for the next series of 2977 /* use hw hash table for the next series of
2979 * multicast addresses 2978 * multicast addresses
2980 */ 2979 */
2981 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr); 2980 crc = ether_crc_le(ETH_ALEN, ha->addr);
2982 crc >>= 24; 2981 crc >>= 24;
2983 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 2982 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2984 } 2983 }
@@ -4825,7 +4824,7 @@ static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4825 break; 4824 break;
4826 default: 4825 default:
4827 break; 4826 break;
4828 }; 4827 }
4829 4828
4830 mutex_unlock(&cp->pm_mutex); 4829 mutex_unlock(&cp->pm_mutex);
4831 return rc; 4830 return rc;
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 9e631b9d3948..7dbb16d36fff 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -377,12 +377,13 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
377 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; 377 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
378 } else if (t1_rx_mode_mc_cnt(rm)) { 378 } else if (t1_rx_mode_mc_cnt(rm)) {
379 /* Accept one or more multicast(s). */ 379 /* Accept one or more multicast(s). */
380 struct dev_mc_list *dmi; 380 struct netdev_hw_addr *ha;
381 int bit; 381 int bit;
382 u16 mc_filter[4] = { 0, }; 382 u16 mc_filter[4] = { 0, };
383 383
384 netdev_for_each_mc_addr(dmi, t1_get_netdev(rm)) { 384 netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) {
385 bit = (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 23) & 0x3f; /* bit[23:28] */ 385 /* bit[23:28] */
386 bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
386 mc_filter[bit >> 4] |= 1 << (bit & 0xf); 387 mc_filter[bit >> 4] |= 1 << (bit & 0xf);
387 } 388 }
388 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]); 389 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index df3a1410696e..f01cfdb995de 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -162,14 +162,14 @@ struct respQ_e {
162 */ 162 */
163struct cmdQ_ce { 163struct cmdQ_ce {
164 struct sk_buff *skb; 164 struct sk_buff *skb;
165 DECLARE_PCI_UNMAP_ADDR(dma_addr); 165 DEFINE_DMA_UNMAP_ADDR(dma_addr);
166 DECLARE_PCI_UNMAP_LEN(dma_len); 166 DEFINE_DMA_UNMAP_LEN(dma_len);
167}; 167};
168 168
169struct freelQ_ce { 169struct freelQ_ce {
170 struct sk_buff *skb; 170 struct sk_buff *skb;
171 DECLARE_PCI_UNMAP_ADDR(dma_addr); 171 DEFINE_DMA_UNMAP_ADDR(dma_addr);
172 DECLARE_PCI_UNMAP_LEN(dma_len); 172 DEFINE_DMA_UNMAP_LEN(dma_len);
173}; 173};
174 174
175/* 175/*
@@ -460,7 +460,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
460 460
461again: 461again:
462 for (i = 0; i < MAX_NPORTS; i++) { 462 for (i = 0; i < MAX_NPORTS; i++) {
463 s->port = ++s->port & (MAX_NPORTS - 1); 463 s->port = (s->port + 1) & (MAX_NPORTS - 1);
464 skbq = &s->p[s->port].skbq; 464 skbq = &s->p[s->port].skbq;
465 465
466 skb = skb_peek(skbq); 466 skb = skb_peek(skbq);
@@ -518,8 +518,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
518 while (q->credits--) { 518 while (q->credits--) {
519 struct freelQ_ce *ce = &q->centries[cidx]; 519 struct freelQ_ce *ce = &q->centries[cidx];
520 520
521 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 521 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
522 pci_unmap_len(ce, dma_len), 522 dma_unmap_len(ce, dma_len),
523 PCI_DMA_FROMDEVICE); 523 PCI_DMA_FROMDEVICE);
524 dev_kfree_skb(ce->skb); 524 dev_kfree_skb(ce->skb);
525 ce->skb = NULL; 525 ce->skb = NULL;
@@ -633,9 +633,9 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
633 q->in_use -= n; 633 q->in_use -= n;
634 ce = &q->centries[cidx]; 634 ce = &q->centries[cidx];
635 while (n--) { 635 while (n--) {
636 if (likely(pci_unmap_len(ce, dma_len))) { 636 if (likely(dma_unmap_len(ce, dma_len))) {
637 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 637 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
638 pci_unmap_len(ce, dma_len), 638 dma_unmap_len(ce, dma_len),
639 PCI_DMA_TODEVICE); 639 PCI_DMA_TODEVICE);
640 if (q->sop) 640 if (q->sop)
641 q->sop = 0; 641 q->sop = 0;
@@ -851,8 +851,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
851 skb_reserve(skb, sge->rx_pkt_pad); 851 skb_reserve(skb, sge->rx_pkt_pad);
852 852
853 ce->skb = skb; 853 ce->skb = skb;
854 pci_unmap_addr_set(ce, dma_addr, mapping); 854 dma_unmap_addr_set(ce, dma_addr, mapping);
855 pci_unmap_len_set(ce, dma_len, dma_len); 855 dma_unmap_len_set(ce, dma_len, dma_len);
856 e->addr_lo = (u32)mapping; 856 e->addr_lo = (u32)mapping;
857 e->addr_hi = (u64)mapping >> 32; 857 e->addr_hi = (u64)mapping >> 32;
858 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit); 858 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
@@ -1059,13 +1059,13 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
1059 skb_reserve(skb, 2); /* align IP header */ 1059 skb_reserve(skb, 2); /* align IP header */
1060 skb_put(skb, len); 1060 skb_put(skb, len);
1061 pci_dma_sync_single_for_cpu(pdev, 1061 pci_dma_sync_single_for_cpu(pdev,
1062 pci_unmap_addr(ce, dma_addr), 1062 dma_unmap_addr(ce, dma_addr),
1063 pci_unmap_len(ce, dma_len), 1063 dma_unmap_len(ce, dma_len),
1064 PCI_DMA_FROMDEVICE); 1064 PCI_DMA_FROMDEVICE);
1065 skb_copy_from_linear_data(ce->skb, skb->data, len); 1065 skb_copy_from_linear_data(ce->skb, skb->data, len);
1066 pci_dma_sync_single_for_device(pdev, 1066 pci_dma_sync_single_for_device(pdev,
1067 pci_unmap_addr(ce, dma_addr), 1067 dma_unmap_addr(ce, dma_addr),
1068 pci_unmap_len(ce, dma_len), 1068 dma_unmap_len(ce, dma_len),
1069 PCI_DMA_FROMDEVICE); 1069 PCI_DMA_FROMDEVICE);
1070 recycle_fl_buf(fl, fl->cidx); 1070 recycle_fl_buf(fl, fl->cidx);
1071 return skb; 1071 return skb;
@@ -1077,8 +1077,8 @@ use_orig_buf:
1077 return NULL; 1077 return NULL;
1078 } 1078 }
1079 1079
1080 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 1080 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
1081 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1081 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1082 skb = ce->skb; 1082 skb = ce->skb;
1083 prefetch(skb->data); 1083 prefetch(skb->data);
1084 1084
@@ -1100,8 +1100,8 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
1100 struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1100 struct freelQ_ce *ce = &fl->centries[fl->cidx];
1101 struct sk_buff *skb = ce->skb; 1101 struct sk_buff *skb = ce->skb;
1102 1102
1103 pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr), 1103 pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
1104 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1104 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1105 pr_err("%s: unexpected offload packet, cmd %u\n", 1105 pr_err("%s: unexpected offload packet, cmd %u\n",
1106 adapter->name, *skb->data); 1106 adapter->name, *skb->data);
1107 recycle_fl_buf(fl, fl->cidx); 1107 recycle_fl_buf(fl, fl->cidx);
@@ -1123,7 +1123,7 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
1123 1123
1124 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { 1124 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1125 unsigned int nfrags = skb_shinfo(skb)->nr_frags; 1125 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
1126 unsigned int i, len = skb->len - skb->data_len; 1126 unsigned int i, len = skb_headlen(skb);
1127 while (len > SGE_TX_DESC_MAX_PLEN) { 1127 while (len > SGE_TX_DESC_MAX_PLEN) {
1128 count++; 1128 count++;
1129 len -= SGE_TX_DESC_MAX_PLEN; 1129 len -= SGE_TX_DESC_MAX_PLEN;
@@ -1182,7 +1182,7 @@ static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
1182 write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN, 1182 write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
1183 *gen, nfrags == 0 && *desc_len == 0); 1183 *gen, nfrags == 0 && *desc_len == 0);
1184 ce1->skb = NULL; 1184 ce1->skb = NULL;
1185 pci_unmap_len_set(ce1, dma_len, 0); 1185 dma_unmap_len_set(ce1, dma_len, 0);
1186 *desc_mapping += SGE_TX_DESC_MAX_PLEN; 1186 *desc_mapping += SGE_TX_DESC_MAX_PLEN;
1187 if (*desc_len) { 1187 if (*desc_len) {
1188 ce1++; 1188 ce1++;
@@ -1219,10 +1219,10 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1219 ce = &q->centries[pidx]; 1219 ce = &q->centries[pidx];
1220 1220
1221 mapping = pci_map_single(adapter->pdev, skb->data, 1221 mapping = pci_map_single(adapter->pdev, skb->data,
1222 skb->len - skb->data_len, PCI_DMA_TODEVICE); 1222 skb_headlen(skb), PCI_DMA_TODEVICE);
1223 1223
1224 desc_mapping = mapping; 1224 desc_mapping = mapping;
1225 desc_len = skb->len - skb->data_len; 1225 desc_len = skb_headlen(skb);
1226 1226
1227 flags = F_CMD_DATAVALID | F_CMD_SOP | 1227 flags = F_CMD_DATAVALID | F_CMD_SOP |
1228 V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) | 1228 V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
@@ -1233,7 +1233,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1233 e->addr_hi = (u64)desc_mapping >> 32; 1233 e->addr_hi = (u64)desc_mapping >> 32;
1234 e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen); 1234 e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
1235 ce->skb = NULL; 1235 ce->skb = NULL;
1236 pci_unmap_len_set(ce, dma_len, 0); 1236 dma_unmap_len_set(ce, dma_len, 0);
1237 1237
1238 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN && 1238 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
1239 desc_len > SGE_TX_DESC_MAX_PLEN) { 1239 desc_len > SGE_TX_DESC_MAX_PLEN) {
@@ -1257,8 +1257,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1257 } 1257 }
1258 1258
1259 ce->skb = NULL; 1259 ce->skb = NULL;
1260 pci_unmap_addr_set(ce, dma_addr, mapping); 1260 dma_unmap_addr_set(ce, dma_addr, mapping);
1261 pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len); 1261 dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
1262 1262
1263 for (i = 0; nfrags--; i++) { 1263 for (i = 0; nfrags--; i++) {
1264 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1264 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1284,8 +1284,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1284 write_tx_desc(e1, desc_mapping, desc_len, gen, 1284 write_tx_desc(e1, desc_mapping, desc_len, gen,
1285 nfrags == 0); 1285 nfrags == 0);
1286 ce->skb = NULL; 1286 ce->skb = NULL;
1287 pci_unmap_addr_set(ce, dma_addr, mapping); 1287 dma_unmap_addr_set(ce, dma_addr, mapping);
1288 pci_unmap_len_set(ce, dma_len, frag->size); 1288 dma_unmap_len_set(ce, dma_len, frag->size);
1289 } 1289 }
1290 ce->skb = skb; 1290 ce->skb = skb;
1291 wmb(); 1291 wmb();
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4b451a7c03e9..be90d3598bca 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1143,12 +1143,12 @@ static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1143 1143
1144 spin_lock_bh(&cp->cnic_ulp_lock); 1144 spin_lock_bh(&cp->cnic_ulp_lock);
1145 if (num_wqes > cnic_kwq_avail(cp) && 1145 if (num_wqes > cnic_kwq_avail(cp) &&
1146 !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) { 1146 !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
1147 spin_unlock_bh(&cp->cnic_ulp_lock); 1147 spin_unlock_bh(&cp->cnic_ulp_lock);
1148 return -EAGAIN; 1148 return -EAGAIN;
1149 } 1149 }
1150 1150
1151 cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT; 1151 clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
1152 1152
1153 prod = cp->kwq_prod_idx; 1153 prod = cp->kwq_prod_idx;
1154 sw_prod = prod & MAX_KWQ_IDX; 1154 sw_prod = prod & MAX_KWQ_IDX;
@@ -2092,7 +2092,6 @@ end:
2092 i += j; 2092 i += j;
2093 j = 1; 2093 j = 1;
2094 } 2094 }
2095 return;
2096} 2095}
2097 2096
2098static u16 cnic_bnx2_next_idx(u16 idx) 2097static u16 cnic_bnx2_next_idx(u16 idx)
@@ -2146,17 +2145,56 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
2146 return last_cnt; 2145 return last_cnt;
2147} 2146}
2148 2147
2148static int cnic_l2_completion(struct cnic_local *cp)
2149{
2150 u16 hw_cons, sw_cons;
2151 union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2152 (cp->l2_ring + (2 * BCM_PAGE_SIZE));
2153 u32 cmd;
2154 int comp = 0;
2155
2156 if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2157 return 0;
2158
2159 hw_cons = *cp->rx_cons_ptr;
2160 if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2161 hw_cons++;
2162
2163 sw_cons = cp->rx_cons;
2164 while (sw_cons != hw_cons) {
2165 u8 cqe_fp_flags;
2166
2167 cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2168 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2169 if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2170 cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2171 cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2172 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2173 cmd == RAMROD_CMD_ID_ETH_HALT)
2174 comp++;
2175 }
2176 sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2177 }
2178 return comp;
2179}
2180
2149static void cnic_chk_pkt_rings(struct cnic_local *cp) 2181static void cnic_chk_pkt_rings(struct cnic_local *cp)
2150{ 2182{
2151 u16 rx_cons = *cp->rx_cons_ptr; 2183 u16 rx_cons = *cp->rx_cons_ptr;
2152 u16 tx_cons = *cp->tx_cons_ptr; 2184 u16 tx_cons = *cp->tx_cons_ptr;
2185 int comp = 0;
2153 2186
2154 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2187 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2188 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2189 comp = cnic_l2_completion(cp);
2190
2155 cp->tx_cons = tx_cons; 2191 cp->tx_cons = tx_cons;
2156 cp->rx_cons = rx_cons; 2192 cp->rx_cons = rx_cons;
2157 2193
2158 uio_event_notify(cp->cnic_uinfo); 2194 uio_event_notify(cp->cnic_uinfo);
2159 } 2195 }
2196 if (comp)
2197 clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2160} 2198}
2161 2199
2162static int cnic_service_bnx2(void *data, void *status_blk) 2200static int cnic_service_bnx2(void *data, void *status_blk)
@@ -2325,7 +2363,6 @@ done:
2325 status_idx, IGU_INT_ENABLE, 1); 2363 status_idx, IGU_INT_ENABLE, 1);
2326 2364
2327 cp->kcq_prod_idx = sw_prod; 2365 cp->kcq_prod_idx = sw_prod;
2328 return;
2329} 2366}
2330 2367
2331static int cnic_service_bnx2x(void *data, void *status_blk) 2368static int cnic_service_bnx2x(void *data, void *status_blk)
@@ -3692,7 +3729,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
3692 cp->max_kwq_idx = MAX_KWQ_IDX; 3729 cp->max_kwq_idx = MAX_KWQ_IDX;
3693 cp->kwq_prod_idx = 0; 3730 cp->kwq_prod_idx = 0;
3694 cp->kwq_con_idx = 0; 3731 cp->kwq_con_idx = 0;
3695 cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT; 3732 set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
3696 3733
3697 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708) 3734 if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
3698 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15; 3735 cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
@@ -4170,6 +4207,8 @@ static void cnic_init_rings(struct cnic_dev *dev)
4170 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++) 4207 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
4171 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]); 4208 CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
4172 4209
4210 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4211
4173 cnic_init_bnx2x_tx_ring(dev); 4212 cnic_init_bnx2x_tx_ring(dev);
4174 cnic_init_bnx2x_rx_ring(dev); 4213 cnic_init_bnx2x_rx_ring(dev);
4175 4214
@@ -4177,6 +4216,15 @@ static void cnic_init_rings(struct cnic_dev *dev)
4177 l5_data.phy_address.hi = 0; 4216 l5_data.phy_address.hi = 0;
4178 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, 4217 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
4179 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); 4218 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
4219 i = 0;
4220 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4221 ++i < 10)
4222 msleep(1);
4223
4224 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4225 netdev_err(dev->netdev,
4226 "iSCSI CLIENT_SETUP did not complete\n");
4227 cnic_kwq_completion(dev, 1);
4180 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1); 4228 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
4181 } 4229 }
4182} 4230}
@@ -4189,14 +4237,25 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
4189 struct cnic_local *cp = dev->cnic_priv; 4237 struct cnic_local *cp = dev->cnic_priv;
4190 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4238 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
4191 union l5cm_specific_data l5_data; 4239 union l5cm_specific_data l5_data;
4240 int i;
4192 4241
4193 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0); 4242 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
4194 4243
4244 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4245
4195 l5_data.phy_address.lo = cli; 4246 l5_data.phy_address.lo = cli;
4196 l5_data.phy_address.hi = 0; 4247 l5_data.phy_address.hi = 0;
4197 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, 4248 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
4198 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); 4249 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
4199 msleep(10); 4250 i = 0;
4251 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4252 ++i < 10)
4253 msleep(1);
4254
4255 if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
4256 netdev_err(dev->netdev,
4257 "iSCSI CLIENT_HALT did not complete\n");
4258 cnic_kwq_completion(dev, 1);
4200 4259
4201 memset(&l5_data, 0, sizeof(l5_data)); 4260 memset(&l5_data, 0, sizeof(l5_data));
4202 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL, 4261 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
@@ -4317,7 +4376,15 @@ static void cnic_stop_hw(struct cnic_dev *dev)
4317{ 4376{
4318 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 4377 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
4319 struct cnic_local *cp = dev->cnic_priv; 4378 struct cnic_local *cp = dev->cnic_priv;
4379 int i = 0;
4320 4380
4381 /* Need to wait for the ring shutdown event to complete
4382 * before clearing the CNIC_UP flag.
4383 */
4384 while (cp->uio_dev != -1 && i < 15) {
4385 msleep(100);
4386 i++;
4387 }
4321 clear_bit(CNIC_F_CNIC_UP, &dev->flags); 4388 clear_bit(CNIC_F_CNIC_UP, &dev->flags);
4322 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL); 4389 rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
4323 synchronize_rcu(); 4390 synchronize_rcu();
@@ -4628,7 +4695,6 @@ static void __exit cnic_exit(void)
4628{ 4695{
4629 unregister_netdevice_notifier(&cnic_netdev_notifier); 4696 unregister_netdevice_notifier(&cnic_netdev_notifier);
4630 cnic_release(); 4697 cnic_release();
4631 return;
4632} 4698}
4633 4699
4634module_init(cnic_init); 4700module_init(cnic_init);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index a0d853dff983..08b1235d987d 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -179,9 +179,9 @@ struct cnic_local {
179#define ULP_F_CALL_PENDING 2 179#define ULP_F_CALL_PENDING 2
180 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE]; 180 struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
181 181
182 /* protected by ulp_lock */ 182 unsigned long cnic_local_flags;
183 u32 cnic_local_flags; 183#define CNIC_LCL_FL_KWQ_INIT 0x0
184#define CNIC_LCL_FL_KWQ_INIT 0x00000001 184#define CNIC_LCL_FL_L2_WAIT 0x1
185 185
186 struct cnic_dev *dev; 186 struct cnic_dev *dev;
187 187
@@ -349,6 +349,10 @@ struct bnx2x_bd_chain_next {
349#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe)) 349#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
350#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1) 350#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
351 351
352#define BNX2X_NEXT_RCQE(x) (((x) & BNX2X_MAX_RCQ_DESC_CNT) == \
353 (BNX2X_MAX_RCQ_DESC_CNT - 1)) ? \
354 ((x) + 2) : ((x) + 1)
355
352#define BNX2X_DEF_SB_ID 16 356#define BNX2X_DEF_SB_ID 16
353 357
354#define BNX2X_ISCSI_RX_SB_INDEX_NUM \ 358#define BNX2X_ISCSI_RX_SB_INDEX_NUM \
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 60777fd90b33..3c58db595285 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -328,7 +328,7 @@ static int cpmac_config(struct net_device *dev, struct ifmap *map)
328 328
329static void cpmac_set_multicast_list(struct net_device *dev) 329static void cpmac_set_multicast_list(struct net_device *dev)
330{ 330{
331 struct dev_mc_list *iter; 331 struct netdev_hw_addr *ha;
332 u8 tmp; 332 u8 tmp;
333 u32 mbp, bit, hash[2] = { 0, }; 333 u32 mbp, bit, hash[2] = { 0, };
334 struct cpmac_priv *priv = netdev_priv(dev); 334 struct cpmac_priv *priv = netdev_priv(dev);
@@ -348,19 +348,19 @@ static void cpmac_set_multicast_list(struct net_device *dev)
348 * cpmac uses some strange mac address hashing 348 * cpmac uses some strange mac address hashing
349 * (not crc32) 349 * (not crc32)
350 */ 350 */
351 netdev_for_each_mc_addr(iter, dev) { 351 netdev_for_each_mc_addr(ha, dev) {
352 bit = 0; 352 bit = 0;
353 tmp = iter->dmi_addr[0]; 353 tmp = ha->addr[0];
354 bit ^= (tmp >> 2) ^ (tmp << 4); 354 bit ^= (tmp >> 2) ^ (tmp << 4);
355 tmp = iter->dmi_addr[1]; 355 tmp = ha->addr[1];
356 bit ^= (tmp >> 4) ^ (tmp << 2); 356 bit ^= (tmp >> 4) ^ (tmp << 2);
357 tmp = iter->dmi_addr[2]; 357 tmp = ha->addr[2];
358 bit ^= (tmp >> 6) ^ tmp; 358 bit ^= (tmp >> 6) ^ tmp;
359 tmp = iter->dmi_addr[3]; 359 tmp = ha->addr[3];
360 bit ^= (tmp >> 2) ^ (tmp << 4); 360 bit ^= (tmp >> 2) ^ (tmp << 4);
361 tmp = iter->dmi_addr[4]; 361 tmp = ha->addr[4];
362 bit ^= (tmp >> 4) ^ (tmp << 2); 362 bit ^= (tmp >> 4) ^ (tmp << 2);
363 tmp = iter->dmi_addr[5]; 363 tmp = ha->addr[5];
364 bit ^= (tmp >> 6) ^ tmp; 364 bit ^= (tmp >> 6) ^ tmp;
365 bit &= 0x3f; 365 bit &= 0x3f;
366 hash[bit / 32] |= 1 << (bit % 32); 366 hash[bit / 32] |= 1 << (bit % 32);
@@ -579,7 +579,6 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
579 } 579 }
580 580
581 spin_lock(&priv->lock); 581 spin_lock(&priv->lock);
582 dev->trans_start = jiffies;
583 spin_unlock(&priv->lock); 582 spin_unlock(&priv->lock);
584 desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN; 583 desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
585 desc->skb = skb; 584 desc->skb = skb;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 61a33914e96f..7e00027b9f8e 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1108,7 +1108,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev)
1108 1108
1109 myNextTxDesc->skb = skb; 1109 myNextTxDesc->skb = skb;
1110 1110
1111 dev->trans_start = jiffies; 1111 dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
1112 1112
1113 e100_hardware_send_packet(np, buf, skb->len); 1113 e100_hardware_send_packet(np, buf, skb->len);
1114 1114
@@ -1595,16 +1595,16 @@ set_multicast_list(struct net_device *dev)
1595 } else { 1595 } else {
1596 /* MC mode, receive normal and MC packets */ 1596 /* MC mode, receive normal and MC packets */
1597 char hash_ix; 1597 char hash_ix;
1598 struct dev_mc_list *dmi; 1598 struct netdev_hw_addr *ha;
1599 char *baddr; 1599 char *baddr;
1600 1600
1601 lo_bits = 0x00000000ul; 1601 lo_bits = 0x00000000ul;
1602 hi_bits = 0x00000000ul; 1602 hi_bits = 0x00000000ul;
1603 netdev_for_each_mc_addr(dmi, dev) { 1603 netdev_for_each_mc_addr(ha, dev) {
1604 /* Calculate the hash index for the GA registers */ 1604 /* Calculate the hash index for the GA registers */
1605 1605
1606 hash_ix = 0; 1606 hash_ix = 0;
1607 baddr = dmi->dmi_addr; 1607 baddr = ha->addr;
1608 hash_ix ^= (*baddr) & 0x3f; 1608 hash_ix ^= (*baddr) & 0x3f;
1609 hash_ix ^= ((*baddr) >> 6) & 0x03; 1609 hash_ix ^= ((*baddr) >> 6) & 0x03;
1610 ++baddr; 1610 ++baddr;
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 4c38491b8efb..2ccb9f12805b 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -902,7 +902,6 @@ get_dma_channel(struct net_device *dev)
902 return; 902 return;
903 } 903 }
904 } 904 }
905 return;
906} 905}
907 906
908static void 907static void
@@ -1554,7 +1553,6 @@ static netdev_tx_t net_send_packet(struct sk_buff *skb,struct net_device *dev)
1554 writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1); 1553 writewords(dev->base_addr, TX_FRAME_PORT,skb->data,(skb->len+1) >>1);
1555 spin_unlock_irqrestore(&lp->lock, flags); 1554 spin_unlock_irqrestore(&lp->lock, flags);
1556 lp->stats.tx_bytes += skb->len; 1555 lp->stats.tx_bytes += skb->len;
1557 dev->trans_start = jiffies;
1558 dev_kfree_skb (skb); 1556 dev_kfree_skb (skb);
1559 1557
1560 /* 1558 /*
@@ -1673,7 +1671,6 @@ count_rx_errors(int status, struct net_local *lp)
1673 /* per str 172 */ 1671 /* per str 172 */
1674 lp->stats.rx_crc_errors++; 1672 lp->stats.rx_crc_errors++;
1675 if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++; 1673 if (status & RX_DRIBBLE) lp->stats.rx_frame_errors++;
1676 return;
1677} 1674}
1678 1675
1679/* We have a good packet(s), get it/them out of the buffers. */ 1676/* We have a good packet(s), get it/them out of the buffers. */
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
index 5248f9e0b2f4..35cd36729155 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -934,7 +934,7 @@ static struct cphy_ops xaui_direct_ops = {
934int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter, 934int t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
935 int phy_addr, const struct mdio_ops *mdio_ops) 935 int phy_addr, const struct mdio_ops *mdio_ops)
936{ 936{
937 cphy_init(phy, adapter, MDIO_PRTAD_NONE, &xaui_direct_ops, mdio_ops, 937 cphy_init(phy, adapter, phy_addr, &xaui_direct_ops, mdio_ops,
938 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP, 938 SUPPORTED_10000baseT_Full | SUPPORTED_AUI | SUPPORTED_TP,
939 "10GBASE-CX4"); 939 "10GBASE-CX4");
940 return 0; 940 return 0;
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index aced6c5e635c..e3f1b8566495 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -439,7 +439,7 @@ static void free_irq_resources(struct adapter *adapter)
439static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, 439static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt,
440 unsigned long n) 440 unsigned long n)
441{ 441{
442 int attempts = 5; 442 int attempts = 10;
443 443
444 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { 444 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {
445 if (!--attempts) 445 if (!--attempts)
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 2f3ee721c3e1..f452c4003253 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -207,7 +207,6 @@ again:
207 */ 207 */
208 neigh_event_send(e->neigh, NULL); 208 neigh_event_send(e->neigh, NULL);
209 } 209 }
210 return;
211} 210}
212 211
213EXPORT_SYMBOL(t3_l2t_send_event); 212EXPORT_SYMBOL(t3_l2t_send_event);
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 07d7e7fab3f5..5962b911b5bd 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -118,7 +118,7 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
118 struct sk_buff *skb; 118 struct sk_buff *skb;
119 struct fl_pg_chunk pg_chunk; 119 struct fl_pg_chunk pg_chunk;
120 }; 120 };
121 DECLARE_PCI_UNMAP_ADDR(dma_addr); 121 DEFINE_DMA_UNMAP_ADDR(dma_addr);
122}; 122};
123 123
124struct rsp_desc { /* response queue descriptor */ 124struct rsp_desc { /* response queue descriptor */
@@ -208,7 +208,7 @@ static inline int need_skb_unmap(void)
208 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything. 208 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
209 */ 209 */
210 struct dummy { 210 struct dummy {
211 DECLARE_PCI_UNMAP_ADDR(addr); 211 DEFINE_DMA_UNMAP_ADDR(addr);
212 }; 212 };
213 213
214 return sizeof(struct dummy) != 0; 214 return sizeof(struct dummy) != 0;
@@ -363,7 +363,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
363 put_page(d->pg_chunk.page); 363 put_page(d->pg_chunk.page);
364 d->pg_chunk.page = NULL; 364 d->pg_chunk.page = NULL;
365 } else { 365 } else {
366 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr), 366 pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
367 q->buf_size, PCI_DMA_FROMDEVICE); 367 q->buf_size, PCI_DMA_FROMDEVICE);
368 kfree_skb(d->skb); 368 kfree_skb(d->skb);
369 d->skb = NULL; 369 d->skb = NULL;
@@ -419,7 +419,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
419 if (unlikely(pci_dma_mapping_error(pdev, mapping))) 419 if (unlikely(pci_dma_mapping_error(pdev, mapping)))
420 return -ENOMEM; 420 return -ENOMEM;
421 421
422 pci_unmap_addr_set(sd, dma_addr, mapping); 422 dma_unmap_addr_set(sd, dma_addr, mapping);
423 423
424 d->addr_lo = cpu_to_be32(mapping); 424 d->addr_lo = cpu_to_be32(mapping);
425 d->addr_hi = cpu_to_be32((u64) mapping >> 32); 425 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
@@ -515,7 +515,7 @@ nomem: q->alloc_failed++;
515 break; 515 break;
516 } 516 }
517 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; 517 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
518 pci_unmap_addr_set(sd, dma_addr, mapping); 518 dma_unmap_addr_set(sd, dma_addr, mapping);
519 519
520 add_one_rx_chunk(mapping, d, q->gen); 520 add_one_rx_chunk(mapping, d, q->gen);
521 pci_dma_sync_single_for_device(adap->pdev, mapping, 521 pci_dma_sync_single_for_device(adap->pdev, mapping,
@@ -791,11 +791,11 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
791 if (likely(skb != NULL)) { 791 if (likely(skb != NULL)) {
792 __skb_put(skb, len); 792 __skb_put(skb, len);
793 pci_dma_sync_single_for_cpu(adap->pdev, 793 pci_dma_sync_single_for_cpu(adap->pdev,
794 pci_unmap_addr(sd, dma_addr), len, 794 dma_unmap_addr(sd, dma_addr), len,
795 PCI_DMA_FROMDEVICE); 795 PCI_DMA_FROMDEVICE);
796 memcpy(skb->data, sd->skb->data, len); 796 memcpy(skb->data, sd->skb->data, len);
797 pci_dma_sync_single_for_device(adap->pdev, 797 pci_dma_sync_single_for_device(adap->pdev,
798 pci_unmap_addr(sd, dma_addr), len, 798 dma_unmap_addr(sd, dma_addr), len,
799 PCI_DMA_FROMDEVICE); 799 PCI_DMA_FROMDEVICE);
800 } else if (!drop_thres) 800 } else if (!drop_thres)
801 goto use_orig_buf; 801 goto use_orig_buf;
@@ -810,7 +810,7 @@ recycle:
810 goto recycle; 810 goto recycle;
811 811
812use_orig_buf: 812use_orig_buf:
813 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr), 813 pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
814 fl->buf_size, PCI_DMA_FROMDEVICE); 814 fl->buf_size, PCI_DMA_FROMDEVICE);
815 skb = sd->skb; 815 skb = sd->skb;
816 skb_put(skb, len); 816 skb_put(skb, len);
@@ -843,7 +843,7 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
843 struct sk_buff *newskb, *skb; 843 struct sk_buff *newskb, *skb;
844 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 844 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
845 845
846 dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr); 846 dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
847 847
848 newskb = skb = q->pg_skb; 848 newskb = skb = q->pg_skb;
849 if (!skb && (len <= SGE_RX_COPY_THRES)) { 849 if (!skb && (len <= SGE_RX_COPY_THRES)) {
@@ -2097,7 +2097,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2097 fl->credits--; 2097 fl->credits--;
2098 2098
2099 pci_dma_sync_single_for_cpu(adap->pdev, 2099 pci_dma_sync_single_for_cpu(adap->pdev,
2100 pci_unmap_addr(sd, dma_addr), 2100 dma_unmap_addr(sd, dma_addr),
2101 fl->buf_size - SGE_PG_RSVD, 2101 fl->buf_size - SGE_PG_RSVD,
2102 PCI_DMA_FROMDEVICE); 2102 PCI_DMA_FROMDEVICE);
2103 2103
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index c142a2132e9f..3af19a550372 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -311,16 +311,16 @@ int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
311 if (dev->flags & IFF_ALLMULTI) 311 if (dev->flags & IFF_ALLMULTI)
312 hash_lo = hash_hi = 0xffffffff; 312 hash_lo = hash_hi = 0xffffffff;
313 else { 313 else {
314 struct dev_mc_list *dmi; 314 struct netdev_hw_addr *ha;
315 int exact_addr_idx = mac->nucast; 315 int exact_addr_idx = mac->nucast;
316 316
317 hash_lo = hash_hi = 0; 317 hash_lo = hash_hi = 0;
318 netdev_for_each_mc_addr(dmi, dev) 318 netdev_for_each_mc_addr(ha, dev)
319 if (exact_addr_idx < EXACT_ADDR_FILTERS) 319 if (exact_addr_idx < EXACT_ADDR_FILTERS)
320 set_addr_filter(mac, exact_addr_idx++, 320 set_addr_filter(mac, exact_addr_idx++,
321 dmi->dmi_addr); 321 ha->addr);
322 else { 322 else {
323 int hash = hash_hw_addr(dmi->dmi_addr); 323 int hash = hash_hw_addr(ha->addr);
324 324
325 if (hash < 32) 325 if (hash < 32)
326 hash_lo |= (1 << hash); 326 hash_lo |= (1 << hash);
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 3d8ff4889b56..dd1770e075e6 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -53,7 +53,7 @@
53 53
54enum { 54enum {
55 MAX_NPORTS = 4, /* max # of ports */ 55 MAX_NPORTS = 4, /* max # of ports */
56 SERNUM_LEN = 16, /* Serial # length */ 56 SERNUM_LEN = 24, /* Serial # length */
57 EC_LEN = 16, /* E/C length */ 57 EC_LEN = 16, /* E/C length */
58 ID_LEN = 16, /* ID length */ 58 ID_LEN = 16, /* ID length */
59}; 59};
@@ -477,7 +477,6 @@ struct adapter {
477 struct pci_dev *pdev; 477 struct pci_dev *pdev;
478 struct device *pdev_dev; 478 struct device *pdev_dev;
479 unsigned long registered_device_map; 479 unsigned long registered_device_map;
480 unsigned long open_device_map;
481 unsigned long flags; 480 unsigned long flags;
482 481
483 const char *name; 482 const char *name;
@@ -651,14 +650,11 @@ int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
651 struct link_config *lc); 650 struct link_config *lc);
652int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); 651int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
653int t4_seeprom_wp(struct adapter *adapter, bool enable); 652int t4_seeprom_wp(struct adapter *adapter, bool enable);
654int t4_read_flash(struct adapter *adapter, unsigned int addr,
655 unsigned int nwords, u32 *data, int byte_oriented);
656int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size); 653int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
657int t4_check_fw_version(struct adapter *adapter); 654int t4_check_fw_version(struct adapter *adapter);
658int t4_prep_adapter(struct adapter *adapter); 655int t4_prep_adapter(struct adapter *adapter);
659int t4_port_init(struct adapter *adap, int mbox, int pf, int vf); 656int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
660void t4_fatal_err(struct adapter *adapter); 657void t4_fatal_err(struct adapter *adapter);
661void t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
662int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp, 658int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
663 int filter_index, int enable); 659 int filter_index, int enable);
664void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp, 660void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
@@ -709,7 +705,8 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
709int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, 705int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
710 unsigned int vf, unsigned int viid); 706 unsigned int vf, unsigned int viid);
711int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 707int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
712 int mtu, int promisc, int all_multi, int bcast, bool sleep_ok); 708 int mtu, int promisc, int all_multi, int bcast, int vlanex,
709 bool sleep_ok);
713int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox, 710int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
714 unsigned int viid, bool free, unsigned int naddr, 711 unsigned int viid, bool free, unsigned int naddr,
715 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok); 712 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index a7e30a23d322..58045b00cf40 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -240,9 +240,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
240 u16 filt_idx[7]; 240 u16 filt_idx[7];
241 const u8 *addr[7]; 241 const u8 *addr[7];
242 int ret, naddr = 0; 242 int ret, naddr = 0;
243 const struct dev_addr_list *d;
244 const struct netdev_hw_addr *ha; 243 const struct netdev_hw_addr *ha;
245 int uc_cnt = netdev_uc_count(dev); 244 int uc_cnt = netdev_uc_count(dev);
245 int mc_cnt = netdev_mc_count(dev);
246 const struct port_info *pi = netdev_priv(dev); 246 const struct port_info *pi = netdev_priv(dev);
247 247
248 /* first do the secondary unicast addresses */ 248 /* first do the secondary unicast addresses */
@@ -260,9 +260,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
260 } 260 }
261 261
262 /* next set up the multicast addresses */ 262 /* next set up the multicast addresses */
263 netdev_for_each_mc_addr(d, dev) { 263 netdev_for_each_mc_addr(ha, dev) {
264 addr[naddr++] = d->dmi_addr; 264 addr[naddr++] = ha->addr;
265 if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) { 265 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
266 ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, 266 ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
267 naddr, addr, filt_idx, &mhash, sleep); 267 naddr, addr, filt_idx, &mhash, sleep);
268 if (ret < 0) 268 if (ret < 0)
@@ -290,7 +290,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
290 if (ret == 0) 290 if (ret == 0)
291 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu, 291 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
292 (dev->flags & IFF_PROMISC) ? 1 : 0, 292 (dev->flags & IFF_PROMISC) ? 1 : 0,
293 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, 293 (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
294 sleep_ok); 294 sleep_ok);
295 return ret; 295 return ret;
296} 296}
@@ -311,11 +311,11 @@ static int link_start(struct net_device *dev)
311 * that step explicitly. 311 * that step explicitly.
312 */ 312 */
313 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1, 313 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
314 true); 314 pi->vlan_grp != NULL, true);
315 if (ret == 0) { 315 if (ret == 0) {
316 ret = t4_change_mac(pi->adapter, 0, pi->viid, 316 ret = t4_change_mac(pi->adapter, 0, pi->viid,
317 pi->xact_addr_filt, dev->dev_addr, true, 317 pi->xact_addr_filt, dev->dev_addr, true,
318 false); 318 true);
319 if (ret >= 0) { 319 if (ret >= 0) {
320 pi->xact_addr_filt = ret; 320 pi->xact_addr_filt = ret;
321 ret = 0; 321 ret = 0;
@@ -859,6 +859,8 @@ static char stats_strings[][ETH_GSTRING_LEN] = {
859 "RxCsumGood ", 859 "RxCsumGood ",
860 "VLANextractions ", 860 "VLANextractions ",
861 "VLANinsertions ", 861 "VLANinsertions ",
862 "GROpackets ",
863 "GROmerged ",
862}; 864};
863 865
864static int get_sset_count(struct net_device *dev, int sset) 866static int get_sset_count(struct net_device *dev, int sset)
@@ -922,6 +924,8 @@ struct queue_port_stats {
922 u64 rx_csum; 924 u64 rx_csum;
923 u64 vlan_ex; 925 u64 vlan_ex;
924 u64 vlan_ins; 926 u64 vlan_ins;
927 u64 gro_pkts;
928 u64 gro_merged;
925}; 929};
926 930
927static void collect_sge_port_stats(const struct adapter *adap, 931static void collect_sge_port_stats(const struct adapter *adap,
@@ -938,6 +942,8 @@ static void collect_sge_port_stats(const struct adapter *adap,
938 s->rx_csum += rx->stats.rx_cso; 942 s->rx_csum += rx->stats.rx_cso;
939 s->vlan_ex += rx->stats.vlan_ex; 943 s->vlan_ex += rx->stats.vlan_ex;
940 s->vlan_ins += tx->vlan_ins; 944 s->vlan_ins += tx->vlan_ins;
945 s->gro_pkts += rx->stats.lro_pkts;
946 s->gro_merged += rx->stats.lro_merged;
941 } 947 }
942} 948}
943 949
@@ -1711,6 +1717,18 @@ static int set_tso(struct net_device *dev, u32 value)
1711 return 0; 1717 return 0;
1712} 1718}
1713 1719
1720static int set_flags(struct net_device *dev, u32 flags)
1721{
1722 if (flags & ~ETH_FLAG_RXHASH)
1723 return -EOPNOTSUPP;
1724
1725 if (flags & ETH_FLAG_RXHASH)
1726 dev->features |= NETIF_F_RXHASH;
1727 else
1728 dev->features &= ~NETIF_F_RXHASH;
1729 return 0;
1730}
1731
1714static struct ethtool_ops cxgb_ethtool_ops = { 1732static struct ethtool_ops cxgb_ethtool_ops = {
1715 .get_settings = get_settings, 1733 .get_settings = get_settings,
1716 .set_settings = set_settings, 1734 .set_settings = set_settings,
@@ -1741,6 +1759,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
1741 .get_wol = get_wol, 1759 .get_wol = get_wol,
1742 .set_wol = set_wol, 1760 .set_wol = set_wol,
1743 .set_tso = set_tso, 1761 .set_tso = set_tso,
1762 .set_flags = set_flags,
1744 .flash_device = set_flash, 1763 .flash_device = set_flash,
1745}; 1764};
1746 1765
@@ -2308,6 +2327,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
2308 register_netevent_notifier(&cxgb4_netevent_nb); 2327 register_netevent_notifier(&cxgb4_netevent_nb);
2309 netevent_registered = true; 2328 netevent_registered = true;
2310 } 2329 }
2330
2331 if (adap->flags & FULL_INIT_DONE)
2332 ulds[uld].state_change(handle, CXGB4_STATE_UP);
2311} 2333}
2312 2334
2313static void attach_ulds(struct adapter *adap) 2335static void attach_ulds(struct adapter *adap)
@@ -2414,23 +2436,17 @@ EXPORT_SYMBOL(cxgb4_unregister_uld);
2414 */ 2436 */
2415static int cxgb_up(struct adapter *adap) 2437static int cxgb_up(struct adapter *adap)
2416{ 2438{
2417 int err = 0; 2439 int err;
2418 2440
2419 if (!(adap->flags & FULL_INIT_DONE)) { 2441 err = setup_sge_queues(adap);
2420 err = setup_sge_queues(adap); 2442 if (err)
2421 if (err) 2443 goto out;
2422 goto out; 2444 err = setup_rss(adap);
2423 err = setup_rss(adap); 2445 if (err)
2424 if (err) { 2446 goto freeq;
2425 t4_free_sge_resources(adap);
2426 goto out;
2427 }
2428 if (adap->flags & USING_MSIX)
2429 name_msix_vecs(adap);
2430 adap->flags |= FULL_INIT_DONE;
2431 }
2432 2447
2433 if (adap->flags & USING_MSIX) { 2448 if (adap->flags & USING_MSIX) {
2449 name_msix_vecs(adap);
2434 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, 2450 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2435 adap->msix_info[0].desc, adap); 2451 adap->msix_info[0].desc, adap);
2436 if (err) 2452 if (err)
@@ -2451,11 +2467,14 @@ static int cxgb_up(struct adapter *adap)
2451 enable_rx(adap); 2467 enable_rx(adap);
2452 t4_sge_start(adap); 2468 t4_sge_start(adap);
2453 t4_intr_enable(adap); 2469 t4_intr_enable(adap);
2470 adap->flags |= FULL_INIT_DONE;
2454 notify_ulds(adap, CXGB4_STATE_UP); 2471 notify_ulds(adap, CXGB4_STATE_UP);
2455 out: 2472 out:
2456 return err; 2473 return err;
2457 irq_err: 2474 irq_err:
2458 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); 2475 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2476 freeq:
2477 t4_free_sge_resources(adap);
2459 goto out; 2478 goto out;
2460} 2479}
2461 2480
@@ -2471,6 +2490,9 @@ static void cxgb_down(struct adapter *adapter)
2471 } else 2490 } else
2472 free_irq(adapter->pdev->irq, adapter); 2491 free_irq(adapter->pdev->irq, adapter);
2473 quiesce_rx(adapter); 2492 quiesce_rx(adapter);
2493 t4_sge_stop(adapter);
2494 t4_free_sge_resources(adapter);
2495 adapter->flags &= ~FULL_INIT_DONE;
2474} 2496}
2475 2497
2476/* 2498/*
@@ -2482,11 +2504,13 @@ static int cxgb_open(struct net_device *dev)
2482 struct port_info *pi = netdev_priv(dev); 2504 struct port_info *pi = netdev_priv(dev);
2483 struct adapter *adapter = pi->adapter; 2505 struct adapter *adapter = pi->adapter;
2484 2506
2485 if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) 2507 if (!(adapter->flags & FULL_INIT_DONE)) {
2486 return err; 2508 err = cxgb_up(adapter);
2509 if (err < 0)
2510 return err;
2511 }
2487 2512
2488 dev->real_num_tx_queues = pi->nqsets; 2513 dev->real_num_tx_queues = pi->nqsets;
2489 set_bit(pi->tx_chan, &adapter->open_device_map);
2490 link_start(dev); 2514 link_start(dev);
2491 netif_tx_start_all_queues(dev); 2515 netif_tx_start_all_queues(dev);
2492 return 0; 2516 return 0;
@@ -2494,19 +2518,12 @@ static int cxgb_open(struct net_device *dev)
2494 2518
2495static int cxgb_close(struct net_device *dev) 2519static int cxgb_close(struct net_device *dev)
2496{ 2520{
2497 int ret;
2498 struct port_info *pi = netdev_priv(dev); 2521 struct port_info *pi = netdev_priv(dev);
2499 struct adapter *adapter = pi->adapter; 2522 struct adapter *adapter = pi->adapter;
2500 2523
2501 netif_tx_stop_all_queues(dev); 2524 netif_tx_stop_all_queues(dev);
2502 netif_carrier_off(dev); 2525 netif_carrier_off(dev);
2503 ret = t4_enable_vi(adapter, 0, pi->viid, false, false); 2526 return t4_enable_vi(adapter, 0, pi->viid, false, false);
2504
2505 clear_bit(pi->tx_chan, &adapter->open_device_map);
2506
2507 if (!adapter->open_device_map)
2508 cxgb_down(adapter);
2509 return 0;
2510} 2527}
2511 2528
2512static struct net_device_stats *cxgb_get_stats(struct net_device *dev) 2529static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
@@ -2601,7 +2618,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2601 2618
2602 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */ 2619 if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
2603 return -EINVAL; 2620 return -EINVAL;
2604 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, 2621 ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1, -1,
2605 true); 2622 true);
2606 if (!ret) 2623 if (!ret)
2607 dev->mtu = new_mtu; 2624 dev->mtu = new_mtu;
@@ -2632,7 +2649,8 @@ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
2632 struct port_info *pi = netdev_priv(dev); 2649 struct port_info *pi = netdev_priv(dev);
2633 2650
2634 pi->vlan_grp = grp; 2651 pi->vlan_grp = grp;
2635 t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL); 2652 t4_set_rxmode(pi->adapter, 0, pi->viid, -1, -1, -1, -1, grp != NULL,
2653 true);
2636} 2654}
2637 2655
2638#ifdef CONFIG_NET_POLL_CONTROLLER 2656#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3066,6 +3084,12 @@ static void __devinit print_port_info(struct adapter *adap)
3066 3084
3067 int i; 3085 int i;
3068 char buf[80]; 3086 char buf[80];
3087 const char *spd = "";
3088
3089 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
3090 spd = " 2.5 GT/s";
3091 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
3092 spd = " 5 GT/s";
3069 3093
3070 for_each_port(adap, i) { 3094 for_each_port(adap, i) {
3071 struct net_device *dev = adap->port[i]; 3095 struct net_device *dev = adap->port[i];
@@ -3085,10 +3109,10 @@ static void __devinit print_port_info(struct adapter *adap)
3085 --bufp; 3109 --bufp;
3086 sprintf(bufp, "BASE-%s", base[pi->port_type]); 3110 sprintf(bufp, "BASE-%s", base[pi->port_type]);
3087 3111
3088 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n", 3112 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
3089 adap->params.vpd.id, adap->params.rev, 3113 adap->params.vpd.id, adap->params.rev,
3090 buf, is_offload(adap) ? "R" : "", 3114 buf, is_offload(adap) ? "R" : "",
3091 adap->params.pci.width, 3115 adap->params.pci.width, spd,
3092 (adap->flags & USING_MSIX) ? " MSI-X" : 3116 (adap->flags & USING_MSIX) ? " MSI-X" :
3093 (adap->flags & USING_MSI) ? " MSI" : ""); 3117 (adap->flags & USING_MSI) ? " MSI" : "");
3094 if (adap->name == dev->name) 3118 if (adap->name == dev->name)
@@ -3203,7 +3227,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3203 3227
3204 netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; 3228 netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
3205 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; 3229 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3206 netdev->features |= NETIF_F_GRO | highdma; 3230 netdev->features |= NETIF_F_GRO | NETIF_F_RXHASH | highdma;
3207 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 3231 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
3208 netdev->vlan_features = netdev->features & VLAN_FEAT; 3232 netdev->vlan_features = netdev->features & VLAN_FEAT;
3209 3233
@@ -3334,8 +3358,8 @@ static void __devexit remove_one(struct pci_dev *pdev)
3334 if (adapter->debugfs_root) 3358 if (adapter->debugfs_root)
3335 debugfs_remove_recursive(adapter->debugfs_root); 3359 debugfs_remove_recursive(adapter->debugfs_root);
3336 3360
3337 t4_sge_stop(adapter); 3361 if (adapter->flags & FULL_INIT_DONE)
3338 t4_free_sge_resources(adapter); 3362 cxgb_down(adapter);
3339 t4_free_mem(adapter->l2t); 3363 t4_free_mem(adapter->l2t);
3340 t4_free_mem(adapter->tids.tid_tab); 3364 t4_free_mem(adapter->tids.tid_tab);
3341 disable_msi(adapter); 3365 disable_msi(adapter);
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 14adc58e71c3..d1f8f225e45a 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -1471,7 +1471,7 @@ EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
1471 * Releases the pages of a packet gather list. We do not own the last 1471 * Releases the pages of a packet gather list. We do not own the last
1472 * page on the list and do not free it. 1472 * page on the list and do not free it.
1473 */ 1473 */
1474void t4_pktgl_free(const struct pkt_gl *gl) 1474static void t4_pktgl_free(const struct pkt_gl *gl)
1475{ 1475{
1476 int n; 1476 int n;
1477 const skb_frag_t *p; 1477 const skb_frag_t *p;
@@ -1524,6 +1524,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1524 skb->truesize += skb->data_len; 1524 skb->truesize += skb->data_len;
1525 skb->ip_summed = CHECKSUM_UNNECESSARY; 1525 skb->ip_summed = CHECKSUM_UNNECESSARY;
1526 skb_record_rx_queue(skb, rxq->rspq.idx); 1526 skb_record_rx_queue(skb, rxq->rspq.idx);
1527 if (rxq->rspq.netdev->features & NETIF_F_RXHASH)
1528 skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1527 1529
1528 if (unlikely(pkt->vlan_ex)) { 1530 if (unlikely(pkt->vlan_ex)) {
1529 struct port_info *pi = netdev_priv(rxq->rspq.netdev); 1531 struct port_info *pi = netdev_priv(rxq->rspq.netdev);
@@ -1565,7 +1567,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1565 if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT)) 1567 if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
1566 return handle_trace_pkt(q->adap, si); 1568 return handle_trace_pkt(q->adap, si);
1567 1569
1568 pkt = (void *)&rsp[1]; 1570 pkt = (const struct cpl_rx_pkt *)rsp;
1569 csum_ok = pkt->csum_calc && !pkt->err_vec; 1571 csum_ok = pkt->csum_calc && !pkt->err_vec;
1570 if ((pkt->l2info & htonl(RXF_TCP)) && 1572 if ((pkt->l2info & htonl(RXF_TCP)) &&
1571 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { 1573 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
@@ -1583,6 +1585,9 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
1583 __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */ 1585 __skb_pull(skb, RX_PKT_PAD); /* remove ethernet header padding */
1584 skb->protocol = eth_type_trans(skb, q->netdev); 1586 skb->protocol = eth_type_trans(skb, q->netdev);
1585 skb_record_rx_queue(skb, q->idx); 1587 skb_record_rx_queue(skb, q->idx);
1588 if (skb->dev->features & NETIF_F_RXHASH)
1589 skb->rxhash = (__force u32)pkt->rsshdr.hash_val;
1590
1586 pi = netdev_priv(skb->dev); 1591 pi = netdev_priv(skb->dev);
1587 rxq->stats.pkts++; 1592 rxq->stats.pkts++;
1588 1593
@@ -2047,7 +2052,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2047 adap->sge.ingr_map[iq->cntxt_id] = iq; 2052 adap->sge.ingr_map[iq->cntxt_id] = iq;
2048 2053
2049 if (fl) { 2054 if (fl) {
2050 fl->cntxt_id = htons(c.fl0id); 2055 fl->cntxt_id = ntohs(c.fl0id);
2051 fl->avail = fl->pend_cred = 0; 2056 fl->avail = fl->pend_cred = 0;
2052 fl->pidx = fl->cidx = 0; 2057 fl->pidx = fl->cidx = 0;
2053 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; 2058 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index a814a3afe123..da272a98fdbc 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -53,8 +53,8 @@
53 * at the time it indicated completion is stored there. Returns 0 if the 53 * at the time it indicated completion is stored there. Returns 0 if the
54 * operation completes and -EAGAIN otherwise. 54 * operation completes and -EAGAIN otherwise.
55 */ 55 */
56int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask, 56static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
57 int polarity, int attempts, int delay, u32 *valp) 57 int polarity, int attempts, int delay, u32 *valp)
58{ 58{
59 while (1) { 59 while (1) {
60 u32 val = t4_read_reg(adapter, reg); 60 u32 val = t4_read_reg(adapter, reg);
@@ -109,9 +109,9 @@ void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
109 * Reads registers that are accessed indirectly through an address/data 109 * Reads registers that are accessed indirectly through an address/data
110 * register pair. 110 * register pair.
111 */ 111 */
112void t4_read_indirect(struct adapter *adap, unsigned int addr_reg, 112static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
113 unsigned int data_reg, u32 *vals, unsigned int nregs, 113 unsigned int data_reg, u32 *vals,
114 unsigned int start_idx) 114 unsigned int nregs, unsigned int start_idx)
115{ 115{
116 while (nregs--) { 116 while (nregs--) {
117 t4_write_reg(adap, addr_reg, start_idx); 117 t4_write_reg(adap, addr_reg, start_idx);
@@ -120,6 +120,7 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
120 } 120 }
121} 121}
122 122
123#if 0
123/** 124/**
124 * t4_write_indirect - write indirectly addressed registers 125 * t4_write_indirect - write indirectly addressed registers
125 * @adap: the adapter 126 * @adap: the adapter
@@ -132,15 +133,16 @@ void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
132 * Writes a sequential block of registers that are accessed indirectly 133 * Writes a sequential block of registers that are accessed indirectly
133 * through an address/data register pair. 134 * through an address/data register pair.
134 */ 135 */
135void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, 136static void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
136 unsigned int data_reg, const u32 *vals, 137 unsigned int data_reg, const u32 *vals,
137 unsigned int nregs, unsigned int start_idx) 138 unsigned int nregs, unsigned int start_idx)
138{ 139{
139 while (nregs--) { 140 while (nregs--) {
140 t4_write_reg(adap, addr_reg, start_idx++); 141 t4_write_reg(adap, addr_reg, start_idx++);
141 t4_write_reg(adap, data_reg, *vals++); 142 t4_write_reg(adap, data_reg, *vals++);
142 } 143 }
143} 144}
145#endif
144 146
145/* 147/*
146 * Get the reply to a mailbox command and store it in @rpl in big-endian order. 148 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
@@ -345,33 +347,21 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
345 return 0; 347 return 0;
346} 348}
347 349
348#define VPD_ENTRY(name, len) \
349 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
350
351/* 350/*
352 * Partial EEPROM Vital Product Data structure. Includes only the ID and 351 * Partial EEPROM Vital Product Data structure. Includes only the ID and
353 * VPD-R sections. 352 * VPD-R header.
354 */ 353 */
355struct t4_vpd { 354struct t4_vpd_hdr {
356 u8 id_tag; 355 u8 id_tag;
357 u8 id_len[2]; 356 u8 id_len[2];
358 u8 id_data[ID_LEN]; 357 u8 id_data[ID_LEN];
359 u8 vpdr_tag; 358 u8 vpdr_tag;
360 u8 vpdr_len[2]; 359 u8 vpdr_len[2];
361 VPD_ENTRY(pn, 16); /* part number */
362 VPD_ENTRY(ec, EC_LEN); /* EC level */
363 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
364 VPD_ENTRY(na, 12); /* MAC address base */
365 VPD_ENTRY(port_type, 8); /* port types */
366 VPD_ENTRY(gpio, 14); /* GPIO usage */
367 VPD_ENTRY(cclk, 6); /* core clock */
368 VPD_ENTRY(port_addr, 8); /* port MDIO addresses */
369 VPD_ENTRY(rv, 1); /* csum */
370 u32 pad; /* for multiple-of-4 sizing and alignment */
371}; 360};
372 361
373#define EEPROM_STAT_ADDR 0x7bfc 362#define EEPROM_STAT_ADDR 0x7bfc
374#define VPD_BASE 0 363#define VPD_BASE 0
364#define VPD_LEN 512
375 365
376/** 366/**
377 * t4_seeprom_wp - enable/disable EEPROM write protection 367 * t4_seeprom_wp - enable/disable EEPROM write protection
@@ -396,16 +386,36 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
396 */ 386 */
397static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) 387static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
398{ 388{
399 int ret; 389 int i, ret;
400 struct t4_vpd vpd; 390 int ec, sn, v2;
401 u8 *q = (u8 *)&vpd, csum; 391 u8 vpd[VPD_LEN], csum;
392 unsigned int vpdr_len;
393 const struct t4_vpd_hdr *v;
402 394
403 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd); 395 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
404 if (ret < 0) 396 if (ret < 0)
405 return ret; 397 return ret;
406 398
407 for (csum = 0; q <= vpd.rv_data; q++) 399 v = (const struct t4_vpd_hdr *)vpd;
408 csum += *q; 400 vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
401 if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
402 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
403 return -EINVAL;
404 }
405
406#define FIND_VPD_KW(var, name) do { \
407 var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
408 vpdr_len, name); \
409 if (var < 0) { \
410 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
411 return -EINVAL; \
412 } \
413 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
414} while (0)
415
416 FIND_VPD_KW(i, "RV");
417 for (csum = 0; i >= 0; i--)
418 csum += vpd[i];
409 419
410 if (csum) { 420 if (csum) {
411 dev_err(adapter->pdev_dev, 421 dev_err(adapter->pdev_dev,
@@ -413,12 +423,18 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
413 return -EINVAL; 423 return -EINVAL;
414 } 424 }
415 425
416 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10); 426 FIND_VPD_KW(ec, "EC");
417 memcpy(p->id, vpd.id_data, sizeof(vpd.id_data)); 427 FIND_VPD_KW(sn, "SN");
428 FIND_VPD_KW(v2, "V2");
429#undef FIND_VPD_KW
430
431 p->cclk = simple_strtoul(vpd + v2, NULL, 10);
432 memcpy(p->id, v->id_data, ID_LEN);
418 strim(p->id); 433 strim(p->id);
419 memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data)); 434 memcpy(p->ec, vpd + ec, EC_LEN);
420 strim(p->ec); 435 strim(p->ec);
421 memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data)); 436 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
437 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
422 strim(p->sn); 438 strim(p->sn);
423 return 0; 439 return 0;
424} 440}
@@ -537,8 +553,8 @@ static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
537 * (i.e., big-endian), otherwise as 32-bit words in the platform's 553 * (i.e., big-endian), otherwise as 32-bit words in the platform's
538 * natural endianess. 554 * natural endianess.
539 */ 555 */
540int t4_read_flash(struct adapter *adapter, unsigned int addr, 556static int t4_read_flash(struct adapter *adapter, unsigned int addr,
541 unsigned int nwords, u32 *data, int byte_oriented) 557 unsigned int nwords, u32 *data, int byte_oriented)
542{ 558{
543 int ret; 559 int ret;
544 560
@@ -870,22 +886,6 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
870 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL); 886 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
871} 887}
872 888
873/**
874 * t4_set_vlan_accel - configure HW VLAN extraction
875 * @adap: the adapter
876 * @ports: bitmap of adapter ports to operate on
877 * @on: enable (1) or disable (0) HW VLAN extraction
878 *
879 * Enables or disables HW extraction of VLAN tags for the ports specified
880 * by @ports. @ports is a bitmap with the ith bit designating the port
881 * associated with the ith adapter channel.
882 */
883void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
884{
885 ports <<= VLANEXTENABLE_SHIFT;
886 t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
887}
888
889struct intr_info { 889struct intr_info {
890 unsigned int mask; /* bits to check in interrupt status */ 890 unsigned int mask; /* bits to check in interrupt status */
891 const char *msg; /* message to print or NULL */ 891 const char *msg; /* message to print or NULL */
@@ -2608,12 +2608,14 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
2608 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change 2608 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2609 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change 2609 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2610 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change 2610 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
2611 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
2611 * @sleep_ok: if true we may sleep while awaiting command completion 2612 * @sleep_ok: if true we may sleep while awaiting command completion
2612 * 2613 *
2613 * Sets Rx properties of a virtual interface. 2614 * Sets Rx properties of a virtual interface.
2614 */ 2615 */
2615int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid, 2616int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2616 int mtu, int promisc, int all_multi, int bcast, bool sleep_ok) 2617 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2618 bool sleep_ok)
2617{ 2619{
2618 struct fw_vi_rxmode_cmd c; 2620 struct fw_vi_rxmode_cmd c;
2619 2621
@@ -2626,15 +2628,18 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2626 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK; 2628 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2627 if (bcast < 0) 2629 if (bcast < 0)
2628 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK; 2630 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
2631 if (vlanex < 0)
2632 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
2629 2633
2630 memset(&c, 0, sizeof(c)); 2634 memset(&c, 0, sizeof(c));
2631 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST | 2635 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2632 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid)); 2636 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2633 c.retval_len16 = htonl(FW_LEN16(c)); 2637 c.retval_len16 = htonl(FW_LEN16(c));
2634 c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) | 2638 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2635 FW_VI_RXMODE_CMD_PROMISCEN(promisc) | 2639 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2636 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) | 2640 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2637 FW_VI_RXMODE_CMD_BROADCASTEN(bcast)); 2641 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2642 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
2638 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok); 2643 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2639} 2644}
2640 2645
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index fdb117443144..7a981b81afaf 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -503,6 +503,7 @@ struct cpl_rx_data_ack {
503}; 503};
504 504
505struct cpl_rx_pkt { 505struct cpl_rx_pkt {
506 struct rss_header rsshdr;
506 u8 opcode; 507 u8 opcode;
507#if defined(__LITTLE_ENDIAN_BITFIELD) 508#if defined(__LITTLE_ENDIAN_BITFIELD)
508 u8 iff:4; 509 u8 iff:4;
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 3393d05a388a..63991d68950e 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -876,7 +876,7 @@ struct fw_vi_mac_cmd {
876struct fw_vi_rxmode_cmd { 876struct fw_vi_rxmode_cmd {
877 __be32 op_to_viid; 877 __be32 op_to_viid;
878 __be32 retval_len16; 878 __be32 retval_len16;
879 __be32 mtu_to_broadcasten; 879 __be32 mtu_to_vlanexen;
880 __be32 r4_lo; 880 __be32 r4_lo;
881}; 881};
882 882
@@ -888,6 +888,8 @@ struct fw_vi_rxmode_cmd {
888#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12) 888#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12)
889#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3 889#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3
890#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10) 890#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10)
891#define FW_VI_RXMODE_CMD_VLANEXEN_MASK 0x3
892#define FW_VI_RXMODE_CMD_VLANEXEN(x) ((x) << 8)
891 893
892struct fw_vi_enable_cmd { 894struct fw_vi_enable_cmd {
893 __be32 op_to_viid; 895 __be32 op_to_viid;
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2b8edd2efbf6..08e82b1a0b33 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -952,13 +952,14 @@ static void emac_dev_mcast_set(struct net_device *ndev)
952 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); 952 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
953 } 953 }
954 if (!netdev_mc_empty(ndev)) { 954 if (!netdev_mc_empty(ndev)) {
955 struct dev_mc_list *mc_ptr; 955 struct netdev_hw_addr *ha;
956
956 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); 957 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
957 emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL); 958 emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
958 /* program multicast address list into EMAC hardware */ 959 /* program multicast address list into EMAC hardware */
959 netdev_for_each_mc_addr(mc_ptr, ndev) { 960 netdev_for_each_mc_addr(ha, ndev) {
960 emac_add_mcast(priv, EMAC_MULTICAST_ADD, 961 emac_add_mcast(priv, EMAC_MULTICAST_ADD,
961 (u8 *) mc_ptr->dmi_addr); 962 (u8 *) ha->addr);
962 } 963 }
963 } else { 964 } else {
964 mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST); 965 mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
@@ -1467,7 +1468,6 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
1467 tx_buf.length = skb->len; 1468 tx_buf.length = skb->len;
1468 tx_buf.buf_token = (void *)skb; 1469 tx_buf.buf_token = (void *)skb;
1469 tx_buf.data_ptr = skb->data; 1470 tx_buf.data_ptr = skb->data;
1470 ndev->trans_start = jiffies;
1471 ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH); 1471 ret_code = emac_send(priv, &tx_packet, EMAC_DEF_TX_CH);
1472 if (unlikely(ret_code != 0)) { 1472 if (unlikely(ret_code != 0)) {
1473 if (ret_code == EMAC_ERR_TX_OUT_OF_BD) { 1473 if (ret_code == EMAC_ERR_TX_OUT_OF_BD) {
diff --git a/drivers/net/de600.c b/drivers/net/de600.c
index 6b13f4fd2e96..23a65398d011 100644
--- a/drivers/net/de600.c
+++ b/drivers/net/de600.c
@@ -166,8 +166,8 @@ static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
166 int i; 166 int i;
167 167
168 if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */ 168 if (free_tx_pages <= 0) { /* Do timeouts, to avoid hangs. */
169 tickssofar = jiffies - dev->trans_start; 169 tickssofar = jiffies - dev_trans_start(dev);
170 if (tickssofar < 5) 170 if (tickssofar < HZ/20)
171 return NETDEV_TX_BUSY; 171 return NETDEV_TX_BUSY;
172 /* else */ 172 /* else */
173 printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem"); 173 printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index a0a6830b5e6d..f3650fd096f4 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -535,7 +535,6 @@ static int de620_start_xmit(struct sk_buff *skb, struct net_device *dev)
535 } 535 }
536 de620_write_block(dev, buffer, skb->len, len-skb->len); 536 de620_write_block(dev, buffer, skb->len, len-skb->len);
537 537
538 dev->trans_start = jiffies;
539 if(!(using_txbuf == (TXBF0 | TXBF1))) 538 if(!(using_txbuf == (TXBF0 | TXBF1)))
540 netif_wake_queue(dev); 539 netif_wake_queue(dev);
541 540
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 8cf3cc6f20e2..1d973db27c32 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -874,7 +874,7 @@ static inline int lance_reset(struct net_device *dev)
874 874
875 lance_init_ring(dev); 875 lance_init_ring(dev);
876 load_csrs(lp); 876 load_csrs(lp);
877 dev->trans_start = jiffies; 877 dev->trans_start = jiffies; /* prevent tx timeout */
878 status = init_restart_lance(lp); 878 status = init_restart_lance(lp);
879 return status; 879 return status;
880} 880}
@@ -930,7 +930,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
930 930
931 spin_unlock_irqrestore(&lp->lock, flags); 931 spin_unlock_irqrestore(&lp->lock, flags);
932 932
933 dev->trans_start = jiffies;
934 dev_kfree_skb(skb); 933 dev_kfree_skb(skb);
935 934
936 return NETDEV_TX_OK; 935 return NETDEV_TX_OK;
@@ -940,7 +939,7 @@ static void lance_load_multicast(struct net_device *dev)
940{ 939{
941 struct lance_private *lp = netdev_priv(dev); 940 struct lance_private *lp = netdev_priv(dev);
942 volatile u16 *ib = (volatile u16 *)dev->mem_start; 941 volatile u16 *ib = (volatile u16 *)dev->mem_start;
943 struct dev_mc_list *dmi; 942 struct netdev_hw_addr *ha;
944 char *addrs; 943 char *addrs;
945 u32 crc; 944 u32 crc;
946 945
@@ -959,8 +958,8 @@ static void lance_load_multicast(struct net_device *dev)
959 *lib_ptr(ib, filter[3], lp->type) = 0; 958 *lib_ptr(ib, filter[3], lp->type) = 0;
960 959
961 /* Add addresses */ 960 /* Add addresses */
962 netdev_for_each_mc_addr(dmi, dev) { 961 netdev_for_each_mc_addr(ha, dev) {
963 addrs = dmi->dmi_addr; 962 addrs = ha->addr;
964 963
965 /* multicast address? */ 964 /* multicast address? */
966 if (!(*addrs & 1)) 965 if (!(*addrs & 1))
@@ -970,7 +969,6 @@ static void lance_load_multicast(struct net_device *dev)
970 crc = crc >> 26; 969 crc = crc >> 26;
971 *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf); 970 *lib_ptr(ib, filter[crc >> 4], lp->type) |= 1 << (crc & 0xf);
972 } 971 }
973 return;
974} 972}
975 973
976static void lance_set_multicast(struct net_device *dev) 974static void lance_set_multicast(struct net_device *dev)
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index ed53a8d45f4e..e5667c55844e 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -2195,7 +2195,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
2195{ 2195{
2196 DFX_board_t *bp = netdev_priv(dev); 2196 DFX_board_t *bp = netdev_priv(dev);
2197 int i; /* used as index in for loop */ 2197 int i; /* used as index in for loop */
2198 struct dev_mc_list *dmi; /* ptr to multicast addr entry */ 2198 struct netdev_hw_addr *ha;
2199 2199
2200 /* Enable LLC frame promiscuous mode, if necessary */ 2200 /* Enable LLC frame promiscuous mode, if necessary */
2201 2201
@@ -2241,9 +2241,9 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
2241 /* Copy addresses to multicast address table, then update adapter CAM */ 2241 /* Copy addresses to multicast address table, then update adapter CAM */
2242 2242
2243 i = 0; 2243 i = 0;
2244 netdev_for_each_mc_addr(dmi, dev) 2244 netdev_for_each_mc_addr(ha, dev)
2245 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN], 2245 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2246 dmi->dmi_addr, FDDI_K_ALEN); 2246 ha->addr, FDDI_K_ALEN);
2247 2247
2248 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) 2248 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2249 { 2249 {
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 744c1928dfca..bf66e9b3b19e 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -921,7 +921,7 @@ static void depca_tx_timeout(struct net_device *dev)
921 STOP_DEPCA; 921 STOP_DEPCA;
922 depca_init_ring(dev); 922 depca_init_ring(dev);
923 LoadCSRs(dev); 923 LoadCSRs(dev);
924 dev->trans_start = jiffies; 924 dev->trans_start = jiffies; /* prevent tx timeout */
925 netif_wake_queue(dev); 925 netif_wake_queue(dev);
926 InitRestartDepca(dev); 926 InitRestartDepca(dev);
927} 927}
@@ -954,7 +954,6 @@ static netdev_tx_t depca_start_xmit(struct sk_buff *skb,
954 outw(CSR0, DEPCA_ADDR); 954 outw(CSR0, DEPCA_ADDR);
955 outw(INEA | TDMD, DEPCA_DATA); 955 outw(INEA | TDMD, DEPCA_DATA);
956 956
957 dev->trans_start = jiffies;
958 dev_kfree_skb(skb); 957 dev_kfree_skb(skb);
959 } 958 }
960 if (TX_BUFFS_AVAIL) 959 if (TX_BUFFS_AVAIL)
@@ -1204,8 +1203,6 @@ static void LoadCSRs(struct net_device *dev)
1204 outw(ACON, DEPCA_DATA); 1203 outw(ACON, DEPCA_DATA);
1205 1204
1206 outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */ 1205 outw(CSR0, DEPCA_ADDR); /* Point back to CSR0 */
1207
1208 return;
1209} 1206}
1210 1207
1211static int InitRestartDepca(struct net_device *dev) 1208static int InitRestartDepca(struct net_device *dev)
@@ -1272,7 +1269,7 @@ static void set_multicast_list(struct net_device *dev)
1272static void SetMulticastFilter(struct net_device *dev) 1269static void SetMulticastFilter(struct net_device *dev)
1273{ 1270{
1274 struct depca_private *lp = netdev_priv(dev); 1271 struct depca_private *lp = netdev_priv(dev);
1275 struct dev_mc_list *dmi; 1272 struct netdev_hw_addr *ha;
1276 char *addrs; 1273 char *addrs;
1277 int i, j, bit, byte; 1274 int i, j, bit, byte;
1278 u16 hashcode; 1275 u16 hashcode;
@@ -1287,8 +1284,8 @@ static void SetMulticastFilter(struct net_device *dev)
1287 lp->init_block.mcast_table[i] = 0; 1284 lp->init_block.mcast_table[i] = 0;
1288 } 1285 }
1289 /* Add multicast addresses */ 1286 /* Add multicast addresses */
1290 netdev_for_each_mc_addr(dmi, dev) { 1287 netdev_for_each_mc_addr(ha, dev) {
1291 addrs = dmi->dmi_addr; 1288 addrs = ha->addr;
1292 if ((*addrs & 0x01) == 1) { /* multicast address? */ 1289 if ((*addrs & 0x01) == 1) { /* multicast address? */
1293 crc = ether_crc(ETH_ALEN, addrs); 1290 crc = ether_crc(ETH_ALEN, addrs);
1294 hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */ 1291 hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
@@ -1303,8 +1300,6 @@ static void SetMulticastFilter(struct net_device *dev)
1303 } 1300 }
1304 } 1301 }
1305 } 1302 }
1306
1307 return;
1308} 1303}
1309 1304
1310static int __init depca_common_init (u_long ioaddr, struct net_device **devp) 1305static int __init depca_common_init (u_long ioaddr, struct net_device **devp)
@@ -1909,8 +1904,6 @@ static void depca_dbg_open(struct net_device *dev)
1909 outw(CSR3, DEPCA_ADDR); 1904 outw(CSR3, DEPCA_ADDR);
1910 printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA)); 1905 printk("CSR3: 0x%4.4x\n", inw(DEPCA_DATA));
1911 } 1906 }
1912
1913 return;
1914} 1907}
1915 1908
1916/* 1909/*
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index b05bad829827..a2f238d20caa 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -596,8 +596,6 @@ alloc_list (struct net_device *dev)
596 /* Set RFDListPtr */ 596 /* Set RFDListPtr */
597 writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0); 597 writel (np->rx_ring_dma, dev->base_addr + RFDListPtr0);
598 writel (0, dev->base_addr + RFDListPtr1); 598 writel (0, dev->base_addr + RFDListPtr1);
599
600 return;
601} 599}
602 600
603static netdev_tx_t 601static netdev_tx_t
@@ -1132,14 +1130,14 @@ set_multicast (struct net_device *dev)
1132 /* Receive broadcast and multicast frames */ 1130 /* Receive broadcast and multicast frames */
1133 rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast; 1131 rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
1134 } else if (!netdev_mc_empty(dev)) { 1132 } else if (!netdev_mc_empty(dev)) {
1135 struct dev_mc_list *mclist; 1133 struct netdev_hw_addr *ha;
1136 /* Receive broadcast frames and multicast frames filtering 1134 /* Receive broadcast frames and multicast frames filtering
1137 by Hashtable */ 1135 by Hashtable */
1138 rx_mode = 1136 rx_mode =
1139 ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast; 1137 ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
1140 netdev_for_each_mc_addr(mclist, dev) { 1138 netdev_for_each_mc_addr(ha, dev) {
1141 int bit, index = 0; 1139 int bit, index = 0;
1142 int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr); 1140 int crc = ether_crc_le(ETH_ALEN, ha->addr);
1143 /* The inverted high significant 6 bits of CRC are 1141 /* The inverted high significant 6 bits of CRC are
1144 used as an index to hashtable */ 1142 used as an index to hashtable */
1145 for (bit = 0; bit < 6; bit++) 1143 for (bit = 0; bit < 6; bit++)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 7f9960f718e3..abcc838e18af 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -476,17 +476,13 @@ static uint32_t dm9000_get_rx_csum(struct net_device *dev)
476 return dm->rx_csum; 476 return dm->rx_csum;
477} 477}
478 478
479static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data) 479static int dm9000_set_rx_csum_unlocked(struct net_device *dev, uint32_t data)
480{ 480{
481 board_info_t *dm = to_dm9000_board(dev); 481 board_info_t *dm = to_dm9000_board(dev);
482 unsigned long flags;
483 482
484 if (dm->can_csum) { 483 if (dm->can_csum) {
485 dm->rx_csum = data; 484 dm->rx_csum = data;
486
487 spin_lock_irqsave(&dm->lock, flags);
488 iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0); 485 iow(dm, DM9000_RCSR, dm->rx_csum ? RCSR_CSUM : 0);
489 spin_unlock_irqrestore(&dm->lock, flags);
490 486
491 return 0; 487 return 0;
492 } 488 }
@@ -494,6 +490,19 @@ static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
494 return -EOPNOTSUPP; 490 return -EOPNOTSUPP;
495} 491}
496 492
493static int dm9000_set_rx_csum(struct net_device *dev, uint32_t data)
494{
495 board_info_t *dm = to_dm9000_board(dev);
496 unsigned long flags;
497 int ret;
498
499 spin_lock_irqsave(&dm->lock, flags);
500 ret = dm9000_set_rx_csum_unlocked(dev, data);
501 spin_unlock_irqrestore(&dm->lock, flags);
502
503 return ret;
504}
505
497static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data) 506static int dm9000_set_tx_csum(struct net_device *dev, uint32_t data)
498{ 507{
499 board_info_t *dm = to_dm9000_board(dev); 508 board_info_t *dm = to_dm9000_board(dev);
@@ -722,20 +731,17 @@ static unsigned char dm9000_type_to_char(enum dm9000_type type)
722 * Set DM9000 multicast address 731 * Set DM9000 multicast address
723 */ 732 */
724static void 733static void
725dm9000_hash_table(struct net_device *dev) 734dm9000_hash_table_unlocked(struct net_device *dev)
726{ 735{
727 board_info_t *db = netdev_priv(dev); 736 board_info_t *db = netdev_priv(dev);
728 struct dev_mc_list *mcptr; 737 struct netdev_hw_addr *ha;
729 int i, oft; 738 int i, oft;
730 u32 hash_val; 739 u32 hash_val;
731 u16 hash_table[4]; 740 u16 hash_table[4];
732 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN; 741 u8 rcr = RCR_DIS_LONG | RCR_DIS_CRC | RCR_RXEN;
733 unsigned long flags;
734 742
735 dm9000_dbg(db, 1, "entering %s\n", __func__); 743 dm9000_dbg(db, 1, "entering %s\n", __func__);
736 744
737 spin_lock_irqsave(&db->lock, flags);
738
739 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++) 745 for (i = 0, oft = DM9000_PAR; i < 6; i++, oft++)
740 iow(db, oft, dev->dev_addr[i]); 746 iow(db, oft, dev->dev_addr[i]);
741 747
@@ -753,8 +759,8 @@ dm9000_hash_table(struct net_device *dev)
753 rcr |= RCR_ALL; 759 rcr |= RCR_ALL;
754 760
755 /* the multicast address in Hash Table : 64 bits */ 761 /* the multicast address in Hash Table : 64 bits */
756 netdev_for_each_mc_addr(mcptr, dev) { 762 netdev_for_each_mc_addr(ha, dev) {
757 hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f; 763 hash_val = ether_crc_le(6, ha->addr) & 0x3f;
758 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 764 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
759 } 765 }
760 766
@@ -765,11 +771,21 @@ dm9000_hash_table(struct net_device *dev)
765 } 771 }
766 772
767 iow(db, DM9000_RCR, rcr); 773 iow(db, DM9000_RCR, rcr);
774}
775
776static void
777dm9000_hash_table(struct net_device *dev)
778{
779 board_info_t *db = netdev_priv(dev);
780 unsigned long flags;
781
782 spin_lock_irqsave(&db->lock, flags);
783 dm9000_hash_table_unlocked(dev);
768 spin_unlock_irqrestore(&db->lock, flags); 784 spin_unlock_irqrestore(&db->lock, flags);
769} 785}
770 786
771/* 787/*
772 * Initilize dm9000 board 788 * Initialize dm9000 board
773 */ 789 */
774static void 790static void
775dm9000_init_dm9000(struct net_device *dev) 791dm9000_init_dm9000(struct net_device *dev)
@@ -784,7 +800,7 @@ dm9000_init_dm9000(struct net_device *dev)
784 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */ 800 db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
785 801
786 /* Checksum mode */ 802 /* Checksum mode */
787 dm9000_set_rx_csum(dev, db->rx_csum); 803 dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
788 804
789 /* GPIO0 on pre-activate PHY */ 805 /* GPIO0 on pre-activate PHY */
790 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ 806 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
@@ -811,7 +827,7 @@ dm9000_init_dm9000(struct net_device *dev)
811 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */ 827 iow(db, DM9000_ISR, ISR_CLR_STATUS); /* Clear interrupt status */
812 828
813 /* Set address filter table */ 829 /* Set address filter table */
814 dm9000_hash_table(dev); 830 dm9000_hash_table_unlocked(dev);
815 831
816 imr = IMR_PAR | IMR_PTM | IMR_PRM; 832 imr = IMR_PAR | IMR_PTM | IMR_PRM;
817 if (db->type != TYPE_DM9000E) 833 if (db->type != TYPE_DM9000E)
@@ -825,7 +841,7 @@ dm9000_init_dm9000(struct net_device *dev)
825 /* Init Driver variable */ 841 /* Init Driver variable */
826 db->tx_pkt_cnt = 0; 842 db->tx_pkt_cnt = 0;
827 db->queue_pkt_len = 0; 843 db->queue_pkt_len = 0;
828 dev->trans_start = 0; 844 dev->trans_start = jiffies;
829} 845}
830 846
831/* Our watchdog timed out. Called by the networking layer */ 847/* Our watchdog timed out. Called by the networking layer */
@@ -843,7 +859,7 @@ static void dm9000_timeout(struct net_device *dev)
843 dm9000_reset(db); 859 dm9000_reset(db);
844 dm9000_init_dm9000(dev); 860 dm9000_init_dm9000(dev);
845 /* We can accept TX packets again */ 861 /* We can accept TX packets again */
846 dev->trans_start = jiffies; 862 dev->trans_start = jiffies; /* prevent tx timeout */
847 netif_wake_queue(dev); 863 netif_wake_queue(dev);
848 864
849 /* Restore previous register address */ 865 /* Restore previous register address */
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 234685213f1a..8b0f50bbf3e5 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -594,8 +594,6 @@ static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
594 594
595 spin_unlock_irqrestore(&bp->lock, flags); 595 spin_unlock_irqrestore(&bp->lock, flags);
596 596
597 dev->trans_start = jiffies;
598
599 return NETDEV_TX_OK; 597 return NETDEV_TX_OK;
600} 598}
601 599
@@ -918,7 +916,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
918 916
919 dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n", 917 dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
920 bp->regs, mem_base, dev->irq, dev->dev_addr); 918 bp->regs, mem_base, dev->irq, dev->dev_addr);
921 dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma \n", 919 dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
922 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ", 920 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
923 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", 921 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
924 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", 922 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index b997e578e58f..b194bad29ace 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -147,6 +147,8 @@
147 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs 147 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
148 */ 148 */
149 149
150#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
151
150#include <linux/module.h> 152#include <linux/module.h>
151#include <linux/moduleparam.h> 153#include <linux/moduleparam.h>
152#include <linux/kernel.h> 154#include <linux/kernel.h>
@@ -166,6 +168,7 @@
166#include <linux/ethtool.h> 168#include <linux/ethtool.h>
167#include <linux/string.h> 169#include <linux/string.h>
168#include <linux/firmware.h> 170#include <linux/firmware.h>
171#include <linux/rtnetlink.h>
169#include <asm/unaligned.h> 172#include <asm/unaligned.h>
170 173
171 174
@@ -174,7 +177,6 @@
174#define DRV_VERSION "3.5.24-k2"DRV_EXT 177#define DRV_VERSION "3.5.24-k2"DRV_EXT
175#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 178#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
176#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" 179#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
177#define PFX DRV_NAME ": "
178 180
179#define E100_WATCHDOG_PERIOD (2 * HZ) 181#define E100_WATCHDOG_PERIOD (2 * HZ)
180#define E100_NAPI_WEIGHT 16 182#define E100_NAPI_WEIGHT 16
@@ -200,10 +202,6 @@ module_param(use_io, int, 0);
200MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 202MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
201MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); 203MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
202MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); 204MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
203#define DPRINTK(nlevel, klevel, fmt, args...) \
204 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
205 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
206 __func__ , ## args))
207 205
208#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ 206#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
209 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ 207 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
@@ -689,12 +687,13 @@ static int e100_self_test(struct nic *nic)
689 687
690 /* Check results of self-test */ 688 /* Check results of self-test */
691 if (nic->mem->selftest.result != 0) { 689 if (nic->mem->selftest.result != 0) {
692 DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n", 690 netif_err(nic, hw, nic->netdev,
693 nic->mem->selftest.result); 691 "Self-test failed: result=0x%08X\n",
692 nic->mem->selftest.result);
694 return -ETIMEDOUT; 693 return -ETIMEDOUT;
695 } 694 }
696 if (nic->mem->selftest.signature == 0) { 695 if (nic->mem->selftest.signature == 0) {
697 DPRINTK(HW, ERR, "Self-test failed: timed out\n"); 696 netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
698 return -ETIMEDOUT; 697 return -ETIMEDOUT;
699 } 698 }
700 699
@@ -797,7 +796,7 @@ static int e100_eeprom_load(struct nic *nic)
797 /* The checksum, stored in the last word, is calculated such that 796 /* The checksum, stored in the last word, is calculated such that
798 * the sum of words should be 0xBABA */ 797 * the sum of words should be 0xBABA */
799 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { 798 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
800 DPRINTK(PROBE, ERR, "EEPROM corrupted\n"); 799 netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
801 if (!eeprom_bad_csum_allow) 800 if (!eeprom_bad_csum_allow)
802 return -EAGAIN; 801 return -EAGAIN;
803 } 802 }
@@ -953,8 +952,7 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
953 udelay(20); 952 udelay(20);
954 } 953 }
955 if (unlikely(!i)) { 954 if (unlikely(!i)) {
956 printk("e100.mdio_ctrl(%s) won't go Ready\n", 955 netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
957 nic->netdev->name );
958 spin_unlock_irqrestore(&nic->mdio_lock, flags); 956 spin_unlock_irqrestore(&nic->mdio_lock, flags);
959 return 0; /* No way to indicate timeout error */ 957 return 0; /* No way to indicate timeout error */
960 } 958 }
@@ -966,9 +964,10 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
966 break; 964 break;
967 } 965 }
968 spin_unlock_irqrestore(&nic->mdio_lock, flags); 966 spin_unlock_irqrestore(&nic->mdio_lock, flags);
969 DPRINTK(HW, DEBUG, 967 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
970 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", 968 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
971 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out); 969 dir == mdi_read ? "READ" : "WRITE",
970 addr, reg, data, data_out);
972 return (u16)data_out; 971 return (u16)data_out;
973} 972}
974 973
@@ -1028,17 +1027,19 @@ static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
1028 return ADVERTISE_10HALF | 1027 return ADVERTISE_10HALF |
1029 ADVERTISE_10FULL; 1028 ADVERTISE_10FULL;
1030 default: 1029 default:
1031 DPRINTK(HW, DEBUG, 1030 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1032 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", 1031 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1033 dir == mdi_read ? "READ" : "WRITE", addr, reg, data); 1032 dir == mdi_read ? "READ" : "WRITE",
1033 addr, reg, data);
1034 return 0xFFFF; 1034 return 0xFFFF;
1035 } 1035 }
1036 } else { 1036 } else {
1037 switch (reg) { 1037 switch (reg) {
1038 default: 1038 default:
1039 DPRINTK(HW, DEBUG, 1039 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1040 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", 1040 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1041 dir == mdi_read ? "READ" : "WRITE", addr, reg, data); 1041 dir == mdi_read ? "READ" : "WRITE",
1042 addr, reg, data);
1042 return 0xFFFF; 1043 return 0xFFFF;
1043 } 1044 }
1044 } 1045 }
@@ -1155,12 +1156,15 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1155 } 1156 }
1156 } 1157 }
1157 1158
1158 DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", 1159 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1159 c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]); 1160 "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1160 DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", 1161 c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
1161 c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]); 1162 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1162 DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", 1163 "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1163 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); 1164 c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
1165 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1166 "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1167 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
1164} 1168}
1165 1169
1166/************************************************************************* 1170/*************************************************************************
@@ -1253,16 +1257,18 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1253 err = request_firmware(&fw, fw_name, &nic->pdev->dev); 1257 err = request_firmware(&fw, fw_name, &nic->pdev->dev);
1254 1258
1255 if (err) { 1259 if (err) {
1256 DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n", 1260 netif_err(nic, probe, nic->netdev,
1257 fw_name, err); 1261 "Failed to load firmware \"%s\": %d\n",
1262 fw_name, err);
1258 return ERR_PTR(err); 1263 return ERR_PTR(err);
1259 } 1264 }
1260 1265
1261 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes 1266 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
1262 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ 1267 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
1263 if (fw->size != UCODE_SIZE * 4 + 3) { 1268 if (fw->size != UCODE_SIZE * 4 + 3) {
1264 DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n", 1269 netif_err(nic, probe, nic->netdev,
1265 fw_name, fw->size); 1270 "Firmware \"%s\" has wrong size %zu\n",
1271 fw_name, fw->size);
1266 release_firmware(fw); 1272 release_firmware(fw);
1267 return ERR_PTR(-EINVAL); 1273 return ERR_PTR(-EINVAL);
1268 } 1274 }
@@ -1274,9 +1280,9 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1274 1280
1275 if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || 1281 if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
1276 min_size >= UCODE_SIZE) { 1282 min_size >= UCODE_SIZE) {
1277 DPRINTK(PROBE, ERR, 1283 netif_err(nic, probe, nic->netdev,
1278 "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", 1284 "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
1279 fw_name, timer, bundle, min_size); 1285 fw_name, timer, bundle, min_size);
1280 release_firmware(fw); 1286 release_firmware(fw);
1281 return ERR_PTR(-EINVAL); 1287 return ERR_PTR(-EINVAL);
1282 } 1288 }
@@ -1328,7 +1334,8 @@ static inline int e100_load_ucode_wait(struct nic *nic)
1328 return PTR_ERR(fw); 1334 return PTR_ERR(fw);
1329 1335
1330 if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) 1336 if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
1331 DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err); 1337 netif_err(nic, probe, nic->netdev,
1338 "ucode cmd failed with error %d\n", err);
1332 1339
1333 /* must restart cuc */ 1340 /* must restart cuc */
1334 nic->cuc_cmd = cuc_start; 1341 nic->cuc_cmd = cuc_start;
@@ -1348,7 +1355,7 @@ static inline int e100_load_ucode_wait(struct nic *nic)
1348 1355
1349 /* if the command failed, or is not OK, notify and return */ 1356 /* if the command failed, or is not OK, notify and return */
1350 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { 1357 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1351 DPRINTK(PROBE,ERR, "ucode load failed\n"); 1358 netif_err(nic, probe, nic->netdev, "ucode load failed\n");
1352 err = -EPERM; 1359 err = -EPERM;
1353 } 1360 }
1354 1361
@@ -1386,8 +1393,8 @@ static int e100_phy_check_without_mii(struct nic *nic)
1386 * media is sensed automatically based on how the link partner 1393 * media is sensed automatically based on how the link partner
1387 * is configured. This is, in essence, manual configuration. 1394 * is configured. This is, in essence, manual configuration.
1388 */ 1395 */
1389 DPRINTK(PROBE, INFO, 1396 netif_info(nic, probe, nic->netdev,
1390 "found MII-less i82503 or 80c24 or other PHY\n"); 1397 "found MII-less i82503 or 80c24 or other PHY\n");
1391 1398
1392 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; 1399 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
1393 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ 1400 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
@@ -1434,18 +1441,20 @@ static int e100_phy_init(struct nic *nic)
1434 return 0; /* simply return and hope for the best */ 1441 return 0; /* simply return and hope for the best */
1435 else { 1442 else {
1436 /* for unknown cases log a fatal error */ 1443 /* for unknown cases log a fatal error */
1437 DPRINTK(HW, ERR, 1444 netif_err(nic, hw, nic->netdev,
1438 "Failed to locate any known PHY, aborting.\n"); 1445 "Failed to locate any known PHY, aborting\n");
1439 return -EAGAIN; 1446 return -EAGAIN;
1440 } 1447 }
1441 } else 1448 } else
1442 DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id); 1449 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1450 "phy_addr = %d\n", nic->mii.phy_id);
1443 1451
1444 /* Get phy ID */ 1452 /* Get phy ID */
1445 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); 1453 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
1446 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); 1454 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
1447 nic->phy = (u32)id_hi << 16 | (u32)id_lo; 1455 nic->phy = (u32)id_hi << 16 | (u32)id_lo;
1448 DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy); 1456 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1457 "phy ID = 0x%08X\n", nic->phy);
1449 1458
1450 /* Select the phy and isolate the rest */ 1459 /* Select the phy and isolate the rest */
1451 for (addr = 0; addr < 32; addr++) { 1460 for (addr = 0; addr < 32; addr++) {
@@ -1507,7 +1516,7 @@ static int e100_hw_init(struct nic *nic)
1507 1516
1508 e100_hw_reset(nic); 1517 e100_hw_reset(nic);
1509 1518
1510 DPRINTK(HW, ERR, "e100_hw_init\n"); 1519 netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
1511 if (!in_interrupt() && (err = e100_self_test(nic))) 1520 if (!in_interrupt() && (err = e100_self_test(nic)))
1512 return err; 1521 return err;
1513 1522
@@ -1537,16 +1546,16 @@ static int e100_hw_init(struct nic *nic)
1537static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1546static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1538{ 1547{
1539 struct net_device *netdev = nic->netdev; 1548 struct net_device *netdev = nic->netdev;
1540 struct dev_mc_list *list; 1549 struct netdev_hw_addr *ha;
1541 u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); 1550 u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
1542 1551
1543 cb->command = cpu_to_le16(cb_multi); 1552 cb->command = cpu_to_le16(cb_multi);
1544 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); 1553 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1545 i = 0; 1554 i = 0;
1546 netdev_for_each_mc_addr(list, netdev) { 1555 netdev_for_each_mc_addr(ha, netdev) {
1547 if (i == count) 1556 if (i == count)
1548 break; 1557 break;
1549 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &list->dmi_addr, 1558 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
1550 ETH_ALEN); 1559 ETH_ALEN);
1551 } 1560 }
1552} 1561}
@@ -1555,8 +1564,9 @@ static void e100_set_multicast_list(struct net_device *netdev)
1555{ 1564{
1556 struct nic *nic = netdev_priv(netdev); 1565 struct nic *nic = netdev_priv(netdev);
1557 1566
1558 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n", 1567 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1559 netdev_mc_count(netdev), netdev->flags); 1568 "mc_count=%d, flags=0x%04X\n",
1569 netdev_mc_count(netdev), netdev->flags);
1560 1570
1561 if (netdev->flags & IFF_PROMISC) 1571 if (netdev->flags & IFF_PROMISC)
1562 nic->flags |= promiscuous; 1572 nic->flags |= promiscuous;
@@ -1629,7 +1639,8 @@ static void e100_update_stats(struct nic *nic)
1629 1639
1630 1640
1631 if (e100_exec_cmd(nic, cuc_dump_reset, 0)) 1641 if (e100_exec_cmd(nic, cuc_dump_reset, 0))
1632 DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n"); 1642 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1643 "exec cuc_dump_reset failed\n");
1633} 1644}
1634 1645
1635static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) 1646static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1659,20 +1670,19 @@ static void e100_watchdog(unsigned long data)
1659 struct nic *nic = (struct nic *)data; 1670 struct nic *nic = (struct nic *)data;
1660 struct ethtool_cmd cmd; 1671 struct ethtool_cmd cmd;
1661 1672
1662 DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies); 1673 netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
1674 "right now = %ld\n", jiffies);
1663 1675
1664 /* mii library handles link maintenance tasks */ 1676 /* mii library handles link maintenance tasks */
1665 1677
1666 mii_ethtool_gset(&nic->mii, &cmd); 1678 mii_ethtool_gset(&nic->mii, &cmd);
1667 1679
1668 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { 1680 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1669 printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n", 1681 netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
1670 nic->netdev->name, 1682 cmd.speed == SPEED_100 ? 100 : 10,
1671 cmd.speed == SPEED_100 ? "100" : "10", 1683 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1672 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1673 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { 1684 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1674 printk(KERN_INFO "e100: %s NIC Link is Down\n", 1685 netdev_info(nic->netdev, "NIC Link is Down\n");
1675 nic->netdev->name);
1676 } 1686 }
1677 1687
1678 mii_check_link(&nic->mii); 1688 mii_check_link(&nic->mii);
@@ -1732,7 +1742,8 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1732 Issue a NOP command followed by a 1us delay before 1742 Issue a NOP command followed by a 1us delay before
1733 issuing the Tx command. */ 1743 issuing the Tx command. */
1734 if (e100_exec_cmd(nic, cuc_nop, 0)) 1744 if (e100_exec_cmd(nic, cuc_nop, 0))
1735 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n"); 1745 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1746 "exec cuc_nop failed\n");
1736 udelay(1); 1747 udelay(1);
1737 } 1748 }
1738 1749
@@ -1741,17 +1752,18 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1741 switch (err) { 1752 switch (err) {
1742 case -ENOSPC: 1753 case -ENOSPC:
1743 /* We queued the skb, but now we're out of space. */ 1754 /* We queued the skb, but now we're out of space. */
1744 DPRINTK(TX_ERR, DEBUG, "No space for CB\n"); 1755 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1756 "No space for CB\n");
1745 netif_stop_queue(netdev); 1757 netif_stop_queue(netdev);
1746 break; 1758 break;
1747 case -ENOMEM: 1759 case -ENOMEM:
1748 /* This is a hard error - log it. */ 1760 /* This is a hard error - log it. */
1749 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n"); 1761 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1762 "Out of Tx resources, returning skb\n");
1750 netif_stop_queue(netdev); 1763 netif_stop_queue(netdev);
1751 return NETDEV_TX_BUSY; 1764 return NETDEV_TX_BUSY;
1752 } 1765 }
1753 1766
1754 netdev->trans_start = jiffies;
1755 return NETDEV_TX_OK; 1767 return NETDEV_TX_OK;
1756} 1768}
1757 1769
@@ -1767,9 +1779,10 @@ static int e100_tx_clean(struct nic *nic)
1767 for (cb = nic->cb_to_clean; 1779 for (cb = nic->cb_to_clean;
1768 cb->status & cpu_to_le16(cb_complete); 1780 cb->status & cpu_to_le16(cb_complete);
1769 cb = nic->cb_to_clean = cb->next) { 1781 cb = nic->cb_to_clean = cb->next) {
1770 DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n", 1782 netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
1771 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), 1783 "cb[%d]->status = 0x%04X\n",
1772 cb->status); 1784 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
1785 cb->status);
1773 1786
1774 if (likely(cb->skb != NULL)) { 1787 if (likely(cb->skb != NULL)) {
1775 dev->stats.tx_packets++; 1788 dev->stats.tx_packets++;
@@ -1912,7 +1925,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1912 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); 1925 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1913 rfd_status = le16_to_cpu(rfd->status); 1926 rfd_status = le16_to_cpu(rfd->status);
1914 1927
1915 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status); 1928 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
1929 "status=0x%04X\n", rfd_status);
1916 1930
1917 /* If data isn't ready, nothing to indicate */ 1931 /* If data isn't ready, nothing to indicate */
1918 if (unlikely(!(rfd_status & cb_complete))) { 1932 if (unlikely(!(rfd_status & cb_complete))) {
@@ -2123,7 +2137,8 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
2123 struct nic *nic = netdev_priv(netdev); 2137 struct nic *nic = netdev_priv(netdev);
2124 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); 2138 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
2125 2139
2126 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack); 2140 netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
2141 "stat_ack = 0x%02X\n", stat_ack);
2127 2142
2128 if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ 2143 if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
2129 stat_ack == stat_ack_not_present) /* Hardware is ejected */ 2144 stat_ack == stat_ack_not_present) /* Hardware is ejected */
@@ -2263,10 +2278,15 @@ static void e100_tx_timeout_task(struct work_struct *work)
2263 struct nic *nic = container_of(work, struct nic, tx_timeout_task); 2278 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2264 struct net_device *netdev = nic->netdev; 2279 struct net_device *netdev = nic->netdev;
2265 2280
2266 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", 2281 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
2267 ioread8(&nic->csr->scb.status)); 2282 "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
2268 e100_down(netdev_priv(netdev)); 2283
2269 e100_up(netdev_priv(netdev)); 2284 rtnl_lock();
2285 if (netif_running(netdev)) {
2286 e100_down(netdev_priv(netdev));
2287 e100_up(netdev_priv(netdev));
2288 }
2289 rtnl_unlock();
2270} 2290}
2271 2291
2272static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode) 2292static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
@@ -2526,8 +2546,8 @@ static int e100_set_ringparam(struct net_device *netdev,
2526 rfds->count = min(rfds->count, rfds->max); 2546 rfds->count = min(rfds->count, rfds->max);
2527 cbs->count = max(ring->tx_pending, cbs->min); 2547 cbs->count = max(ring->tx_pending, cbs->min);
2528 cbs->count = min(cbs->count, cbs->max); 2548 cbs->count = min(cbs->count, cbs->max);
2529 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n", 2549 netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
2530 rfds->count, cbs->count); 2550 rfds->count, cbs->count);
2531 if (netif_running(netdev)) 2551 if (netif_running(netdev))
2532 e100_up(nic); 2552 e100_up(nic);
2533 2553
@@ -2704,7 +2724,7 @@ static int e100_open(struct net_device *netdev)
2704 2724
2705 netif_carrier_off(netdev); 2725 netif_carrier_off(netdev);
2706 if ((err = e100_up(nic))) 2726 if ((err = e100_up(nic)))
2707 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n"); 2727 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
2708 return err; 2728 return err;
2709} 2729}
2710 2730
@@ -2738,7 +2758,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2738 2758
2739 if (!(netdev = alloc_etherdev(sizeof(struct nic)))) { 2759 if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2740 if (((1 << debug) - 1) & NETIF_MSG_PROBE) 2760 if (((1 << debug) - 1) & NETIF_MSG_PROBE)
2741 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n"); 2761 pr_err("Etherdev alloc failed, aborting\n");
2742 return -ENOMEM; 2762 return -ENOMEM;
2743 } 2763 }
2744 2764
@@ -2756,35 +2776,34 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2756 pci_set_drvdata(pdev, netdev); 2776 pci_set_drvdata(pdev, netdev);
2757 2777
2758 if ((err = pci_enable_device(pdev))) { 2778 if ((err = pci_enable_device(pdev))) {
2759 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n"); 2779 netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
2760 goto err_out_free_dev; 2780 goto err_out_free_dev;
2761 } 2781 }
2762 2782
2763 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 2783 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2764 DPRINTK(PROBE, ERR, "Cannot find proper PCI device " 2784 netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
2765 "base address, aborting.\n");
2766 err = -ENODEV; 2785 err = -ENODEV;
2767 goto err_out_disable_pdev; 2786 goto err_out_disable_pdev;
2768 } 2787 }
2769 2788
2770 if ((err = pci_request_regions(pdev, DRV_NAME))) { 2789 if ((err = pci_request_regions(pdev, DRV_NAME))) {
2771 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n"); 2790 netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
2772 goto err_out_disable_pdev; 2791 goto err_out_disable_pdev;
2773 } 2792 }
2774 2793
2775 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 2794 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
2776 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n"); 2795 netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
2777 goto err_out_free_res; 2796 goto err_out_free_res;
2778 } 2797 }
2779 2798
2780 SET_NETDEV_DEV(netdev, &pdev->dev); 2799 SET_NETDEV_DEV(netdev, &pdev->dev);
2781 2800
2782 if (use_io) 2801 if (use_io)
2783 DPRINTK(PROBE, INFO, "using i/o access mode\n"); 2802 netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
2784 2803
2785 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr)); 2804 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
2786 if (!nic->csr) { 2805 if (!nic->csr) {
2787 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n"); 2806 netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
2788 err = -ENOMEM; 2807 err = -ENOMEM;
2789 goto err_out_free_res; 2808 goto err_out_free_res;
2790 } 2809 }
@@ -2818,7 +2837,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2818 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); 2837 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2819 2838
2820 if ((err = e100_alloc(nic))) { 2839 if ((err = e100_alloc(nic))) {
2821 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n"); 2840 netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
2822 goto err_out_iounmap; 2841 goto err_out_iounmap;
2823 } 2842 }
2824 2843
@@ -2831,13 +2850,11 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2831 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN); 2850 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
2832 if (!is_valid_ether_addr(netdev->perm_addr)) { 2851 if (!is_valid_ether_addr(netdev->perm_addr)) {
2833 if (!eeprom_bad_csum_allow) { 2852 if (!eeprom_bad_csum_allow) {
2834 DPRINTK(PROBE, ERR, "Invalid MAC address from " 2853 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
2835 "EEPROM, aborting.\n");
2836 err = -EAGAIN; 2854 err = -EAGAIN;
2837 goto err_out_free; 2855 goto err_out_free;
2838 } else { 2856 } else {
2839 DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, " 2857 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
2840 "you MUST configure one.\n");
2841 } 2858 }
2842 } 2859 }
2843 2860
@@ -2853,7 +2870,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2853 2870
2854 strcpy(netdev->name, "eth%d"); 2871 strcpy(netdev->name, "eth%d");
2855 if ((err = register_netdev(netdev))) { 2872 if ((err = register_netdev(netdev))) {
2856 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n"); 2873 netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
2857 goto err_out_free; 2874 goto err_out_free;
2858 } 2875 }
2859 nic->cbs_pool = pci_pool_create(netdev->name, 2876 nic->cbs_pool = pci_pool_create(netdev->name,
@@ -2861,9 +2878,10 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2861 nic->params.cbs.max * sizeof(struct cb), 2878 nic->params.cbs.max * sizeof(struct cb),
2862 sizeof(u32), 2879 sizeof(u32),
2863 0); 2880 0);
2864 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n", 2881 netif_info(nic, probe, nic->netdev,
2865 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), 2882 "addr 0x%llx, irq %d, MAC addr %pM\n",
2866 pdev->irq, netdev->dev_addr); 2883 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2884 pdev->irq, netdev->dev_addr);
2867 2885
2868 return 0; 2886 return 0;
2869 2887
@@ -3021,7 +3039,7 @@ static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
3021 struct nic *nic = netdev_priv(netdev); 3039 struct nic *nic = netdev_priv(netdev);
3022 3040
3023 if (pci_enable_device(pdev)) { 3041 if (pci_enable_device(pdev)) {
3024 printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n"); 3042 pr_err("Cannot re-enable PCI device after reset\n");
3025 return PCI_ERS_RESULT_DISCONNECT; 3043 return PCI_ERS_RESULT_DISCONNECT;
3026 } 3044 }
3027 pci_set_master(pdev); 3045 pci_set_master(pdev);
@@ -3080,8 +3098,8 @@ static struct pci_driver e100_driver = {
3080static int __init e100_init_module(void) 3098static int __init e100_init_module(void)
3081{ 3099{
3082 if (((1 << debug) - 1) & NETIF_MSG_DRV) { 3100 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
3083 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 3101 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3084 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT); 3102 pr_info("%s\n", DRV_COPYRIGHT);
3085 } 3103 }
3086 return pci_register_driver(&e100_driver); 3104 return pci_register_driver(&e100_driver);
3087} 3105}
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 2f29c2131851..40b62b406b08 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -81,23 +81,6 @@ struct e1000_adapter;
81 81
82#include "e1000_hw.h" 82#include "e1000_hw.h"
83 83
84#ifdef DBG
85#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
86#else
87#define E1000_DBG(args...)
88#endif
89
90#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
91
92#define PFX "e1000: "
93
94#define DPRINTK(nlevel, klevel, fmt, args...) \
95do { \
96 if (NETIF_MSG_##nlevel & adapter->msg_enable) \
97 printk(KERN_##klevel PFX "%s: %s: " fmt, \
98 adapter->netdev->name, __func__, ##args); \
99} while (0)
100
101#define E1000_MAX_INTR 10 84#define E1000_MAX_INTR 10
102 85
103/* TX/RX descriptor defines */ 86/* TX/RX descriptor defines */
@@ -335,6 +318,25 @@ enum e1000_state_t {
335 __E1000_DOWN 318 __E1000_DOWN
336}; 319};
337 320
321#undef pr_fmt
322#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
323
324extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
325#define e_dbg(format, arg...) \
326 netdev_dbg(e1000_get_hw_dev(hw), format, ## arg)
327#define e_err(format, arg...) \
328 netdev_err(adapter->netdev, format, ## arg)
329#define e_info(format, arg...) \
330 netdev_info(adapter->netdev, format, ## arg)
331#define e_warn(format, arg...) \
332 netdev_warn(adapter->netdev, format, ## arg)
333#define e_notice(format, arg...) \
334 netdev_notice(adapter->netdev, format, ## arg)
335#define e_dev_info(format, arg...) \
336 dev_info(&adapter->pdev->dev, format, ## arg)
337#define e_dev_warn(format, arg...) \
338 dev_warn(&adapter->pdev->dev, format, ## arg)
339
338extern char e1000_driver_name[]; 340extern char e1000_driver_name[];
339extern const char e1000_driver_version[]; 341extern const char e1000_driver_version[];
340 342
@@ -352,5 +354,6 @@ extern bool e1000_has_link(struct e1000_adapter *adapter);
352extern void e1000_power_up_phy(struct e1000_adapter *); 354extern void e1000_power_up_phy(struct e1000_adapter *);
353extern void e1000_set_ethtool_ops(struct net_device *netdev); 355extern void e1000_set_ethtool_ops(struct net_device *netdev);
354extern void e1000_check_options(struct e1000_adapter *adapter); 356extern void e1000_check_options(struct e1000_adapter *adapter);
357extern char *e1000_get_hw_dev_name(struct e1000_hw *hw);
355 358
356#endif /* _E1000_H_ */ 359#endif /* _E1000_H_ */
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index c67e93117271..d5ff029aa7b2 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -346,7 +346,7 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
346 346
347 netdev->features &= ~NETIF_F_TSO6; 347 netdev->features &= ~NETIF_F_TSO6;
348 348
349 DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled"); 349 e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
350 adapter->tso_force = true; 350 adapter->tso_force = true;
351 return 0; 351 return 0;
352} 352}
@@ -714,9 +714,9 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
714 writel(write & test[i], address); 714 writel(write & test[i], address);
715 read = readl(address); 715 read = readl(address);
716 if (read != (write & test[i] & mask)) { 716 if (read != (write & test[i] & mask)) {
717 DPRINTK(DRV, ERR, "pattern test reg %04X failed: " 717 e_info("pattern test reg %04X failed: "
718 "got 0x%08X expected 0x%08X\n", 718 "got 0x%08X expected 0x%08X\n",
719 reg, read, (write & test[i] & mask)); 719 reg, read, (write & test[i] & mask));
720 *data = reg; 720 *data = reg;
721 return true; 721 return true;
722 } 722 }
@@ -734,9 +734,9 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
734 writel(write & mask, address); 734 writel(write & mask, address);
735 read = readl(address); 735 read = readl(address);
736 if ((read & mask) != (write & mask)) { 736 if ((read & mask) != (write & mask)) {
737 DPRINTK(DRV, ERR, "set/check reg %04X test failed: " 737 e_err("set/check reg %04X test failed: "
738 "got 0x%08X expected 0x%08X\n", 738 "got 0x%08X expected 0x%08X\n",
739 reg, (read & mask), (write & mask)); 739 reg, (read & mask), (write & mask));
740 *data = reg; 740 *data = reg;
741 return true; 741 return true;
742 } 742 }
@@ -779,8 +779,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
779 ew32(STATUS, toggle); 779 ew32(STATUS, toggle);
780 after = er32(STATUS) & toggle; 780 after = er32(STATUS) & toggle;
781 if (value != after) { 781 if (value != after) {
782 DPRINTK(DRV, ERR, "failed STATUS register test got: " 782 e_err("failed STATUS register test got: "
783 "0x%08X expected: 0x%08X\n", after, value); 783 "0x%08X expected: 0x%08X\n", after, value);
784 *data = 1; 784 *data = 1;
785 return 1; 785 return 1;
786 } 786 }
@@ -894,8 +894,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
894 *data = 1; 894 *data = 1;
895 return -1; 895 return -1;
896 } 896 }
897 DPRINTK(HW, INFO, "testing %s interrupt\n", 897 e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared"));
898 (shared_int ? "shared" : "unshared"));
899 898
900 /* Disable all the interrupts */ 899 /* Disable all the interrupts */
901 ew32(IMC, 0xFFFFFFFF); 900 ew32(IMC, 0xFFFFFFFF);
@@ -980,9 +979,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
980 if (txdr->desc && txdr->buffer_info) { 979 if (txdr->desc && txdr->buffer_info) {
981 for (i = 0; i < txdr->count; i++) { 980 for (i = 0; i < txdr->count; i++) {
982 if (txdr->buffer_info[i].dma) 981 if (txdr->buffer_info[i].dma)
983 pci_unmap_single(pdev, txdr->buffer_info[i].dma, 982 dma_unmap_single(&pdev->dev,
983 txdr->buffer_info[i].dma,
984 txdr->buffer_info[i].length, 984 txdr->buffer_info[i].length,
985 PCI_DMA_TODEVICE); 985 DMA_TO_DEVICE);
986 if (txdr->buffer_info[i].skb) 986 if (txdr->buffer_info[i].skb)
987 dev_kfree_skb(txdr->buffer_info[i].skb); 987 dev_kfree_skb(txdr->buffer_info[i].skb);
988 } 988 }
@@ -991,20 +991,23 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
991 if (rxdr->desc && rxdr->buffer_info) { 991 if (rxdr->desc && rxdr->buffer_info) {
992 for (i = 0; i < rxdr->count; i++) { 992 for (i = 0; i < rxdr->count; i++) {
993 if (rxdr->buffer_info[i].dma) 993 if (rxdr->buffer_info[i].dma)
994 pci_unmap_single(pdev, rxdr->buffer_info[i].dma, 994 dma_unmap_single(&pdev->dev,
995 rxdr->buffer_info[i].dma,
995 rxdr->buffer_info[i].length, 996 rxdr->buffer_info[i].length,
996 PCI_DMA_FROMDEVICE); 997 DMA_FROM_DEVICE);
997 if (rxdr->buffer_info[i].skb) 998 if (rxdr->buffer_info[i].skb)
998 dev_kfree_skb(rxdr->buffer_info[i].skb); 999 dev_kfree_skb(rxdr->buffer_info[i].skb);
999 } 1000 }
1000 } 1001 }
1001 1002
1002 if (txdr->desc) { 1003 if (txdr->desc) {
1003 pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma); 1004 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1005 txdr->dma);
1004 txdr->desc = NULL; 1006 txdr->desc = NULL;
1005 } 1007 }
1006 if (rxdr->desc) { 1008 if (rxdr->desc) {
1007 pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma); 1009 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1010 rxdr->dma);
1008 rxdr->desc = NULL; 1011 rxdr->desc = NULL;
1009 } 1012 }
1010 1013
@@ -1012,8 +1015,6 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
1012 txdr->buffer_info = NULL; 1015 txdr->buffer_info = NULL;
1013 kfree(rxdr->buffer_info); 1016 kfree(rxdr->buffer_info);
1014 rxdr->buffer_info = NULL; 1017 rxdr->buffer_info = NULL;
1015
1016 return;
1017} 1018}
1018 1019
1019static int e1000_setup_desc_rings(struct e1000_adapter *adapter) 1020static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
@@ -1039,7 +1040,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1039 1040
1040 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1041 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1041 txdr->size = ALIGN(txdr->size, 4096); 1042 txdr->size = ALIGN(txdr->size, 4096);
1042 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1043 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1044 GFP_KERNEL);
1043 if (!txdr->desc) { 1045 if (!txdr->desc) {
1044 ret_val = 2; 1046 ret_val = 2;
1045 goto err_nomem; 1047 goto err_nomem;
@@ -1070,8 +1072,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1070 txdr->buffer_info[i].skb = skb; 1072 txdr->buffer_info[i].skb = skb;
1071 txdr->buffer_info[i].length = skb->len; 1073 txdr->buffer_info[i].length = skb->len;
1072 txdr->buffer_info[i].dma = 1074 txdr->buffer_info[i].dma =
1073 pci_map_single(pdev, skb->data, skb->len, 1075 dma_map_single(&pdev->dev, skb->data, skb->len,
1074 PCI_DMA_TODEVICE); 1076 DMA_TO_DEVICE);
1075 tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); 1077 tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
1076 tx_desc->lower.data = cpu_to_le32(skb->len); 1078 tx_desc->lower.data = cpu_to_le32(skb->len);
1077 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | 1079 tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1093,7 +1095,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1093 } 1095 }
1094 1096
1095 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); 1097 rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
1096 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1098 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1099 GFP_KERNEL);
1097 if (!rxdr->desc) { 1100 if (!rxdr->desc) {
1098 ret_val = 5; 1101 ret_val = 5;
1099 goto err_nomem; 1102 goto err_nomem;
@@ -1126,8 +1129,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1126 rxdr->buffer_info[i].skb = skb; 1129 rxdr->buffer_info[i].skb = skb;
1127 rxdr->buffer_info[i].length = E1000_RXBUFFER_2048; 1130 rxdr->buffer_info[i].length = E1000_RXBUFFER_2048;
1128 rxdr->buffer_info[i].dma = 1131 rxdr->buffer_info[i].dma =
1129 pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048, 1132 dma_map_single(&pdev->dev, skb->data,
1130 PCI_DMA_FROMDEVICE); 1133 E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
1131 rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); 1134 rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
1132 memset(skb->data, 0x00, skb->len); 1135 memset(skb->data, 0x00, skb->len);
1133 } 1136 }
@@ -1444,10 +1447,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1444 for (i = 0; i < 64; i++) { /* send the packets */ 1447 for (i = 0; i < 64; i++) { /* send the packets */
1445 e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1448 e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1446 1024); 1449 1024);
1447 pci_dma_sync_single_for_device(pdev, 1450 dma_sync_single_for_device(&pdev->dev,
1448 txdr->buffer_info[k].dma, 1451 txdr->buffer_info[k].dma,
1449 txdr->buffer_info[k].length, 1452 txdr->buffer_info[k].length,
1450 PCI_DMA_TODEVICE); 1453 DMA_TO_DEVICE);
1451 if (unlikely(++k == txdr->count)) k = 0; 1454 if (unlikely(++k == txdr->count)) k = 0;
1452 } 1455 }
1453 ew32(TDT, k); 1456 ew32(TDT, k);
@@ -1455,10 +1458,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1455 time = jiffies; /* set the start time for the receive */ 1458 time = jiffies; /* set the start time for the receive */
1456 good_cnt = 0; 1459 good_cnt = 0;
1457 do { /* receive the sent packets */ 1460 do { /* receive the sent packets */
1458 pci_dma_sync_single_for_cpu(pdev, 1461 dma_sync_single_for_cpu(&pdev->dev,
1459 rxdr->buffer_info[l].dma, 1462 rxdr->buffer_info[l].dma,
1460 rxdr->buffer_info[l].length, 1463 rxdr->buffer_info[l].length,
1461 PCI_DMA_FROMDEVICE); 1464 DMA_FROM_DEVICE);
1462 1465
1463 ret_val = e1000_check_lbtest_frame( 1466 ret_val = e1000_check_lbtest_frame(
1464 rxdr->buffer_info[l].skb, 1467 rxdr->buffer_info[l].skb,
@@ -1558,7 +1561,7 @@ static void e1000_diag_test(struct net_device *netdev,
1558 u8 forced_speed_duplex = hw->forced_speed_duplex; 1561 u8 forced_speed_duplex = hw->forced_speed_duplex;
1559 u8 autoneg = hw->autoneg; 1562 u8 autoneg = hw->autoneg;
1560 1563
1561 DPRINTK(HW, INFO, "offline testing starting\n"); 1564 e_info("offline testing starting\n");
1562 1565
1563 /* Link test performed before hardware reset so autoneg doesn't 1566 /* Link test performed before hardware reset so autoneg doesn't
1564 * interfere with test result */ 1567 * interfere with test result */
@@ -1598,7 +1601,7 @@ static void e1000_diag_test(struct net_device *netdev,
1598 if (if_running) 1601 if (if_running)
1599 dev_open(netdev); 1602 dev_open(netdev);
1600 } else { 1603 } else {
1601 DPRINTK(HW, INFO, "online testing starting\n"); 1604 e_info("online testing starting\n");
1602 /* Online tests */ 1605 /* Online tests */
1603 if (e1000_link_test(adapter, &data[4])) 1606 if (e1000_link_test(adapter, &data[4]))
1604 eth_test->flags |= ETH_TEST_FL_FAILED; 1607 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1691,7 +1694,7 @@ static void e1000_get_wol(struct net_device *netdev,
1691 wol->supported &= ~WAKE_UCAST; 1694 wol->supported &= ~WAKE_UCAST;
1692 1695
1693 if (adapter->wol & E1000_WUFC_EX) 1696 if (adapter->wol & E1000_WUFC_EX)
1694 DPRINTK(DRV, ERR, "Interface does not support " 1697 e_err("Interface does not support "
1695 "directed (unicast) frame wake-up packets\n"); 1698 "directed (unicast) frame wake-up packets\n");
1696 break; 1699 break;
1697 default: 1700 default:
@@ -1706,8 +1709,6 @@ static void e1000_get_wol(struct net_device *netdev,
1706 wol->wolopts |= WAKE_BCAST; 1709 wol->wolopts |= WAKE_BCAST;
1707 if (adapter->wol & E1000_WUFC_MAG) 1710 if (adapter->wol & E1000_WUFC_MAG)
1708 wol->wolopts |= WAKE_MAGIC; 1711 wol->wolopts |= WAKE_MAGIC;
1709
1710 return;
1711} 1712}
1712 1713
1713static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1714static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -1725,8 +1726,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1725 switch (hw->device_id) { 1726 switch (hw->device_id) {
1726 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: 1727 case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1727 if (wol->wolopts & WAKE_UCAST) { 1728 if (wol->wolopts & WAKE_UCAST) {
1728 DPRINTK(DRV, ERR, "Interface does not support " 1729 e_err("Interface does not support "
1729 "directed (unicast) frame wake-up packets\n"); 1730 "directed (unicast) frame wake-up packets\n");
1730 return -EOPNOTSUPP; 1731 return -EOPNOTSUPP;
1731 } 1732 }
1732 break; 1733 break;
@@ -1803,7 +1804,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
1803 if (adapter->hw.mac_type < e1000_82545) 1804 if (adapter->hw.mac_type < e1000_82545)
1804 return -EOPNOTSUPP; 1805 return -EOPNOTSUPP;
1805 1806
1806 if (adapter->itr_setting <= 3) 1807 if (adapter->itr_setting <= 4)
1807 ec->rx_coalesce_usecs = adapter->itr_setting; 1808 ec->rx_coalesce_usecs = adapter->itr_setting;
1808 else 1809 else
1809 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; 1810 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1821,12 +1822,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
1821 return -EOPNOTSUPP; 1822 return -EOPNOTSUPP;
1822 1823
1823 if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || 1824 if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
1824 ((ec->rx_coalesce_usecs > 3) && 1825 ((ec->rx_coalesce_usecs > 4) &&
1825 (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || 1826 (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
1826 (ec->rx_coalesce_usecs == 2)) 1827 (ec->rx_coalesce_usecs == 2))
1827 return -EINVAL; 1828 return -EINVAL;
1828 1829
1829 if (ec->rx_coalesce_usecs <= 3) { 1830 if (ec->rx_coalesce_usecs == 4) {
1831 adapter->itr = adapter->itr_setting = 4;
1832 } else if (ec->rx_coalesce_usecs <= 3) {
1830 adapter->itr = 20000; 1833 adapter->itr = 20000;
1831 adapter->itr_setting = ec->rx_coalesce_usecs; 1834 adapter->itr_setting = ec->rx_coalesce_usecs;
1832 } else { 1835 } else {
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 8d7d87f12827..c7e242b69a18 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -30,7 +30,7 @@
30 * Shared functions for accessing and configuring the MAC 30 * Shared functions for accessing and configuring the MAC
31 */ 31 */
32 32
33#include "e1000_hw.h" 33#include "e1000.h"
34 34
35static s32 e1000_check_downshift(struct e1000_hw *hw); 35static s32 e1000_check_downshift(struct e1000_hw *hw);
36static s32 e1000_check_polarity(struct e1000_hw *hw, 36static s32 e1000_check_polarity(struct e1000_hw *hw,
@@ -114,7 +114,7 @@ static DEFINE_SPINLOCK(e1000_eeprom_lock);
114 */ 114 */
115static s32 e1000_set_phy_type(struct e1000_hw *hw) 115static s32 e1000_set_phy_type(struct e1000_hw *hw)
116{ 116{
117 DEBUGFUNC("e1000_set_phy_type"); 117 e_dbg("e1000_set_phy_type");
118 118
119 if (hw->mac_type == e1000_undefined) 119 if (hw->mac_type == e1000_undefined)
120 return -E1000_ERR_PHY_TYPE; 120 return -E1000_ERR_PHY_TYPE;
@@ -152,7 +152,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
152 u32 ret_val; 152 u32 ret_val;
153 u16 phy_saved_data; 153 u16 phy_saved_data;
154 154
155 DEBUGFUNC("e1000_phy_init_script"); 155 e_dbg("e1000_phy_init_script");
156 156
157 if (hw->phy_init_script) { 157 if (hw->phy_init_script) {
158 msleep(20); 158 msleep(20);
@@ -245,7 +245,7 @@ static void e1000_phy_init_script(struct e1000_hw *hw)
245 */ 245 */
246s32 e1000_set_mac_type(struct e1000_hw *hw) 246s32 e1000_set_mac_type(struct e1000_hw *hw)
247{ 247{
248 DEBUGFUNC("e1000_set_mac_type"); 248 e_dbg("e1000_set_mac_type");
249 249
250 switch (hw->device_id) { 250 switch (hw->device_id) {
251 case E1000_DEV_ID_82542: 251 case E1000_DEV_ID_82542:
@@ -354,7 +354,7 @@ void e1000_set_media_type(struct e1000_hw *hw)
354{ 354{
355 u32 status; 355 u32 status;
356 356
357 DEBUGFUNC("e1000_set_media_type"); 357 e_dbg("e1000_set_media_type");
358 358
359 if (hw->mac_type != e1000_82543) { 359 if (hw->mac_type != e1000_82543) {
360 /* tbi_compatibility is only valid on 82543 */ 360 /* tbi_compatibility is only valid on 82543 */
@@ -401,16 +401,16 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
401 u32 led_ctrl; 401 u32 led_ctrl;
402 s32 ret_val; 402 s32 ret_val;
403 403
404 DEBUGFUNC("e1000_reset_hw"); 404 e_dbg("e1000_reset_hw");
405 405
406 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ 406 /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
407 if (hw->mac_type == e1000_82542_rev2_0) { 407 if (hw->mac_type == e1000_82542_rev2_0) {
408 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); 408 e_dbg("Disabling MWI on 82542 rev 2.0\n");
409 e1000_pci_clear_mwi(hw); 409 e1000_pci_clear_mwi(hw);
410 } 410 }
411 411
412 /* Clear interrupt mask to stop board from generating interrupts */ 412 /* Clear interrupt mask to stop board from generating interrupts */
413 DEBUGOUT("Masking off all interrupts\n"); 413 e_dbg("Masking off all interrupts\n");
414 ew32(IMC, 0xffffffff); 414 ew32(IMC, 0xffffffff);
415 415
416 /* Disable the Transmit and Receive units. Then delay to allow 416 /* Disable the Transmit and Receive units. Then delay to allow
@@ -442,7 +442,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
442 * the current PCI configuration. The global reset bit is self- 442 * the current PCI configuration. The global reset bit is self-
443 * clearing, and should clear within a microsecond. 443 * clearing, and should clear within a microsecond.
444 */ 444 */
445 DEBUGOUT("Issuing a global reset to MAC\n"); 445 e_dbg("Issuing a global reset to MAC\n");
446 446
447 switch (hw->mac_type) { 447 switch (hw->mac_type) {
448 case e1000_82544: 448 case e1000_82544:
@@ -516,7 +516,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
516 } 516 }
517 517
518 /* Clear interrupt mask to stop board from generating interrupts */ 518 /* Clear interrupt mask to stop board from generating interrupts */
519 DEBUGOUT("Masking off all interrupts\n"); 519 e_dbg("Masking off all interrupts\n");
520 ew32(IMC, 0xffffffff); 520 ew32(IMC, 0xffffffff);
521 521
522 /* Clear any pending interrupt events. */ 522 /* Clear any pending interrupt events. */
@@ -549,12 +549,12 @@ s32 e1000_init_hw(struct e1000_hw *hw)
549 u32 mta_size; 549 u32 mta_size;
550 u32 ctrl_ext; 550 u32 ctrl_ext;
551 551
552 DEBUGFUNC("e1000_init_hw"); 552 e_dbg("e1000_init_hw");
553 553
554 /* Initialize Identification LED */ 554 /* Initialize Identification LED */
555 ret_val = e1000_id_led_init(hw); 555 ret_val = e1000_id_led_init(hw);
556 if (ret_val) { 556 if (ret_val) {
557 DEBUGOUT("Error Initializing Identification LED\n"); 557 e_dbg("Error Initializing Identification LED\n");
558 return ret_val; 558 return ret_val;
559 } 559 }
560 560
@@ -562,14 +562,14 @@ s32 e1000_init_hw(struct e1000_hw *hw)
562 e1000_set_media_type(hw); 562 e1000_set_media_type(hw);
563 563
564 /* Disabling VLAN filtering. */ 564 /* Disabling VLAN filtering. */
565 DEBUGOUT("Initializing the IEEE VLAN\n"); 565 e_dbg("Initializing the IEEE VLAN\n");
566 if (hw->mac_type < e1000_82545_rev_3) 566 if (hw->mac_type < e1000_82545_rev_3)
567 ew32(VET, 0); 567 ew32(VET, 0);
568 e1000_clear_vfta(hw); 568 e1000_clear_vfta(hw);
569 569
570 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ 570 /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
571 if (hw->mac_type == e1000_82542_rev2_0) { 571 if (hw->mac_type == e1000_82542_rev2_0) {
572 DEBUGOUT("Disabling MWI on 82542 rev 2.0\n"); 572 e_dbg("Disabling MWI on 82542 rev 2.0\n");
573 e1000_pci_clear_mwi(hw); 573 e1000_pci_clear_mwi(hw);
574 ew32(RCTL, E1000_RCTL_RST); 574 ew32(RCTL, E1000_RCTL_RST);
575 E1000_WRITE_FLUSH(); 575 E1000_WRITE_FLUSH();
@@ -591,7 +591,7 @@ s32 e1000_init_hw(struct e1000_hw *hw)
591 } 591 }
592 592
593 /* Zero out the Multicast HASH table */ 593 /* Zero out the Multicast HASH table */
594 DEBUGOUT("Zeroing the MTA\n"); 594 e_dbg("Zeroing the MTA\n");
595 mta_size = E1000_MC_TBL_SIZE; 595 mta_size = E1000_MC_TBL_SIZE;
596 for (i = 0; i < mta_size; i++) { 596 for (i = 0; i < mta_size; i++) {
597 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); 597 E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
@@ -662,7 +662,7 @@ static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
662 u16 eeprom_data; 662 u16 eeprom_data;
663 s32 ret_val; 663 s32 ret_val;
664 664
665 DEBUGFUNC("e1000_adjust_serdes_amplitude"); 665 e_dbg("e1000_adjust_serdes_amplitude");
666 666
667 if (hw->media_type != e1000_media_type_internal_serdes) 667 if (hw->media_type != e1000_media_type_internal_serdes)
668 return E1000_SUCCESS; 668 return E1000_SUCCESS;
@@ -709,7 +709,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
709 s32 ret_val; 709 s32 ret_val;
710 u16 eeprom_data; 710 u16 eeprom_data;
711 711
712 DEBUGFUNC("e1000_setup_link"); 712 e_dbg("e1000_setup_link");
713 713
714 /* Read and store word 0x0F of the EEPROM. This word contains bits 714 /* Read and store word 0x0F of the EEPROM. This word contains bits
715 * that determine the hardware's default PAUSE (flow control) mode, 715 * that determine the hardware's default PAUSE (flow control) mode,
@@ -723,7 +723,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
723 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 723 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
724 1, &eeprom_data); 724 1, &eeprom_data);
725 if (ret_val) { 725 if (ret_val) {
726 DEBUGOUT("EEPROM Read Error\n"); 726 e_dbg("EEPROM Read Error\n");
727 return -E1000_ERR_EEPROM; 727 return -E1000_ERR_EEPROM;
728 } 728 }
729 if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) 729 if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
@@ -747,7 +747,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
747 747
748 hw->original_fc = hw->fc; 748 hw->original_fc = hw->fc;
749 749
750 DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc); 750 e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc);
751 751
752 /* Take the 4 bits from EEPROM word 0x0F that determine the initial 752 /* Take the 4 bits from EEPROM word 0x0F that determine the initial
753 * polarity value for the SW controlled pins, and setup the 753 * polarity value for the SW controlled pins, and setup the
@@ -760,7 +760,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
760 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, 760 ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
761 1, &eeprom_data); 761 1, &eeprom_data);
762 if (ret_val) { 762 if (ret_val) {
763 DEBUGOUT("EEPROM Read Error\n"); 763 e_dbg("EEPROM Read Error\n");
764 return -E1000_ERR_EEPROM; 764 return -E1000_ERR_EEPROM;
765 } 765 }
766 ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << 766 ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
@@ -777,8 +777,7 @@ s32 e1000_setup_link(struct e1000_hw *hw)
777 * control is disabled, because it does not hurt anything to 777 * control is disabled, because it does not hurt anything to
778 * initialize these registers. 778 * initialize these registers.
779 */ 779 */
780 DEBUGOUT 780 e_dbg("Initializing the Flow Control address, type and timer regs\n");
781 ("Initializing the Flow Control address, type and timer regs\n");
782 781
783 ew32(FCT, FLOW_CONTROL_TYPE); 782 ew32(FCT, FLOW_CONTROL_TYPE);
784 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); 783 ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
@@ -827,7 +826,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
827 u32 signal = 0; 826 u32 signal = 0;
828 s32 ret_val; 827 s32 ret_val;
829 828
830 DEBUGFUNC("e1000_setup_fiber_serdes_link"); 829 e_dbg("e1000_setup_fiber_serdes_link");
831 830
832 /* On adapters with a MAC newer than 82544, SWDP 1 will be 831 /* On adapters with a MAC newer than 82544, SWDP 1 will be
833 * set when the optics detect a signal. On older adapters, it will be 832 * set when the optics detect a signal. On older adapters, it will be
@@ -893,7 +892,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
893 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); 892 txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
894 break; 893 break;
895 default: 894 default:
896 DEBUGOUT("Flow control param set incorrectly\n"); 895 e_dbg("Flow control param set incorrectly\n");
897 return -E1000_ERR_CONFIG; 896 return -E1000_ERR_CONFIG;
898 break; 897 break;
899 } 898 }
@@ -904,7 +903,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
904 * link-up status bit will be set and the flow control enable bits (RFCE 903 * link-up status bit will be set and the flow control enable bits (RFCE
905 * and TFCE) will be set according to their negotiated value. 904 * and TFCE) will be set according to their negotiated value.
906 */ 905 */
907 DEBUGOUT("Auto-negotiation enabled\n"); 906 e_dbg("Auto-negotiation enabled\n");
908 907
909 ew32(TXCW, txcw); 908 ew32(TXCW, txcw);
910 ew32(CTRL, ctrl); 909 ew32(CTRL, ctrl);
@@ -921,7 +920,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
921 */ 920 */
922 if (hw->media_type == e1000_media_type_internal_serdes || 921 if (hw->media_type == e1000_media_type_internal_serdes ||
923 (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { 922 (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) {
924 DEBUGOUT("Looking for Link\n"); 923 e_dbg("Looking for Link\n");
925 for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { 924 for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
926 msleep(10); 925 msleep(10);
927 status = er32(STATUS); 926 status = er32(STATUS);
@@ -929,7 +928,7 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
929 break; 928 break;
930 } 929 }
931 if (i == (LINK_UP_TIMEOUT / 10)) { 930 if (i == (LINK_UP_TIMEOUT / 10)) {
932 DEBUGOUT("Never got a valid link from auto-neg!!!\n"); 931 e_dbg("Never got a valid link from auto-neg!!!\n");
933 hw->autoneg_failed = 1; 932 hw->autoneg_failed = 1;
934 /* AutoNeg failed to achieve a link, so we'll call 933 /* AutoNeg failed to achieve a link, so we'll call
935 * e1000_check_for_link. This routine will force the link up if 934 * e1000_check_for_link. This routine will force the link up if
@@ -938,16 +937,16 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
938 */ 937 */
939 ret_val = e1000_check_for_link(hw); 938 ret_val = e1000_check_for_link(hw);
940 if (ret_val) { 939 if (ret_val) {
941 DEBUGOUT("Error while checking for link\n"); 940 e_dbg("Error while checking for link\n");
942 return ret_val; 941 return ret_val;
943 } 942 }
944 hw->autoneg_failed = 0; 943 hw->autoneg_failed = 0;
945 } else { 944 } else {
946 hw->autoneg_failed = 0; 945 hw->autoneg_failed = 0;
947 DEBUGOUT("Valid Link Found\n"); 946 e_dbg("Valid Link Found\n");
948 } 947 }
949 } else { 948 } else {
950 DEBUGOUT("No Signal Detected\n"); 949 e_dbg("No Signal Detected\n");
951 } 950 }
952 return E1000_SUCCESS; 951 return E1000_SUCCESS;
953} 952}
@@ -964,7 +963,7 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
964 s32 ret_val; 963 s32 ret_val;
965 u16 phy_data; 964 u16 phy_data;
966 965
967 DEBUGFUNC("e1000_copper_link_preconfig"); 966 e_dbg("e1000_copper_link_preconfig");
968 967
969 ctrl = er32(CTRL); 968 ctrl = er32(CTRL);
970 /* With 82543, we need to force speed and duplex on the MAC equal to what 969 /* With 82543, we need to force speed and duplex on the MAC equal to what
@@ -987,10 +986,10 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
987 /* Make sure we have a valid PHY */ 986 /* Make sure we have a valid PHY */
988 ret_val = e1000_detect_gig_phy(hw); 987 ret_val = e1000_detect_gig_phy(hw);
989 if (ret_val) { 988 if (ret_val) {
990 DEBUGOUT("Error, did not detect valid phy.\n"); 989 e_dbg("Error, did not detect valid phy.\n");
991 return ret_val; 990 return ret_val;
992 } 991 }
993 DEBUGOUT1("Phy ID = %x \n", hw->phy_id); 992 e_dbg("Phy ID = %x\n", hw->phy_id);
994 993
995 /* Set PHY to class A mode (if necessary) */ 994 /* Set PHY to class A mode (if necessary) */
996 ret_val = e1000_set_phy_mode(hw); 995 ret_val = e1000_set_phy_mode(hw);
@@ -1025,14 +1024,14 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
1025 s32 ret_val; 1024 s32 ret_val;
1026 u16 phy_data; 1025 u16 phy_data;
1027 1026
1028 DEBUGFUNC("e1000_copper_link_igp_setup"); 1027 e_dbg("e1000_copper_link_igp_setup");
1029 1028
1030 if (hw->phy_reset_disable) 1029 if (hw->phy_reset_disable)
1031 return E1000_SUCCESS; 1030 return E1000_SUCCESS;
1032 1031
1033 ret_val = e1000_phy_reset(hw); 1032 ret_val = e1000_phy_reset(hw);
1034 if (ret_val) { 1033 if (ret_val) {
1035 DEBUGOUT("Error Resetting the PHY\n"); 1034 e_dbg("Error Resetting the PHY\n");
1036 return ret_val; 1035 return ret_val;
1037 } 1036 }
1038 1037
@@ -1049,7 +1048,7 @@ static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw)
1049 /* disable lplu d3 during driver init */ 1048 /* disable lplu d3 during driver init */
1050 ret_val = e1000_set_d3_lplu_state(hw, false); 1049 ret_val = e1000_set_d3_lplu_state(hw, false);
1051 if (ret_val) { 1050 if (ret_val) {
1052 DEBUGOUT("Error Disabling LPLU D3\n"); 1051 e_dbg("Error Disabling LPLU D3\n");
1053 return ret_val; 1052 return ret_val;
1054 } 1053 }
1055 } 1054 }
@@ -1166,7 +1165,7 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1166 s32 ret_val; 1165 s32 ret_val;
1167 u16 phy_data; 1166 u16 phy_data;
1168 1167
1169 DEBUGFUNC("e1000_copper_link_mgp_setup"); 1168 e_dbg("e1000_copper_link_mgp_setup");
1170 1169
1171 if (hw->phy_reset_disable) 1170 if (hw->phy_reset_disable)
1172 return E1000_SUCCESS; 1171 return E1000_SUCCESS;
@@ -1255,7 +1254,7 @@ static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw)
1255 /* SW Reset the PHY so all changes take effect */ 1254 /* SW Reset the PHY so all changes take effect */
1256 ret_val = e1000_phy_reset(hw); 1255 ret_val = e1000_phy_reset(hw);
1257 if (ret_val) { 1256 if (ret_val) {
1258 DEBUGOUT("Error Resetting the PHY\n"); 1257 e_dbg("Error Resetting the PHY\n");
1259 return ret_val; 1258 return ret_val;
1260 } 1259 }
1261 1260
@@ -1274,7 +1273,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1274 s32 ret_val; 1273 s32 ret_val;
1275 u16 phy_data; 1274 u16 phy_data;
1276 1275
1277 DEBUGFUNC("e1000_copper_link_autoneg"); 1276 e_dbg("e1000_copper_link_autoneg");
1278 1277
1279 /* Perform some bounds checking on the hw->autoneg_advertised 1278 /* Perform some bounds checking on the hw->autoneg_advertised
1280 * parameter. If this variable is zero, then set it to the default. 1279 * parameter. If this variable is zero, then set it to the default.
@@ -1287,13 +1286,13 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1287 if (hw->autoneg_advertised == 0) 1286 if (hw->autoneg_advertised == 0)
1288 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 1287 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
1289 1288
1290 DEBUGOUT("Reconfiguring auto-neg advertisement params\n"); 1289 e_dbg("Reconfiguring auto-neg advertisement params\n");
1291 ret_val = e1000_phy_setup_autoneg(hw); 1290 ret_val = e1000_phy_setup_autoneg(hw);
1292 if (ret_val) { 1291 if (ret_val) {
1293 DEBUGOUT("Error Setting up Auto-Negotiation\n"); 1292 e_dbg("Error Setting up Auto-Negotiation\n");
1294 return ret_val; 1293 return ret_val;
1295 } 1294 }
1296 DEBUGOUT("Restarting Auto-Neg\n"); 1295 e_dbg("Restarting Auto-Neg\n");
1297 1296
1298 /* Restart auto-negotiation by setting the Auto Neg Enable bit and 1297 /* Restart auto-negotiation by setting the Auto Neg Enable bit and
1299 * the Auto Neg Restart bit in the PHY control register. 1298 * the Auto Neg Restart bit in the PHY control register.
@@ -1313,7 +1312,7 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1313 if (hw->wait_autoneg_complete) { 1312 if (hw->wait_autoneg_complete) {
1314 ret_val = e1000_wait_autoneg(hw); 1313 ret_val = e1000_wait_autoneg(hw);
1315 if (ret_val) { 1314 if (ret_val) {
1316 DEBUGOUT 1315 e_dbg
1317 ("Error while waiting for autoneg to complete\n"); 1316 ("Error while waiting for autoneg to complete\n");
1318 return ret_val; 1317 return ret_val;
1319 } 1318 }
@@ -1340,20 +1339,20 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1340static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) 1339static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
1341{ 1340{
1342 s32 ret_val; 1341 s32 ret_val;
1343 DEBUGFUNC("e1000_copper_link_postconfig"); 1342 e_dbg("e1000_copper_link_postconfig");
1344 1343
1345 if (hw->mac_type >= e1000_82544) { 1344 if (hw->mac_type >= e1000_82544) {
1346 e1000_config_collision_dist(hw); 1345 e1000_config_collision_dist(hw);
1347 } else { 1346 } else {
1348 ret_val = e1000_config_mac_to_phy(hw); 1347 ret_val = e1000_config_mac_to_phy(hw);
1349 if (ret_val) { 1348 if (ret_val) {
1350 DEBUGOUT("Error configuring MAC to PHY settings\n"); 1349 e_dbg("Error configuring MAC to PHY settings\n");
1351 return ret_val; 1350 return ret_val;
1352 } 1351 }
1353 } 1352 }
1354 ret_val = e1000_config_fc_after_link_up(hw); 1353 ret_val = e1000_config_fc_after_link_up(hw);
1355 if (ret_val) { 1354 if (ret_val) {
1356 DEBUGOUT("Error Configuring Flow Control\n"); 1355 e_dbg("Error Configuring Flow Control\n");
1357 return ret_val; 1356 return ret_val;
1358 } 1357 }
1359 1358
@@ -1361,7 +1360,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
1361 if (hw->phy_type == e1000_phy_igp) { 1360 if (hw->phy_type == e1000_phy_igp) {
1362 ret_val = e1000_config_dsp_after_link_change(hw, true); 1361 ret_val = e1000_config_dsp_after_link_change(hw, true);
1363 if (ret_val) { 1362 if (ret_val) {
1364 DEBUGOUT("Error Configuring DSP after link up\n"); 1363 e_dbg("Error Configuring DSP after link up\n");
1365 return ret_val; 1364 return ret_val;
1366 } 1365 }
1367 } 1366 }
@@ -1381,7 +1380,7 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
1381 u16 i; 1380 u16 i;
1382 u16 phy_data; 1381 u16 phy_data;
1383 1382
1384 DEBUGFUNC("e1000_setup_copper_link"); 1383 e_dbg("e1000_setup_copper_link");
1385 1384
1386 /* Check if it is a valid PHY and set PHY mode if necessary. */ 1385 /* Check if it is a valid PHY and set PHY mode if necessary. */
1387 ret_val = e1000_copper_link_preconfig(hw); 1386 ret_val = e1000_copper_link_preconfig(hw);
@@ -1407,10 +1406,10 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
1407 } else { 1406 } else {
1408 /* PHY will be set to 10H, 10F, 100H,or 100F 1407 /* PHY will be set to 10H, 10F, 100H,or 100F
1409 * depending on value from forced_speed_duplex. */ 1408 * depending on value from forced_speed_duplex. */
1410 DEBUGOUT("Forcing speed and duplex\n"); 1409 e_dbg("Forcing speed and duplex\n");
1411 ret_val = e1000_phy_force_speed_duplex(hw); 1410 ret_val = e1000_phy_force_speed_duplex(hw);
1412 if (ret_val) { 1411 if (ret_val) {
1413 DEBUGOUT("Error Forcing Speed and Duplex\n"); 1412 e_dbg("Error Forcing Speed and Duplex\n");
1414 return ret_val; 1413 return ret_val;
1415 } 1414 }
1416 } 1415 }
@@ -1432,13 +1431,13 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
1432 if (ret_val) 1431 if (ret_val)
1433 return ret_val; 1432 return ret_val;
1434 1433
1435 DEBUGOUT("Valid link established!!!\n"); 1434 e_dbg("Valid link established!!!\n");
1436 return E1000_SUCCESS; 1435 return E1000_SUCCESS;
1437 } 1436 }
1438 udelay(10); 1437 udelay(10);
1439 } 1438 }
1440 1439
1441 DEBUGOUT("Unable to establish link!!!\n"); 1440 e_dbg("Unable to establish link!!!\n");
1442 return E1000_SUCCESS; 1441 return E1000_SUCCESS;
1443} 1442}
1444 1443
@@ -1454,7 +1453,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1454 u16 mii_autoneg_adv_reg; 1453 u16 mii_autoneg_adv_reg;
1455 u16 mii_1000t_ctrl_reg; 1454 u16 mii_1000t_ctrl_reg;
1456 1455
1457 DEBUGFUNC("e1000_phy_setup_autoneg"); 1456 e_dbg("e1000_phy_setup_autoneg");
1458 1457
1459 /* Read the MII Auto-Neg Advertisement Register (Address 4). */ 1458 /* Read the MII Auto-Neg Advertisement Register (Address 4). */
1460 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); 1459 ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
@@ -1481,41 +1480,41 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1481 mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; 1480 mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
1482 mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; 1481 mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
1483 1482
1484 DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised); 1483 e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised);
1485 1484
1486 /* Do we want to advertise 10 Mb Half Duplex? */ 1485 /* Do we want to advertise 10 Mb Half Duplex? */
1487 if (hw->autoneg_advertised & ADVERTISE_10_HALF) { 1486 if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
1488 DEBUGOUT("Advertise 10mb Half duplex\n"); 1487 e_dbg("Advertise 10mb Half duplex\n");
1489 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; 1488 mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
1490 } 1489 }
1491 1490
1492 /* Do we want to advertise 10 Mb Full Duplex? */ 1491 /* Do we want to advertise 10 Mb Full Duplex? */
1493 if (hw->autoneg_advertised & ADVERTISE_10_FULL) { 1492 if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
1494 DEBUGOUT("Advertise 10mb Full duplex\n"); 1493 e_dbg("Advertise 10mb Full duplex\n");
1495 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; 1494 mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
1496 } 1495 }
1497 1496
1498 /* Do we want to advertise 100 Mb Half Duplex? */ 1497 /* Do we want to advertise 100 Mb Half Duplex? */
1499 if (hw->autoneg_advertised & ADVERTISE_100_HALF) { 1498 if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
1500 DEBUGOUT("Advertise 100mb Half duplex\n"); 1499 e_dbg("Advertise 100mb Half duplex\n");
1501 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; 1500 mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
1502 } 1501 }
1503 1502
1504 /* Do we want to advertise 100 Mb Full Duplex? */ 1503 /* Do we want to advertise 100 Mb Full Duplex? */
1505 if (hw->autoneg_advertised & ADVERTISE_100_FULL) { 1504 if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
1506 DEBUGOUT("Advertise 100mb Full duplex\n"); 1505 e_dbg("Advertise 100mb Full duplex\n");
1507 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; 1506 mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
1508 } 1507 }
1509 1508
1510 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ 1509 /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
1511 if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { 1510 if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
1512 DEBUGOUT 1511 e_dbg
1513 ("Advertise 1000mb Half duplex requested, request denied!\n"); 1512 ("Advertise 1000mb Half duplex requested, request denied!\n");
1514 } 1513 }
1515 1514
1516 /* Do we want to advertise 1000 Mb Full Duplex? */ 1515 /* Do we want to advertise 1000 Mb Full Duplex? */
1517 if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { 1516 if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
1518 DEBUGOUT("Advertise 1000mb Full duplex\n"); 1517 e_dbg("Advertise 1000mb Full duplex\n");
1519 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; 1518 mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
1520 } 1519 }
1521 1520
@@ -1568,7 +1567,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1568 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); 1567 mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
1569 break; 1568 break;
1570 default: 1569 default:
1571 DEBUGOUT("Flow control param set incorrectly\n"); 1570 e_dbg("Flow control param set incorrectly\n");
1572 return -E1000_ERR_CONFIG; 1571 return -E1000_ERR_CONFIG;
1573 } 1572 }
1574 1573
@@ -1576,7 +1575,7 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1576 if (ret_val) 1575 if (ret_val)
1577 return ret_val; 1576 return ret_val;
1578 1577
1579 DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 1578 e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
1580 1579
1581 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); 1580 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
1582 if (ret_val) 1581 if (ret_val)
@@ -1600,12 +1599,12 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1600 u16 phy_data; 1599 u16 phy_data;
1601 u16 i; 1600 u16 i;
1602 1601
1603 DEBUGFUNC("e1000_phy_force_speed_duplex"); 1602 e_dbg("e1000_phy_force_speed_duplex");
1604 1603
1605 /* Turn off Flow control if we are forcing speed and duplex. */ 1604 /* Turn off Flow control if we are forcing speed and duplex. */
1606 hw->fc = E1000_FC_NONE; 1605 hw->fc = E1000_FC_NONE;
1607 1606
1608 DEBUGOUT1("hw->fc = %d\n", hw->fc); 1607 e_dbg("hw->fc = %d\n", hw->fc);
1609 1608
1610 /* Read the Device Control Register. */ 1609 /* Read the Device Control Register. */
1611 ctrl = er32(CTRL); 1610 ctrl = er32(CTRL);
@@ -1634,14 +1633,14 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1634 */ 1633 */
1635 ctrl |= E1000_CTRL_FD; 1634 ctrl |= E1000_CTRL_FD;
1636 mii_ctrl_reg |= MII_CR_FULL_DUPLEX; 1635 mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
1637 DEBUGOUT("Full Duplex\n"); 1636 e_dbg("Full Duplex\n");
1638 } else { 1637 } else {
1639 /* We want to force half duplex so we CLEAR the full duplex bits in 1638 /* We want to force half duplex so we CLEAR the full duplex bits in
1640 * the Device and MII Control Registers. 1639 * the Device and MII Control Registers.
1641 */ 1640 */
1642 ctrl &= ~E1000_CTRL_FD; 1641 ctrl &= ~E1000_CTRL_FD;
1643 mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; 1642 mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
1644 DEBUGOUT("Half Duplex\n"); 1643 e_dbg("Half Duplex\n");
1645 } 1644 }
1646 1645
1647 /* Are we forcing 100Mbps??? */ 1646 /* Are we forcing 100Mbps??? */
@@ -1651,13 +1650,13 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1651 ctrl |= E1000_CTRL_SPD_100; 1650 ctrl |= E1000_CTRL_SPD_100;
1652 mii_ctrl_reg |= MII_CR_SPEED_100; 1651 mii_ctrl_reg |= MII_CR_SPEED_100;
1653 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); 1652 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
1654 DEBUGOUT("Forcing 100mb "); 1653 e_dbg("Forcing 100mb ");
1655 } else { 1654 } else {
1656 /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ 1655 /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
1657 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); 1656 ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
1658 mii_ctrl_reg |= MII_CR_SPEED_10; 1657 mii_ctrl_reg |= MII_CR_SPEED_10;
1659 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); 1658 mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
1660 DEBUGOUT("Forcing 10mb "); 1659 e_dbg("Forcing 10mb ");
1661 } 1660 }
1662 1661
1663 e1000_config_collision_dist(hw); 1662 e1000_config_collision_dist(hw);
@@ -1680,7 +1679,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1680 if (ret_val) 1679 if (ret_val)
1681 return ret_val; 1680 return ret_val;
1682 1681
1683 DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data); 1682 e_dbg("M88E1000 PSCR: %x\n", phy_data);
1684 1683
1685 /* Need to reset the PHY or these changes will be ignored */ 1684 /* Need to reset the PHY or these changes will be ignored */
1686 mii_ctrl_reg |= MII_CR_RESET; 1685 mii_ctrl_reg |= MII_CR_RESET;
@@ -1720,7 +1719,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1720 */ 1719 */
1721 if (hw->wait_autoneg_complete) { 1720 if (hw->wait_autoneg_complete) {
1722 /* We will wait for autoneg to complete. */ 1721 /* We will wait for autoneg to complete. */
1723 DEBUGOUT("Waiting for forced speed/duplex link.\n"); 1722 e_dbg("Waiting for forced speed/duplex link.\n");
1724 mii_status_reg = 0; 1723 mii_status_reg = 0;
1725 1724
1726 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ 1725 /* We will wait for autoneg to complete or 4.5 seconds to expire. */
@@ -1746,7 +1745,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1746 /* We didn't get link. Reset the DSP and wait again for link. */ 1745 /* We didn't get link. Reset the DSP and wait again for link. */
1747 ret_val = e1000_phy_reset_dsp(hw); 1746 ret_val = e1000_phy_reset_dsp(hw);
1748 if (ret_val) { 1747 if (ret_val) {
1749 DEBUGOUT("Error Resetting PHY DSP\n"); 1748 e_dbg("Error Resetting PHY DSP\n");
1750 return ret_val; 1749 return ret_val;
1751 } 1750 }
1752 } 1751 }
@@ -1826,7 +1825,7 @@ void e1000_config_collision_dist(struct e1000_hw *hw)
1826{ 1825{
1827 u32 tctl, coll_dist; 1826 u32 tctl, coll_dist;
1828 1827
1829 DEBUGFUNC("e1000_config_collision_dist"); 1828 e_dbg("e1000_config_collision_dist");
1830 1829
1831 if (hw->mac_type < e1000_82543) 1830 if (hw->mac_type < e1000_82543)
1832 coll_dist = E1000_COLLISION_DISTANCE_82542; 1831 coll_dist = E1000_COLLISION_DISTANCE_82542;
@@ -1857,7 +1856,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
1857 s32 ret_val; 1856 s32 ret_val;
1858 u16 phy_data; 1857 u16 phy_data;
1859 1858
1860 DEBUGFUNC("e1000_config_mac_to_phy"); 1859 e_dbg("e1000_config_mac_to_phy");
1861 1860
1862 /* 82544 or newer MAC, Auto Speed Detection takes care of 1861 /* 82544 or newer MAC, Auto Speed Detection takes care of
1863 * MAC speed/duplex configuration.*/ 1862 * MAC speed/duplex configuration.*/
@@ -1913,7 +1912,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
1913{ 1912{
1914 u32 ctrl; 1913 u32 ctrl;
1915 1914
1916 DEBUGFUNC("e1000_force_mac_fc"); 1915 e_dbg("e1000_force_mac_fc");
1917 1916
1918 /* Get the current configuration of the Device Control Register */ 1917 /* Get the current configuration of the Device Control Register */
1919 ctrl = er32(CTRL); 1918 ctrl = er32(CTRL);
@@ -1952,7 +1951,7 @@ s32 e1000_force_mac_fc(struct e1000_hw *hw)
1952 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); 1951 ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
1953 break; 1952 break;
1954 default: 1953 default:
1955 DEBUGOUT("Flow control param set incorrectly\n"); 1954 e_dbg("Flow control param set incorrectly\n");
1956 return -E1000_ERR_CONFIG; 1955 return -E1000_ERR_CONFIG;
1957 } 1956 }
1958 1957
@@ -1984,7 +1983,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
1984 u16 speed; 1983 u16 speed;
1985 u16 duplex; 1984 u16 duplex;
1986 1985
1987 DEBUGFUNC("e1000_config_fc_after_link_up"); 1986 e_dbg("e1000_config_fc_after_link_up");
1988 1987
1989 /* Check for the case where we have fiber media and auto-neg failed 1988 /* Check for the case where we have fiber media and auto-neg failed
1990 * so we had to force link. In this case, we need to force the 1989 * so we had to force link. In this case, we need to force the
@@ -1997,7 +1996,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
1997 && (!hw->autoneg))) { 1996 && (!hw->autoneg))) {
1998 ret_val = e1000_force_mac_fc(hw); 1997 ret_val = e1000_force_mac_fc(hw);
1999 if (ret_val) { 1998 if (ret_val) {
2000 DEBUGOUT("Error forcing flow control settings\n"); 1999 e_dbg("Error forcing flow control settings\n");
2001 return ret_val; 2000 return ret_val;
2002 } 2001 }
2003 } 2002 }
@@ -2079,10 +2078,10 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2079 */ 2078 */
2080 if (hw->original_fc == E1000_FC_FULL) { 2079 if (hw->original_fc == E1000_FC_FULL) {
2081 hw->fc = E1000_FC_FULL; 2080 hw->fc = E1000_FC_FULL;
2082 DEBUGOUT("Flow Control = FULL.\n"); 2081 e_dbg("Flow Control = FULL.\n");
2083 } else { 2082 } else {
2084 hw->fc = E1000_FC_RX_PAUSE; 2083 hw->fc = E1000_FC_RX_PAUSE;
2085 DEBUGOUT 2084 e_dbg
2086 ("Flow Control = RX PAUSE frames only.\n"); 2085 ("Flow Control = RX PAUSE frames only.\n");
2087 } 2086 }
2088 } 2087 }
@@ -2100,7 +2099,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2100 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) 2099 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
2101 { 2100 {
2102 hw->fc = E1000_FC_TX_PAUSE; 2101 hw->fc = E1000_FC_TX_PAUSE;
2103 DEBUGOUT 2102 e_dbg
2104 ("Flow Control = TX PAUSE frames only.\n"); 2103 ("Flow Control = TX PAUSE frames only.\n");
2105 } 2104 }
2106 /* For transmitting PAUSE frames ONLY. 2105 /* For transmitting PAUSE frames ONLY.
@@ -2117,7 +2116,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2117 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) 2116 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR))
2118 { 2117 {
2119 hw->fc = E1000_FC_RX_PAUSE; 2118 hw->fc = E1000_FC_RX_PAUSE;
2120 DEBUGOUT 2119 e_dbg
2121 ("Flow Control = RX PAUSE frames only.\n"); 2120 ("Flow Control = RX PAUSE frames only.\n");
2122 } 2121 }
2123 /* Per the IEEE spec, at this point flow control should be 2122 /* Per the IEEE spec, at this point flow control should be
@@ -2144,10 +2143,10 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2144 hw->original_fc == E1000_FC_TX_PAUSE) || 2143 hw->original_fc == E1000_FC_TX_PAUSE) ||
2145 hw->fc_strict_ieee) { 2144 hw->fc_strict_ieee) {
2146 hw->fc = E1000_FC_NONE; 2145 hw->fc = E1000_FC_NONE;
2147 DEBUGOUT("Flow Control = NONE.\n"); 2146 e_dbg("Flow Control = NONE.\n");
2148 } else { 2147 } else {
2149 hw->fc = E1000_FC_RX_PAUSE; 2148 hw->fc = E1000_FC_RX_PAUSE;
2150 DEBUGOUT 2149 e_dbg
2151 ("Flow Control = RX PAUSE frames only.\n"); 2150 ("Flow Control = RX PAUSE frames only.\n");
2152 } 2151 }
2153 2152
@@ -2158,7 +2157,7 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2158 ret_val = 2157 ret_val =
2159 e1000_get_speed_and_duplex(hw, &speed, &duplex); 2158 e1000_get_speed_and_duplex(hw, &speed, &duplex);
2160 if (ret_val) { 2159 if (ret_val) {
2161 DEBUGOUT 2160 e_dbg
2162 ("Error getting link speed and duplex\n"); 2161 ("Error getting link speed and duplex\n");
2163 return ret_val; 2162 return ret_val;
2164 } 2163 }
@@ -2171,12 +2170,12 @@ static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw)
2171 */ 2170 */
2172 ret_val = e1000_force_mac_fc(hw); 2171 ret_val = e1000_force_mac_fc(hw);
2173 if (ret_val) { 2172 if (ret_val) {
2174 DEBUGOUT 2173 e_dbg
2175 ("Error forcing flow control settings\n"); 2174 ("Error forcing flow control settings\n");
2176 return ret_val; 2175 return ret_val;
2177 } 2176 }
2178 } else { 2177 } else {
2179 DEBUGOUT 2178 e_dbg
2180 ("Copper PHY and Auto Neg has not completed.\n"); 2179 ("Copper PHY and Auto Neg has not completed.\n");
2181 } 2180 }
2182 } 2181 }
@@ -2197,7 +2196,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2197 u32 status; 2196 u32 status;
2198 s32 ret_val = E1000_SUCCESS; 2197 s32 ret_val = E1000_SUCCESS;
2199 2198
2200 DEBUGFUNC("e1000_check_for_serdes_link_generic"); 2199 e_dbg("e1000_check_for_serdes_link_generic");
2201 2200
2202 ctrl = er32(CTRL); 2201 ctrl = er32(CTRL);
2203 status = er32(STATUS); 2202 status = er32(STATUS);
@@ -2216,7 +2215,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2216 hw->autoneg_failed = 1; 2215 hw->autoneg_failed = 1;
2217 goto out; 2216 goto out;
2218 } 2217 }
2219 DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n"); 2218 e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n");
2220 2219
2221 /* Disable auto-negotiation in the TXCW register */ 2220 /* Disable auto-negotiation in the TXCW register */
2222 ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); 2221 ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE));
@@ -2229,7 +2228,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2229 /* Configure Flow Control after forcing link up. */ 2228 /* Configure Flow Control after forcing link up. */
2230 ret_val = e1000_config_fc_after_link_up(hw); 2229 ret_val = e1000_config_fc_after_link_up(hw);
2231 if (ret_val) { 2230 if (ret_val) {
2232 DEBUGOUT("Error configuring flow control\n"); 2231 e_dbg("Error configuring flow control\n");
2233 goto out; 2232 goto out;
2234 } 2233 }
2235 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { 2234 } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
@@ -2239,7 +2238,7 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2239 * and disable forced link in the Device Control register 2238 * and disable forced link in the Device Control register
2240 * in an attempt to auto-negotiate with our link partner. 2239 * in an attempt to auto-negotiate with our link partner.
2241 */ 2240 */
2242 DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n"); 2241 e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n");
2243 ew32(TXCW, hw->txcw); 2242 ew32(TXCW, hw->txcw);
2244 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 2243 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
2245 2244
@@ -2256,11 +2255,11 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2256 if (rxcw & E1000_RXCW_SYNCH) { 2255 if (rxcw & E1000_RXCW_SYNCH) {
2257 if (!(rxcw & E1000_RXCW_IV)) { 2256 if (!(rxcw & E1000_RXCW_IV)) {
2258 hw->serdes_has_link = true; 2257 hw->serdes_has_link = true;
2259 DEBUGOUT("SERDES: Link up - forced.\n"); 2258 e_dbg("SERDES: Link up - forced.\n");
2260 } 2259 }
2261 } else { 2260 } else {
2262 hw->serdes_has_link = false; 2261 hw->serdes_has_link = false;
2263 DEBUGOUT("SERDES: Link down - force failed.\n"); 2262 e_dbg("SERDES: Link down - force failed.\n");
2264 } 2263 }
2265 } 2264 }
2266 2265
@@ -2273,20 +2272,20 @@ static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
2273 if (rxcw & E1000_RXCW_SYNCH) { 2272 if (rxcw & E1000_RXCW_SYNCH) {
2274 if (!(rxcw & E1000_RXCW_IV)) { 2273 if (!(rxcw & E1000_RXCW_IV)) {
2275 hw->serdes_has_link = true; 2274 hw->serdes_has_link = true;
2276 DEBUGOUT("SERDES: Link up - autoneg " 2275 e_dbg("SERDES: Link up - autoneg "
2277 "completed successfully.\n"); 2276 "completed successfully.\n");
2278 } else { 2277 } else {
2279 hw->serdes_has_link = false; 2278 hw->serdes_has_link = false;
2280 DEBUGOUT("SERDES: Link down - invalid" 2279 e_dbg("SERDES: Link down - invalid"
2281 "codewords detected in autoneg.\n"); 2280 "codewords detected in autoneg.\n");
2282 } 2281 }
2283 } else { 2282 } else {
2284 hw->serdes_has_link = false; 2283 hw->serdes_has_link = false;
2285 DEBUGOUT("SERDES: Link down - no sync.\n"); 2284 e_dbg("SERDES: Link down - no sync.\n");
2286 } 2285 }
2287 } else { 2286 } else {
2288 hw->serdes_has_link = false; 2287 hw->serdes_has_link = false;
2289 DEBUGOUT("SERDES: Link down - autoneg failed\n"); 2288 e_dbg("SERDES: Link down - autoneg failed\n");
2290 } 2289 }
2291 } 2290 }
2292 2291
@@ -2312,7 +2311,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2312 s32 ret_val; 2311 s32 ret_val;
2313 u16 phy_data; 2312 u16 phy_data;
2314 2313
2315 DEBUGFUNC("e1000_check_for_link"); 2314 e_dbg("e1000_check_for_link");
2316 2315
2317 ctrl = er32(CTRL); 2316 ctrl = er32(CTRL);
2318 status = er32(STATUS); 2317 status = er32(STATUS);
@@ -2407,7 +2406,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2407 else { 2406 else {
2408 ret_val = e1000_config_mac_to_phy(hw); 2407 ret_val = e1000_config_mac_to_phy(hw);
2409 if (ret_val) { 2408 if (ret_val) {
2410 DEBUGOUT 2409 e_dbg
2411 ("Error configuring MAC to PHY settings\n"); 2410 ("Error configuring MAC to PHY settings\n");
2412 return ret_val; 2411 return ret_val;
2413 } 2412 }
@@ -2419,7 +2418,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2419 */ 2418 */
2420 ret_val = e1000_config_fc_after_link_up(hw); 2419 ret_val = e1000_config_fc_after_link_up(hw);
2421 if (ret_val) { 2420 if (ret_val) {
2422 DEBUGOUT("Error configuring flow control\n"); 2421 e_dbg("Error configuring flow control\n");
2423 return ret_val; 2422 return ret_val;
2424 } 2423 }
2425 2424
@@ -2435,7 +2434,7 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2435 ret_val = 2434 ret_val =
2436 e1000_get_speed_and_duplex(hw, &speed, &duplex); 2435 e1000_get_speed_and_duplex(hw, &speed, &duplex);
2437 if (ret_val) { 2436 if (ret_val) {
2438 DEBUGOUT 2437 e_dbg
2439 ("Error getting link speed and duplex\n"); 2438 ("Error getting link speed and duplex\n");
2440 return ret_val; 2439 return ret_val;
2441 } 2440 }
@@ -2487,30 +2486,30 @@ s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
2487 s32 ret_val; 2486 s32 ret_val;
2488 u16 phy_data; 2487 u16 phy_data;
2489 2488
2490 DEBUGFUNC("e1000_get_speed_and_duplex"); 2489 e_dbg("e1000_get_speed_and_duplex");
2491 2490
2492 if (hw->mac_type >= e1000_82543) { 2491 if (hw->mac_type >= e1000_82543) {
2493 status = er32(STATUS); 2492 status = er32(STATUS);
2494 if (status & E1000_STATUS_SPEED_1000) { 2493 if (status & E1000_STATUS_SPEED_1000) {
2495 *speed = SPEED_1000; 2494 *speed = SPEED_1000;
2496 DEBUGOUT("1000 Mbs, "); 2495 e_dbg("1000 Mbs, ");
2497 } else if (status & E1000_STATUS_SPEED_100) { 2496 } else if (status & E1000_STATUS_SPEED_100) {
2498 *speed = SPEED_100; 2497 *speed = SPEED_100;
2499 DEBUGOUT("100 Mbs, "); 2498 e_dbg("100 Mbs, ");
2500 } else { 2499 } else {
2501 *speed = SPEED_10; 2500 *speed = SPEED_10;
2502 DEBUGOUT("10 Mbs, "); 2501 e_dbg("10 Mbs, ");
2503 } 2502 }
2504 2503
2505 if (status & E1000_STATUS_FD) { 2504 if (status & E1000_STATUS_FD) {
2506 *duplex = FULL_DUPLEX; 2505 *duplex = FULL_DUPLEX;
2507 DEBUGOUT("Full Duplex\n"); 2506 e_dbg("Full Duplex\n");
2508 } else { 2507 } else {
2509 *duplex = HALF_DUPLEX; 2508 *duplex = HALF_DUPLEX;
2510 DEBUGOUT(" Half Duplex\n"); 2509 e_dbg(" Half Duplex\n");
2511 } 2510 }
2512 } else { 2511 } else {
2513 DEBUGOUT("1000 Mbs, Full Duplex\n"); 2512 e_dbg("1000 Mbs, Full Duplex\n");
2514 *speed = SPEED_1000; 2513 *speed = SPEED_1000;
2515 *duplex = FULL_DUPLEX; 2514 *duplex = FULL_DUPLEX;
2516 } 2515 }
@@ -2554,8 +2553,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
2554 u16 i; 2553 u16 i;
2555 u16 phy_data; 2554 u16 phy_data;
2556 2555
2557 DEBUGFUNC("e1000_wait_autoneg"); 2556 e_dbg("e1000_wait_autoneg");
2558 DEBUGOUT("Waiting for Auto-Neg to complete.\n"); 2557 e_dbg("Waiting for Auto-Neg to complete.\n");
2559 2558
2560 /* We will wait for autoneg to complete or 4.5 seconds to expire. */ 2559 /* We will wait for autoneg to complete or 4.5 seconds to expire. */
2561 for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { 2560 for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
@@ -2718,7 +2717,7 @@ s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data)
2718{ 2717{
2719 u32 ret_val; 2718 u32 ret_val;
2720 2719
2721 DEBUGFUNC("e1000_read_phy_reg"); 2720 e_dbg("e1000_read_phy_reg");
2722 2721
2723 if ((hw->phy_type == e1000_phy_igp) && 2722 if ((hw->phy_type == e1000_phy_igp) &&
2724 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2723 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
@@ -2741,10 +2740,10 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2741 u32 mdic = 0; 2740 u32 mdic = 0;
2742 const u32 phy_addr = 1; 2741 const u32 phy_addr = 1;
2743 2742
2744 DEBUGFUNC("e1000_read_phy_reg_ex"); 2743 e_dbg("e1000_read_phy_reg_ex");
2745 2744
2746 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2745 if (reg_addr > MAX_PHY_REG_ADDRESS) {
2747 DEBUGOUT1("PHY Address %d is out of range\n", reg_addr); 2746 e_dbg("PHY Address %d is out of range\n", reg_addr);
2748 return -E1000_ERR_PARAM; 2747 return -E1000_ERR_PARAM;
2749 } 2748 }
2750 2749
@@ -2767,11 +2766,11 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2767 break; 2766 break;
2768 } 2767 }
2769 if (!(mdic & E1000_MDIC_READY)) { 2768 if (!(mdic & E1000_MDIC_READY)) {
2770 DEBUGOUT("MDI Read did not complete\n"); 2769 e_dbg("MDI Read did not complete\n");
2771 return -E1000_ERR_PHY; 2770 return -E1000_ERR_PHY;
2772 } 2771 }
2773 if (mdic & E1000_MDIC_ERROR) { 2772 if (mdic & E1000_MDIC_ERROR) {
2774 DEBUGOUT("MDI Error\n"); 2773 e_dbg("MDI Error\n");
2775 return -E1000_ERR_PHY; 2774 return -E1000_ERR_PHY;
2776 } 2775 }
2777 *phy_data = (u16) mdic; 2776 *phy_data = (u16) mdic;
@@ -2820,7 +2819,7 @@ s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data)
2820{ 2819{
2821 u32 ret_val; 2820 u32 ret_val;
2822 2821
2823 DEBUGFUNC("e1000_write_phy_reg"); 2822 e_dbg("e1000_write_phy_reg");
2824 2823
2825 if ((hw->phy_type == e1000_phy_igp) && 2824 if ((hw->phy_type == e1000_phy_igp) &&
2826 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { 2825 (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
@@ -2843,10 +2842,10 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2843 u32 mdic = 0; 2842 u32 mdic = 0;
2844 const u32 phy_addr = 1; 2843 const u32 phy_addr = 1;
2845 2844
2846 DEBUGFUNC("e1000_write_phy_reg_ex"); 2845 e_dbg("e1000_write_phy_reg_ex");
2847 2846
2848 if (reg_addr > MAX_PHY_REG_ADDRESS) { 2847 if (reg_addr > MAX_PHY_REG_ADDRESS) {
2849 DEBUGOUT1("PHY Address %d is out of range\n", reg_addr); 2848 e_dbg("PHY Address %d is out of range\n", reg_addr);
2850 return -E1000_ERR_PARAM; 2849 return -E1000_ERR_PARAM;
2851 } 2850 }
2852 2851
@@ -2870,7 +2869,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2870 break; 2869 break;
2871 } 2870 }
2872 if (!(mdic & E1000_MDIC_READY)) { 2871 if (!(mdic & E1000_MDIC_READY)) {
2873 DEBUGOUT("MDI Write did not complete\n"); 2872 e_dbg("MDI Write did not complete\n");
2874 return -E1000_ERR_PHY; 2873 return -E1000_ERR_PHY;
2875 } 2874 }
2876 } else { 2875 } else {
@@ -2910,9 +2909,9 @@ s32 e1000_phy_hw_reset(struct e1000_hw *hw)
2910 u32 led_ctrl; 2909 u32 led_ctrl;
2911 s32 ret_val; 2910 s32 ret_val;
2912 2911
2913 DEBUGFUNC("e1000_phy_hw_reset"); 2912 e_dbg("e1000_phy_hw_reset");
2914 2913
2915 DEBUGOUT("Resetting Phy...\n"); 2914 e_dbg("Resetting Phy...\n");
2916 2915
2917 if (hw->mac_type > e1000_82543) { 2916 if (hw->mac_type > e1000_82543) {
2918 /* Read the device control register and assert the E1000_CTRL_PHY_RST 2917 /* Read the device control register and assert the E1000_CTRL_PHY_RST
@@ -2973,7 +2972,7 @@ s32 e1000_phy_reset(struct e1000_hw *hw)
2973 s32 ret_val; 2972 s32 ret_val;
2974 u16 phy_data; 2973 u16 phy_data;
2975 2974
2976 DEBUGFUNC("e1000_phy_reset"); 2975 e_dbg("e1000_phy_reset");
2977 2976
2978 switch (hw->phy_type) { 2977 switch (hw->phy_type) {
2979 case e1000_phy_igp: 2978 case e1000_phy_igp:
@@ -3013,7 +3012,7 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3013 u16 phy_id_high, phy_id_low; 3012 u16 phy_id_high, phy_id_low;
3014 bool match = false; 3013 bool match = false;
3015 3014
3016 DEBUGFUNC("e1000_detect_gig_phy"); 3015 e_dbg("e1000_detect_gig_phy");
3017 3016
3018 if (hw->phy_id != 0) 3017 if (hw->phy_id != 0)
3019 return E1000_SUCCESS; 3018 return E1000_SUCCESS;
@@ -3057,16 +3056,16 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3057 match = true; 3056 match = true;
3058 break; 3057 break;
3059 default: 3058 default:
3060 DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type); 3059 e_dbg("Invalid MAC type %d\n", hw->mac_type);
3061 return -E1000_ERR_CONFIG; 3060 return -E1000_ERR_CONFIG;
3062 } 3061 }
3063 phy_init_status = e1000_set_phy_type(hw); 3062 phy_init_status = e1000_set_phy_type(hw);
3064 3063
3065 if ((match) && (phy_init_status == E1000_SUCCESS)) { 3064 if ((match) && (phy_init_status == E1000_SUCCESS)) {
3066 DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id); 3065 e_dbg("PHY ID 0x%X detected\n", hw->phy_id);
3067 return E1000_SUCCESS; 3066 return E1000_SUCCESS;
3068 } 3067 }
3069 DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id); 3068 e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id);
3070 return -E1000_ERR_PHY; 3069 return -E1000_ERR_PHY;
3071} 3070}
3072 3071
@@ -3079,7 +3078,7 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3079static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) 3078static s32 e1000_phy_reset_dsp(struct e1000_hw *hw)
3080{ 3079{
3081 s32 ret_val; 3080 s32 ret_val;
3082 DEBUGFUNC("e1000_phy_reset_dsp"); 3081 e_dbg("e1000_phy_reset_dsp");
3083 3082
3084 do { 3083 do {
3085 ret_val = e1000_write_phy_reg(hw, 29, 0x001d); 3084 ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
@@ -3111,7 +3110,7 @@ static s32 e1000_phy_igp_get_info(struct e1000_hw *hw,
3111 u16 phy_data, min_length, max_length, average; 3110 u16 phy_data, min_length, max_length, average;
3112 e1000_rev_polarity polarity; 3111 e1000_rev_polarity polarity;
3113 3112
3114 DEBUGFUNC("e1000_phy_igp_get_info"); 3113 e_dbg("e1000_phy_igp_get_info");
3115 3114
3116 /* The downshift status is checked only once, after link is established, 3115 /* The downshift status is checked only once, after link is established,
3117 * and it stored in the hw->speed_downgraded parameter. */ 3116 * and it stored in the hw->speed_downgraded parameter. */
@@ -3189,7 +3188,7 @@ static s32 e1000_phy_m88_get_info(struct e1000_hw *hw,
3189 u16 phy_data; 3188 u16 phy_data;
3190 e1000_rev_polarity polarity; 3189 e1000_rev_polarity polarity;
3191 3190
3192 DEBUGFUNC("e1000_phy_m88_get_info"); 3191 e_dbg("e1000_phy_m88_get_info");
3193 3192
3194 /* The downshift status is checked only once, after link is established, 3193 /* The downshift status is checked only once, after link is established,
3195 * and it stored in the hw->speed_downgraded parameter. */ 3194 * and it stored in the hw->speed_downgraded parameter. */
@@ -3261,7 +3260,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
3261 s32 ret_val; 3260 s32 ret_val;
3262 u16 phy_data; 3261 u16 phy_data;
3263 3262
3264 DEBUGFUNC("e1000_phy_get_info"); 3263 e_dbg("e1000_phy_get_info");
3265 3264
3266 phy_info->cable_length = e1000_cable_length_undefined; 3265 phy_info->cable_length = e1000_cable_length_undefined;
3267 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; 3266 phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
@@ -3273,7 +3272,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
3273 phy_info->remote_rx = e1000_1000t_rx_status_undefined; 3272 phy_info->remote_rx = e1000_1000t_rx_status_undefined;
3274 3273
3275 if (hw->media_type != e1000_media_type_copper) { 3274 if (hw->media_type != e1000_media_type_copper) {
3276 DEBUGOUT("PHY info is only valid for copper media\n"); 3275 e_dbg("PHY info is only valid for copper media\n");
3277 return -E1000_ERR_CONFIG; 3276 return -E1000_ERR_CONFIG;
3278 } 3277 }
3279 3278
@@ -3286,7 +3285,7 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
3286 return ret_val; 3285 return ret_val;
3287 3286
3288 if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { 3287 if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
3289 DEBUGOUT("PHY info is only valid if link is up\n"); 3288 e_dbg("PHY info is only valid if link is up\n");
3290 return -E1000_ERR_CONFIG; 3289 return -E1000_ERR_CONFIG;
3291 } 3290 }
3292 3291
@@ -3298,10 +3297,10 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
3298 3297
3299s32 e1000_validate_mdi_setting(struct e1000_hw *hw) 3298s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
3300{ 3299{
3301 DEBUGFUNC("e1000_validate_mdi_settings"); 3300 e_dbg("e1000_validate_mdi_settings");
3302 3301
3303 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { 3302 if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
3304 DEBUGOUT("Invalid MDI setting detected\n"); 3303 e_dbg("Invalid MDI setting detected\n");
3305 hw->mdix = 1; 3304 hw->mdix = 1;
3306 return -E1000_ERR_CONFIG; 3305 return -E1000_ERR_CONFIG;
3307 } 3306 }
@@ -3322,7 +3321,7 @@ s32 e1000_init_eeprom_params(struct e1000_hw *hw)
3322 s32 ret_val = E1000_SUCCESS; 3321 s32 ret_val = E1000_SUCCESS;
3323 u16 eeprom_size; 3322 u16 eeprom_size;
3324 3323
3325 DEBUGFUNC("e1000_init_eeprom_params"); 3324 e_dbg("e1000_init_eeprom_params");
3326 3325
3327 switch (hw->mac_type) { 3326 switch (hw->mac_type) {
3328 case e1000_82542_rev2_0: 3327 case e1000_82542_rev2_0:
@@ -3539,7 +3538,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
3539 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3538 struct e1000_eeprom_info *eeprom = &hw->eeprom;
3540 u32 eecd, i = 0; 3539 u32 eecd, i = 0;
3541 3540
3542 DEBUGFUNC("e1000_acquire_eeprom"); 3541 e_dbg("e1000_acquire_eeprom");
3543 3542
3544 eecd = er32(EECD); 3543 eecd = er32(EECD);
3545 3544
@@ -3557,7 +3556,7 @@ static s32 e1000_acquire_eeprom(struct e1000_hw *hw)
3557 if (!(eecd & E1000_EECD_GNT)) { 3556 if (!(eecd & E1000_EECD_GNT)) {
3558 eecd &= ~E1000_EECD_REQ; 3557 eecd &= ~E1000_EECD_REQ;
3559 ew32(EECD, eecd); 3558 ew32(EECD, eecd);
3560 DEBUGOUT("Could not acquire EEPROM grant\n"); 3559 e_dbg("Could not acquire EEPROM grant\n");
3561 return -E1000_ERR_EEPROM; 3560 return -E1000_ERR_EEPROM;
3562 } 3561 }
3563 } 3562 }
@@ -3639,7 +3638,7 @@ static void e1000_release_eeprom(struct e1000_hw *hw)
3639{ 3638{
3640 u32 eecd; 3639 u32 eecd;
3641 3640
3642 DEBUGFUNC("e1000_release_eeprom"); 3641 e_dbg("e1000_release_eeprom");
3643 3642
3644 eecd = er32(EECD); 3643 eecd = er32(EECD);
3645 3644
@@ -3687,7 +3686,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
3687 u16 retry_count = 0; 3686 u16 retry_count = 0;
3688 u8 spi_stat_reg; 3687 u8 spi_stat_reg;
3689 3688
3690 DEBUGFUNC("e1000_spi_eeprom_ready"); 3689 e_dbg("e1000_spi_eeprom_ready");
3691 3690
3692 /* Read "Status Register" repeatedly until the LSB is cleared. The 3691 /* Read "Status Register" repeatedly until the LSB is cleared. The
3693 * EEPROM will signal that the command has been completed by clearing 3692 * EEPROM will signal that the command has been completed by clearing
@@ -3712,7 +3711,7 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
3712 * only 0-5mSec on 5V devices) 3711 * only 0-5mSec on 5V devices)
3713 */ 3712 */
3714 if (retry_count >= EEPROM_MAX_RETRY_SPI) { 3713 if (retry_count >= EEPROM_MAX_RETRY_SPI) {
3715 DEBUGOUT("SPI EEPROM Status error\n"); 3714 e_dbg("SPI EEPROM Status error\n");
3716 return -E1000_ERR_EEPROM; 3715 return -E1000_ERR_EEPROM;
3717 } 3716 }
3718 3717
@@ -3741,7 +3740,7 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3741 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3740 struct e1000_eeprom_info *eeprom = &hw->eeprom;
3742 u32 i = 0; 3741 u32 i = 0;
3743 3742
3744 DEBUGFUNC("e1000_read_eeprom"); 3743 e_dbg("e1000_read_eeprom");
3745 3744
3746 /* If eeprom is not yet detected, do so now */ 3745 /* If eeprom is not yet detected, do so now */
3747 if (eeprom->word_size == 0) 3746 if (eeprom->word_size == 0)
@@ -3752,9 +3751,8 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3752 */ 3751 */
3753 if ((offset >= eeprom->word_size) 3752 if ((offset >= eeprom->word_size)
3754 || (words > eeprom->word_size - offset) || (words == 0)) { 3753 || (words > eeprom->word_size - offset) || (words == 0)) {
3755 DEBUGOUT2 3754 e_dbg("\"words\" parameter out of bounds. Words = %d,"
3756 ("\"words\" parameter out of bounds. Words = %d, size = %d\n", 3755 "size = %d\n", offset, eeprom->word_size);
3757 offset, eeprom->word_size);
3758 return -E1000_ERR_EEPROM; 3756 return -E1000_ERR_EEPROM;
3759 } 3757 }
3760 3758
@@ -3832,11 +3830,11 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
3832 u16 checksum = 0; 3830 u16 checksum = 0;
3833 u16 i, eeprom_data; 3831 u16 i, eeprom_data;
3834 3832
3835 DEBUGFUNC("e1000_validate_eeprom_checksum"); 3833 e_dbg("e1000_validate_eeprom_checksum");
3836 3834
3837 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { 3835 for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
3838 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 3836 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
3839 DEBUGOUT("EEPROM Read Error\n"); 3837 e_dbg("EEPROM Read Error\n");
3840 return -E1000_ERR_EEPROM; 3838 return -E1000_ERR_EEPROM;
3841 } 3839 }
3842 checksum += eeprom_data; 3840 checksum += eeprom_data;
@@ -3845,7 +3843,7 @@ s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw)
3845 if (checksum == (u16) EEPROM_SUM) 3843 if (checksum == (u16) EEPROM_SUM)
3846 return E1000_SUCCESS; 3844 return E1000_SUCCESS;
3847 else { 3845 else {
3848 DEBUGOUT("EEPROM Checksum Invalid\n"); 3846 e_dbg("EEPROM Checksum Invalid\n");
3849 return -E1000_ERR_EEPROM; 3847 return -E1000_ERR_EEPROM;
3850 } 3848 }
3851} 3849}
@@ -3862,18 +3860,18 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
3862 u16 checksum = 0; 3860 u16 checksum = 0;
3863 u16 i, eeprom_data; 3861 u16 i, eeprom_data;
3864 3862
3865 DEBUGFUNC("e1000_update_eeprom_checksum"); 3863 e_dbg("e1000_update_eeprom_checksum");
3866 3864
3867 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { 3865 for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
3868 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { 3866 if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
3869 DEBUGOUT("EEPROM Read Error\n"); 3867 e_dbg("EEPROM Read Error\n");
3870 return -E1000_ERR_EEPROM; 3868 return -E1000_ERR_EEPROM;
3871 } 3869 }
3872 checksum += eeprom_data; 3870 checksum += eeprom_data;
3873 } 3871 }
3874 checksum = (u16) EEPROM_SUM - checksum; 3872 checksum = (u16) EEPROM_SUM - checksum;
3875 if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { 3873 if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
3876 DEBUGOUT("EEPROM Write Error\n"); 3874 e_dbg("EEPROM Write Error\n");
3877 return -E1000_ERR_EEPROM; 3875 return -E1000_ERR_EEPROM;
3878 } 3876 }
3879 return E1000_SUCCESS; 3877 return E1000_SUCCESS;
@@ -3904,7 +3902,7 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3904 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3902 struct e1000_eeprom_info *eeprom = &hw->eeprom;
3905 s32 status = 0; 3903 s32 status = 0;
3906 3904
3907 DEBUGFUNC("e1000_write_eeprom"); 3905 e_dbg("e1000_write_eeprom");
3908 3906
3909 /* If eeprom is not yet detected, do so now */ 3907 /* If eeprom is not yet detected, do so now */
3910 if (eeprom->word_size == 0) 3908 if (eeprom->word_size == 0)
@@ -3915,7 +3913,7 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3915 */ 3913 */
3916 if ((offset >= eeprom->word_size) 3914 if ((offset >= eeprom->word_size)
3917 || (words > eeprom->word_size - offset) || (words == 0)) { 3915 || (words > eeprom->word_size - offset) || (words == 0)) {
3918 DEBUGOUT("\"words\" parameter out of bounds\n"); 3916 e_dbg("\"words\" parameter out of bounds\n");
3919 return -E1000_ERR_EEPROM; 3917 return -E1000_ERR_EEPROM;
3920 } 3918 }
3921 3919
@@ -3949,7 +3947,7 @@ static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words,
3949 struct e1000_eeprom_info *eeprom = &hw->eeprom; 3947 struct e1000_eeprom_info *eeprom = &hw->eeprom;
3950 u16 widx = 0; 3948 u16 widx = 0;
3951 3949
3952 DEBUGFUNC("e1000_write_eeprom_spi"); 3950 e_dbg("e1000_write_eeprom_spi");
3953 3951
3954 while (widx < words) { 3952 while (widx < words) {
3955 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; 3953 u8 write_opcode = EEPROM_WRITE_OPCODE_SPI;
@@ -4013,7 +4011,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
4013 u16 words_written = 0; 4011 u16 words_written = 0;
4014 u16 i = 0; 4012 u16 i = 0;
4015 4013
4016 DEBUGFUNC("e1000_write_eeprom_microwire"); 4014 e_dbg("e1000_write_eeprom_microwire");
4017 4015
4018 /* Send the write enable command to the EEPROM (3-bit opcode plus 4016 /* Send the write enable command to the EEPROM (3-bit opcode plus
4019 * 6/8-bit dummy address beginning with 11). It's less work to include 4017 * 6/8-bit dummy address beginning with 11). It's less work to include
@@ -4056,7 +4054,7 @@ static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset,
4056 udelay(50); 4054 udelay(50);
4057 } 4055 }
4058 if (i == 200) { 4056 if (i == 200) {
4059 DEBUGOUT("EEPROM Write did not complete\n"); 4057 e_dbg("EEPROM Write did not complete\n");
4060 return -E1000_ERR_EEPROM; 4058 return -E1000_ERR_EEPROM;
4061 } 4059 }
4062 4060
@@ -4092,12 +4090,12 @@ s32 e1000_read_mac_addr(struct e1000_hw *hw)
4092 u16 offset; 4090 u16 offset;
4093 u16 eeprom_data, i; 4091 u16 eeprom_data, i;
4094 4092
4095 DEBUGFUNC("e1000_read_mac_addr"); 4093 e_dbg("e1000_read_mac_addr");
4096 4094
4097 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { 4095 for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
4098 offset = i >> 1; 4096 offset = i >> 1;
4099 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { 4097 if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
4100 DEBUGOUT("EEPROM Read Error\n"); 4098 e_dbg("EEPROM Read Error\n");
4101 return -E1000_ERR_EEPROM; 4099 return -E1000_ERR_EEPROM;
4102 } 4100 }
4103 hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF); 4101 hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF);
@@ -4132,17 +4130,17 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
4132 u32 i; 4130 u32 i;
4133 u32 rar_num; 4131 u32 rar_num;
4134 4132
4135 DEBUGFUNC("e1000_init_rx_addrs"); 4133 e_dbg("e1000_init_rx_addrs");
4136 4134
4137 /* Setup the receive address. */ 4135 /* Setup the receive address. */
4138 DEBUGOUT("Programming MAC Address into RAR[0]\n"); 4136 e_dbg("Programming MAC Address into RAR[0]\n");
4139 4137
4140 e1000_rar_set(hw, hw->mac_addr, 0); 4138 e1000_rar_set(hw, hw->mac_addr, 0);
4141 4139
4142 rar_num = E1000_RAR_ENTRIES; 4140 rar_num = E1000_RAR_ENTRIES;
4143 4141
4144 /* Zero out the other 15 receive addresses. */ 4142 /* Zero out the other 15 receive addresses. */
4145 DEBUGOUT("Clearing RAR[1-15]\n"); 4143 e_dbg("Clearing RAR[1-15]\n");
4146 for (i = 1; i < rar_num; i++) { 4144 for (i = 1; i < rar_num; i++) {
4147 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 4145 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
4148 E1000_WRITE_FLUSH(); 4146 E1000_WRITE_FLUSH();
@@ -4290,7 +4288,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
4290 u16 eeprom_data, i, temp; 4288 u16 eeprom_data, i, temp;
4291 const u16 led_mask = 0x0F; 4289 const u16 led_mask = 0x0F;
4292 4290
4293 DEBUGFUNC("e1000_id_led_init"); 4291 e_dbg("e1000_id_led_init");
4294 4292
4295 if (hw->mac_type < e1000_82540) { 4293 if (hw->mac_type < e1000_82540) {
4296 /* Nothing to do */ 4294 /* Nothing to do */
@@ -4303,7 +4301,7 @@ static s32 e1000_id_led_init(struct e1000_hw *hw)
4303 hw->ledctl_mode2 = hw->ledctl_default; 4301 hw->ledctl_mode2 = hw->ledctl_default;
4304 4302
4305 if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { 4303 if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
4306 DEBUGOUT("EEPROM Read Error\n"); 4304 e_dbg("EEPROM Read Error\n");
4307 return -E1000_ERR_EEPROM; 4305 return -E1000_ERR_EEPROM;
4308 } 4306 }
4309 4307
@@ -4363,7 +4361,7 @@ s32 e1000_setup_led(struct e1000_hw *hw)
4363 u32 ledctl; 4361 u32 ledctl;
4364 s32 ret_val = E1000_SUCCESS; 4362 s32 ret_val = E1000_SUCCESS;
4365 4363
4366 DEBUGFUNC("e1000_setup_led"); 4364 e_dbg("e1000_setup_led");
4367 4365
4368 switch (hw->mac_type) { 4366 switch (hw->mac_type) {
4369 case e1000_82542_rev2_0: 4367 case e1000_82542_rev2_0:
@@ -4415,7 +4413,7 @@ s32 e1000_cleanup_led(struct e1000_hw *hw)
4415{ 4413{
4416 s32 ret_val = E1000_SUCCESS; 4414 s32 ret_val = E1000_SUCCESS;
4417 4415
4418 DEBUGFUNC("e1000_cleanup_led"); 4416 e_dbg("e1000_cleanup_led");
4419 4417
4420 switch (hw->mac_type) { 4418 switch (hw->mac_type) {
4421 case e1000_82542_rev2_0: 4419 case e1000_82542_rev2_0:
@@ -4451,7 +4449,7 @@ s32 e1000_led_on(struct e1000_hw *hw)
4451{ 4449{
4452 u32 ctrl = er32(CTRL); 4450 u32 ctrl = er32(CTRL);
4453 4451
4454 DEBUGFUNC("e1000_led_on"); 4452 e_dbg("e1000_led_on");
4455 4453
4456 switch (hw->mac_type) { 4454 switch (hw->mac_type) {
4457 case e1000_82542_rev2_0: 4455 case e1000_82542_rev2_0:
@@ -4497,7 +4495,7 @@ s32 e1000_led_off(struct e1000_hw *hw)
4497{ 4495{
4498 u32 ctrl = er32(CTRL); 4496 u32 ctrl = er32(CTRL);
4499 4497
4500 DEBUGFUNC("e1000_led_off"); 4498 e_dbg("e1000_led_off");
4501 4499
4502 switch (hw->mac_type) { 4500 switch (hw->mac_type) {
4503 case e1000_82542_rev2_0: 4501 case e1000_82542_rev2_0:
@@ -4626,7 +4624,7 @@ static void e1000_clear_hw_cntrs(struct e1000_hw *hw)
4626 */ 4624 */
4627void e1000_reset_adaptive(struct e1000_hw *hw) 4625void e1000_reset_adaptive(struct e1000_hw *hw)
4628{ 4626{
4629 DEBUGFUNC("e1000_reset_adaptive"); 4627 e_dbg("e1000_reset_adaptive");
4630 4628
4631 if (hw->adaptive_ifs) { 4629 if (hw->adaptive_ifs) {
4632 if (!hw->ifs_params_forced) { 4630 if (!hw->ifs_params_forced) {
@@ -4639,7 +4637,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
4639 hw->in_ifs_mode = false; 4637 hw->in_ifs_mode = false;
4640 ew32(AIT, 0); 4638 ew32(AIT, 0);
4641 } else { 4639 } else {
4642 DEBUGOUT("Not in Adaptive IFS mode!\n"); 4640 e_dbg("Not in Adaptive IFS mode!\n");
4643 } 4641 }
4644} 4642}
4645 4643
@@ -4654,7 +4652,7 @@ void e1000_reset_adaptive(struct e1000_hw *hw)
4654 */ 4652 */
4655void e1000_update_adaptive(struct e1000_hw *hw) 4653void e1000_update_adaptive(struct e1000_hw *hw)
4656{ 4654{
4657 DEBUGFUNC("e1000_update_adaptive"); 4655 e_dbg("e1000_update_adaptive");
4658 4656
4659 if (hw->adaptive_ifs) { 4657 if (hw->adaptive_ifs) {
4660 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { 4658 if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) {
@@ -4679,7 +4677,7 @@ void e1000_update_adaptive(struct e1000_hw *hw)
4679 } 4677 }
4680 } 4678 }
4681 } else { 4679 } else {
4682 DEBUGOUT("Not in Adaptive IFS mode!\n"); 4680 e_dbg("Not in Adaptive IFS mode!\n");
4683 } 4681 }
4684} 4682}
4685 4683
@@ -4851,7 +4849,7 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
4851 u16 i, phy_data; 4849 u16 i, phy_data;
4852 u16 cable_length; 4850 u16 cable_length;
4853 4851
4854 DEBUGFUNC("e1000_get_cable_length"); 4852 e_dbg("e1000_get_cable_length");
4855 4853
4856 *min_length = *max_length = 0; 4854 *min_length = *max_length = 0;
4857 4855
@@ -4968,7 +4966,7 @@ static s32 e1000_check_polarity(struct e1000_hw *hw,
4968 s32 ret_val; 4966 s32 ret_val;
4969 u16 phy_data; 4967 u16 phy_data;
4970 4968
4971 DEBUGFUNC("e1000_check_polarity"); 4969 e_dbg("e1000_check_polarity");
4972 4970
4973 if (hw->phy_type == e1000_phy_m88) { 4971 if (hw->phy_type == e1000_phy_m88) {
4974 /* return the Polarity bit in the Status register. */ 4972 /* return the Polarity bit in the Status register. */
@@ -5034,7 +5032,7 @@ static s32 e1000_check_downshift(struct e1000_hw *hw)
5034 s32 ret_val; 5032 s32 ret_val;
5035 u16 phy_data; 5033 u16 phy_data;
5036 5034
5037 DEBUGFUNC("e1000_check_downshift"); 5035 e_dbg("e1000_check_downshift");
5038 5036
5039 if (hw->phy_type == e1000_phy_igp) { 5037 if (hw->phy_type == e1000_phy_igp) {
5040 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, 5038 ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
@@ -5081,7 +5079,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5081 }; 5079 };
5082 u16 min_length, max_length; 5080 u16 min_length, max_length;
5083 5081
5084 DEBUGFUNC("e1000_config_dsp_after_link_change"); 5082 e_dbg("e1000_config_dsp_after_link_change");
5085 5083
5086 if (hw->phy_type != e1000_phy_igp) 5084 if (hw->phy_type != e1000_phy_igp)
5087 return E1000_SUCCESS; 5085 return E1000_SUCCESS;
@@ -5089,7 +5087,7 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5089 if (link_up) { 5087 if (link_up) {
5090 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); 5088 ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
5091 if (ret_val) { 5089 if (ret_val) {
5092 DEBUGOUT("Error getting link speed and duplex\n"); 5090 e_dbg("Error getting link speed and duplex\n");
5093 return ret_val; 5091 return ret_val;
5094 } 5092 }
5095 5093
@@ -5289,7 +5287,7 @@ static s32 e1000_set_phy_mode(struct e1000_hw *hw)
5289 s32 ret_val; 5287 s32 ret_val;
5290 u16 eeprom_data; 5288 u16 eeprom_data;
5291 5289
5292 DEBUGFUNC("e1000_set_phy_mode"); 5290 e_dbg("e1000_set_phy_mode");
5293 5291
5294 if ((hw->mac_type == e1000_82545_rev_3) && 5292 if ((hw->mac_type == e1000_82545_rev_3) &&
5295 (hw->media_type == e1000_media_type_copper)) { 5293 (hw->media_type == e1000_media_type_copper)) {
@@ -5337,7 +5335,7 @@ static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
5337{ 5335{
5338 s32 ret_val; 5336 s32 ret_val;
5339 u16 phy_data; 5337 u16 phy_data;
5340 DEBUGFUNC("e1000_set_d3_lplu_state"); 5338 e_dbg("e1000_set_d3_lplu_state");
5341 5339
5342 if (hw->phy_type != e1000_phy_igp) 5340 if (hw->phy_type != e1000_phy_igp)
5343 return E1000_SUCCESS; 5341 return E1000_SUCCESS;
@@ -5440,7 +5438,7 @@ static s32 e1000_set_vco_speed(struct e1000_hw *hw)
5440 u16 default_page = 0; 5438 u16 default_page = 0;
5441 u16 phy_data; 5439 u16 phy_data;
5442 5440
5443 DEBUGFUNC("e1000_set_vco_speed"); 5441 e_dbg("e1000_set_vco_speed");
5444 5442
5445 switch (hw->mac_type) { 5443 switch (hw->mac_type) {
5446 case e1000_82545_rev_3: 5444 case e1000_82545_rev_3:
@@ -5613,7 +5611,7 @@ static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw)
5613 */ 5611 */
5614static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) 5612static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
5615{ 5613{
5616 DEBUGFUNC("e1000_get_auto_rd_done"); 5614 e_dbg("e1000_get_auto_rd_done");
5617 msleep(5); 5615 msleep(5);
5618 return E1000_SUCCESS; 5616 return E1000_SUCCESS;
5619} 5617}
@@ -5628,7 +5626,7 @@ static s32 e1000_get_auto_rd_done(struct e1000_hw *hw)
5628 */ 5626 */
5629static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) 5627static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
5630{ 5628{
5631 DEBUGFUNC("e1000_get_phy_cfg_done"); 5629 e_dbg("e1000_get_phy_cfg_done");
5632 mdelay(10); 5630 mdelay(10);
5633 return E1000_SUCCESS; 5631 return E1000_SUCCESS;
5634} 5632}
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 9acfddb0dafb..ecd9f6c6bcd5 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -35,6 +35,7 @@
35 35
36#include "e1000_osdep.h" 36#include "e1000_osdep.h"
37 37
38
38/* Forward declarations of structures used by the shared code */ 39/* Forward declarations of structures used by the shared code */
39struct e1000_hw; 40struct e1000_hw;
40struct e1000_hw_stats; 41struct e1000_hw_stats;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b15ece26ed84..ebdea0891665 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
31 31
32char e1000_driver_name[] = "e1000"; 32char e1000_driver_name[] = "e1000";
33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
34#define DRV_VERSION "7.3.21-k5-NAPI" 34#define DRV_VERSION "7.3.21-k6-NAPI"
35const char e1000_driver_version[] = DRV_VERSION; 35const char e1000_driver_version[] = DRV_VERSION;
36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
37 37
@@ -214,6 +214,17 @@ module_param(debug, int, 0);
214MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 214MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
215 215
216/** 216/**
217 * e1000_get_hw_dev - return device
218 * used by hardware layer to print debugging information
219 *
220 **/
221struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
222{
223 struct e1000_adapter *adapter = hw->back;
224 return adapter->netdev;
225}
226
227/**
217 * e1000_init_module - Driver Registration Routine 228 * e1000_init_module - Driver Registration Routine
218 * 229 *
219 * e1000_init_module is the first routine called when the driver is 230 * e1000_init_module is the first routine called when the driver is
@@ -223,18 +234,17 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
223static int __init e1000_init_module(void) 234static int __init e1000_init_module(void)
224{ 235{
225 int ret; 236 int ret;
226 printk(KERN_INFO "%s - version %s\n", 237 pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
227 e1000_driver_string, e1000_driver_version);
228 238
229 printk(KERN_INFO "%s\n", e1000_copyright); 239 pr_info("%s\n", e1000_copyright);
230 240
231 ret = pci_register_driver(&e1000_driver); 241 ret = pci_register_driver(&e1000_driver);
232 if (copybreak != COPYBREAK_DEFAULT) { 242 if (copybreak != COPYBREAK_DEFAULT) {
233 if (copybreak == 0) 243 if (copybreak == 0)
234 printk(KERN_INFO "e1000: copybreak disabled\n"); 244 pr_info("copybreak disabled\n");
235 else 245 else
236 printk(KERN_INFO "e1000: copybreak enabled for " 246 pr_info("copybreak enabled for "
237 "packets <= %u bytes\n", copybreak); 247 "packets <= %u bytes\n", copybreak);
238 } 248 }
239 return ret; 249 return ret;
240} 250}
@@ -265,8 +275,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
265 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, 275 err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
266 netdev); 276 netdev);
267 if (err) { 277 if (err) {
268 DPRINTK(PROBE, ERR, 278 e_err("Unable to allocate interrupt Error: %d\n", err);
269 "Unable to allocate interrupt Error: %d\n", err);
270 } 279 }
271 280
272 return err; 281 return err;
@@ -648,7 +657,7 @@ void e1000_reset(struct e1000_adapter *adapter)
648 ew32(WUC, 0); 657 ew32(WUC, 0);
649 658
650 if (e1000_init_hw(hw)) 659 if (e1000_init_hw(hw))
651 DPRINTK(PROBE, ERR, "Hardware Error\n"); 660 e_err("Hardware Error\n");
652 e1000_update_mng_vlan(adapter); 661 e1000_update_mng_vlan(adapter);
653 662
654 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ 663 /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -689,8 +698,7 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
689 698
690 data = kmalloc(eeprom.len, GFP_KERNEL); 699 data = kmalloc(eeprom.len, GFP_KERNEL);
691 if (!data) { 700 if (!data) {
692 printk(KERN_ERR "Unable to allocate memory to dump EEPROM" 701 pr_err("Unable to allocate memory to dump EEPROM data\n");
693 " data\n");
694 return; 702 return;
695 } 703 }
696 704
@@ -702,30 +710,25 @@ static void e1000_dump_eeprom(struct e1000_adapter *adapter)
702 csum_new += data[i] + (data[i + 1] << 8); 710 csum_new += data[i] + (data[i + 1] << 8);
703 csum_new = EEPROM_SUM - csum_new; 711 csum_new = EEPROM_SUM - csum_new;
704 712
705 printk(KERN_ERR "/*********************/\n"); 713 pr_err("/*********************/\n");
706 printk(KERN_ERR "Current EEPROM Checksum : 0x%04x\n", csum_old); 714 pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
707 printk(KERN_ERR "Calculated : 0x%04x\n", csum_new); 715 pr_err("Calculated : 0x%04x\n", csum_new);
708 716
709 printk(KERN_ERR "Offset Values\n"); 717 pr_err("Offset Values\n");
710 printk(KERN_ERR "======== ======\n"); 718 pr_err("======== ======\n");
711 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); 719 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
712 720
713 printk(KERN_ERR "Include this output when contacting your support " 721 pr_err("Include this output when contacting your support provider.\n");
714 "provider.\n"); 722 pr_err("This is not a software error! Something bad happened to\n");
715 printk(KERN_ERR "This is not a software error! Something bad " 723 pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
716 "happened to your hardware or\n"); 724 pr_err("result in further problems, possibly loss of data,\n");
717 printk(KERN_ERR "EEPROM image. Ignoring this " 725 pr_err("corruption or system hangs!\n");
718 "problem could result in further problems,\n"); 726 pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
719 printk(KERN_ERR "possibly loss of data, corruption or system hangs!\n"); 727 pr_err("which is invalid and requires you to set the proper MAC\n");
720 printk(KERN_ERR "The MAC Address will be reset to 00:00:00:00:00:00, " 728 pr_err("address manually before continuing to enable this network\n");
721 "which is invalid\n"); 729 pr_err("device. Please inspect the EEPROM dump and report the\n");
722 printk(KERN_ERR "and requires you to set the proper MAC " 730 pr_err("issue to your hardware vendor or Intel Customer Support.\n");
723 "address manually before continuing\n"); 731 pr_err("/*********************/\n");
724 printk(KERN_ERR "to enable this network device.\n");
725 printk(KERN_ERR "Please inspect the EEPROM dump and report the issue "
726 "to your hardware vendor\n");
727 printk(KERN_ERR "or Intel Customer Support.\n");
728 printk(KERN_ERR "/*********************/\n");
729 732
730 kfree(data); 733 kfree(data);
731} 734}
@@ -823,16 +826,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
823 if (err) 826 if (err)
824 return err; 827 return err;
825 828
826 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 829 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
827 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 830 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
828 pci_using_dac = 1; 831 pci_using_dac = 1;
829 } else { 832 } else {
830 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 833 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
831 if (err) { 834 if (err) {
832 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 835 err = dma_set_coherent_mask(&pdev->dev,
836 DMA_BIT_MASK(32));
833 if (err) { 837 if (err) {
834 E1000_ERR("No usable DMA configuration, " 838 pr_err("No usable DMA config, aborting\n");
835 "aborting\n");
836 goto err_dma; 839 goto err_dma;
837 } 840 }
838 } 841 }
@@ -922,7 +925,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
922 925
923 /* initialize eeprom parameters */ 926 /* initialize eeprom parameters */
924 if (e1000_init_eeprom_params(hw)) { 927 if (e1000_init_eeprom_params(hw)) {
925 E1000_ERR("EEPROM initialization failed\n"); 928 e_err("EEPROM initialization failed\n");
926 goto err_eeprom; 929 goto err_eeprom;
927 } 930 }
928 931
@@ -933,7 +936,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
933 936
934 /* make sure the EEPROM is good */ 937 /* make sure the EEPROM is good */
935 if (e1000_validate_eeprom_checksum(hw) < 0) { 938 if (e1000_validate_eeprom_checksum(hw) < 0) {
936 DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n"); 939 e_err("The EEPROM Checksum Is Not Valid\n");
937 e1000_dump_eeprom(adapter); 940 e1000_dump_eeprom(adapter);
938 /* 941 /*
939 * set MAC address to all zeroes to invalidate and temporary 942 * set MAC address to all zeroes to invalidate and temporary
@@ -947,14 +950,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
947 } else { 950 } else {
948 /* copy the MAC address out of the EEPROM */ 951 /* copy the MAC address out of the EEPROM */
949 if (e1000_read_mac_addr(hw)) 952 if (e1000_read_mac_addr(hw))
950 DPRINTK(PROBE, ERR, "EEPROM Read Error\n"); 953 e_err("EEPROM Read Error\n");
951 } 954 }
952 /* don't block initalization here due to bad MAC address */ 955 /* don't block initalization here due to bad MAC address */
953 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); 956 memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
954 memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len); 957 memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
955 958
956 if (!is_valid_ether_addr(netdev->perm_addr)) 959 if (!is_valid_ether_addr(netdev->perm_addr))
957 DPRINTK(PROBE, ERR, "Invalid MAC Address\n"); 960 e_err("Invalid MAC Address\n");
958 961
959 e1000_get_bus_info(hw); 962 e1000_get_bus_info(hw);
960 963
@@ -1035,8 +1038,16 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1035 adapter->wol = adapter->eeprom_wol; 1038 adapter->wol = adapter->eeprom_wol;
1036 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1039 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1037 1040
1041 /* reset the hardware with the new settings */
1042 e1000_reset(adapter);
1043
1044 strcpy(netdev->name, "eth%d");
1045 err = register_netdev(netdev);
1046 if (err)
1047 goto err_register;
1048
1038 /* print bus type/speed/width info */ 1049 /* print bus type/speed/width info */
1039 DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ", 1050 e_info("(PCI%s:%s:%s) ",
1040 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), 1051 ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1041 ((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" : 1052 ((hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
1042 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" : 1053 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
@@ -1044,20 +1055,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1044 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"), 1055 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
1045 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit")); 1056 ((hw->bus_width == e1000_bus_width_64) ? "64-bit" : "32-bit"));
1046 1057
1047 printk("%pM\n", netdev->dev_addr); 1058 e_info("%pM\n", netdev->dev_addr);
1048
1049 /* reset the hardware with the new settings */
1050 e1000_reset(adapter);
1051
1052 strcpy(netdev->name, "eth%d");
1053 err = register_netdev(netdev);
1054 if (err)
1055 goto err_register;
1056 1059
1057 /* carrier off reporting is important to ethtool even BEFORE open */ 1060 /* carrier off reporting is important to ethtool even BEFORE open */
1058 netif_carrier_off(netdev); 1061 netif_carrier_off(netdev);
1059 1062
1060 DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n"); 1063 e_info("Intel(R) PRO/1000 Network Connection\n");
1061 1064
1062 cards_found++; 1065 cards_found++;
1063 return 0; 1066 return 0;
@@ -1157,7 +1160,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1157 /* identify the MAC */ 1160 /* identify the MAC */
1158 1161
1159 if (e1000_set_mac_type(hw)) { 1162 if (e1000_set_mac_type(hw)) {
1160 DPRINTK(PROBE, ERR, "Unknown MAC Type\n"); 1163 e_err("Unknown MAC Type\n");
1161 return -EIO; 1164 return -EIO;
1162 } 1165 }
1163 1166
@@ -1190,7 +1193,7 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1190 adapter->num_rx_queues = 1; 1193 adapter->num_rx_queues = 1;
1191 1194
1192 if (e1000_alloc_queues(adapter)) { 1195 if (e1000_alloc_queues(adapter)) {
1193 DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n"); 1196 e_err("Unable to allocate memory for queues\n");
1194 return -ENOMEM; 1197 return -ENOMEM;
1195 } 1198 }
1196 1199
@@ -1384,8 +1387,7 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1384 size = sizeof(struct e1000_buffer) * txdr->count; 1387 size = sizeof(struct e1000_buffer) * txdr->count;
1385 txdr->buffer_info = vmalloc(size); 1388 txdr->buffer_info = vmalloc(size);
1386 if (!txdr->buffer_info) { 1389 if (!txdr->buffer_info) {
1387 DPRINTK(PROBE, ERR, 1390 e_err("Unable to allocate memory for the Tx descriptor ring\n");
1388 "Unable to allocate memory for the transmit descriptor ring\n");
1389 return -ENOMEM; 1391 return -ENOMEM;
1390 } 1392 }
1391 memset(txdr->buffer_info, 0, size); 1393 memset(txdr->buffer_info, 0, size);
@@ -1395,12 +1397,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1395 txdr->size = txdr->count * sizeof(struct e1000_tx_desc); 1397 txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1396 txdr->size = ALIGN(txdr->size, 4096); 1398 txdr->size = ALIGN(txdr->size, 4096);
1397 1399
1398 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1400 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1401 GFP_KERNEL);
1399 if (!txdr->desc) { 1402 if (!txdr->desc) {
1400setup_tx_desc_die: 1403setup_tx_desc_die:
1401 vfree(txdr->buffer_info); 1404 vfree(txdr->buffer_info);
1402 DPRINTK(PROBE, ERR, 1405 e_err("Unable to allocate memory for the Tx descriptor ring\n");
1403 "Unable to allocate memory for the transmit descriptor ring\n");
1404 return -ENOMEM; 1406 return -ENOMEM;
1405 } 1407 }
1406 1408
@@ -1408,29 +1410,32 @@ setup_tx_desc_die:
1408 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1410 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1409 void *olddesc = txdr->desc; 1411 void *olddesc = txdr->desc;
1410 dma_addr_t olddma = txdr->dma; 1412 dma_addr_t olddma = txdr->dma;
1411 DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes " 1413 e_err("txdr align check failed: %u bytes at %p\n",
1412 "at %p\n", txdr->size, txdr->desc); 1414 txdr->size, txdr->desc);
1413 /* Try again, without freeing the previous */ 1415 /* Try again, without freeing the previous */
1414 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 1416 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1417 &txdr->dma, GFP_KERNEL);
1415 /* Failed allocation, critical failure */ 1418 /* Failed allocation, critical failure */
1416 if (!txdr->desc) { 1419 if (!txdr->desc) {
1417 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1420 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1421 olddma);
1418 goto setup_tx_desc_die; 1422 goto setup_tx_desc_die;
1419 } 1423 }
1420 1424
1421 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { 1425 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1422 /* give up */ 1426 /* give up */
1423 pci_free_consistent(pdev, txdr->size, txdr->desc, 1427 dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1424 txdr->dma); 1428 txdr->dma);
1425 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1429 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1426 DPRINTK(PROBE, ERR, 1430 olddma);
1427 "Unable to allocate aligned memory " 1431 e_err("Unable to allocate aligned memory "
1428 "for the transmit descriptor ring\n"); 1432 "for the transmit descriptor ring\n");
1429 vfree(txdr->buffer_info); 1433 vfree(txdr->buffer_info);
1430 return -ENOMEM; 1434 return -ENOMEM;
1431 } else { 1435 } else {
1432 /* Free old allocation, new allocation was successful */ 1436 /* Free old allocation, new allocation was successful */
1433 pci_free_consistent(pdev, txdr->size, olddesc, olddma); 1437 dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1438 olddma);
1434 } 1439 }
1435 } 1440 }
1436 memset(txdr->desc, 0, txdr->size); 1441 memset(txdr->desc, 0, txdr->size);
@@ -1456,8 +1461,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1456 for (i = 0; i < adapter->num_tx_queues; i++) { 1461 for (i = 0; i < adapter->num_tx_queues; i++) {
1457 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); 1462 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1458 if (err) { 1463 if (err) {
1459 DPRINTK(PROBE, ERR, 1464 e_err("Allocation for Tx Queue %u failed\n", i);
1460 "Allocation for Tx Queue %u failed\n", i);
1461 for (i-- ; i >= 0; i--) 1465 for (i-- ; i >= 0; i--)
1462 e1000_free_tx_resources(adapter, 1466 e1000_free_tx_resources(adapter,
1463 &adapter->tx_ring[i]); 1467 &adapter->tx_ring[i]);
@@ -1577,8 +1581,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1577 size = sizeof(struct e1000_buffer) * rxdr->count; 1581 size = sizeof(struct e1000_buffer) * rxdr->count;
1578 rxdr->buffer_info = vmalloc(size); 1582 rxdr->buffer_info = vmalloc(size);
1579 if (!rxdr->buffer_info) { 1583 if (!rxdr->buffer_info) {
1580 DPRINTK(PROBE, ERR, 1584 e_err("Unable to allocate memory for the Rx descriptor ring\n");
1581 "Unable to allocate memory for the receive descriptor ring\n");
1582 return -ENOMEM; 1585 return -ENOMEM;
1583 } 1586 }
1584 memset(rxdr->buffer_info, 0, size); 1587 memset(rxdr->buffer_info, 0, size);
@@ -1590,11 +1593,11 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1590 rxdr->size = rxdr->count * desc_len; 1593 rxdr->size = rxdr->count * desc_len;
1591 rxdr->size = ALIGN(rxdr->size, 4096); 1594 rxdr->size = ALIGN(rxdr->size, 4096);
1592 1595
1593 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1596 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1597 GFP_KERNEL);
1594 1598
1595 if (!rxdr->desc) { 1599 if (!rxdr->desc) {
1596 DPRINTK(PROBE, ERR, 1600 e_err("Unable to allocate memory for the Rx descriptor ring\n");
1597 "Unable to allocate memory for the receive descriptor ring\n");
1598setup_rx_desc_die: 1601setup_rx_desc_die:
1599 vfree(rxdr->buffer_info); 1602 vfree(rxdr->buffer_info);
1600 return -ENOMEM; 1603 return -ENOMEM;
@@ -1604,31 +1607,33 @@ setup_rx_desc_die:
1604 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1607 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1605 void *olddesc = rxdr->desc; 1608 void *olddesc = rxdr->desc;
1606 dma_addr_t olddma = rxdr->dma; 1609 dma_addr_t olddma = rxdr->dma;
1607 DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes " 1610 e_err("rxdr align check failed: %u bytes at %p\n",
1608 "at %p\n", rxdr->size, rxdr->desc); 1611 rxdr->size, rxdr->desc);
1609 /* Try again, without freeing the previous */ 1612 /* Try again, without freeing the previous */
1610 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 1613 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1614 &rxdr->dma, GFP_KERNEL);
1611 /* Failed allocation, critical failure */ 1615 /* Failed allocation, critical failure */
1612 if (!rxdr->desc) { 1616 if (!rxdr->desc) {
1613 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1617 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1614 DPRINTK(PROBE, ERR, 1618 olddma);
1615 "Unable to allocate memory " 1619 e_err("Unable to allocate memory for the Rx descriptor "
1616 "for the receive descriptor ring\n"); 1620 "ring\n");
1617 goto setup_rx_desc_die; 1621 goto setup_rx_desc_die;
1618 } 1622 }
1619 1623
1620 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { 1624 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1621 /* give up */ 1625 /* give up */
1622 pci_free_consistent(pdev, rxdr->size, rxdr->desc, 1626 dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1623 rxdr->dma); 1627 rxdr->dma);
1624 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1628 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1625 DPRINTK(PROBE, ERR, 1629 olddma);
1626 "Unable to allocate aligned memory " 1630 e_err("Unable to allocate aligned memory for the Rx "
1627 "for the receive descriptor ring\n"); 1631 "descriptor ring\n");
1628 goto setup_rx_desc_die; 1632 goto setup_rx_desc_die;
1629 } else { 1633 } else {
1630 /* Free old allocation, new allocation was successful */ 1634 /* Free old allocation, new allocation was successful */
1631 pci_free_consistent(pdev, rxdr->size, olddesc, olddma); 1635 dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1636 olddma);
1632 } 1637 }
1633 } 1638 }
1634 memset(rxdr->desc, 0, rxdr->size); 1639 memset(rxdr->desc, 0, rxdr->size);
@@ -1655,8 +1660,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1655 for (i = 0; i < adapter->num_rx_queues; i++) { 1660 for (i = 0; i < adapter->num_rx_queues; i++) {
1656 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); 1661 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1657 if (err) { 1662 if (err) {
1658 DPRINTK(PROBE, ERR, 1663 e_err("Allocation for Rx Queue %u failed\n", i);
1659 "Allocation for Rx Queue %u failed\n", i);
1660 for (i-- ; i >= 0; i--) 1664 for (i-- ; i >= 0; i--)
1661 e1000_free_rx_resources(adapter, 1665 e1000_free_rx_resources(adapter,
1662 &adapter->rx_ring[i]); 1666 &adapter->rx_ring[i]);
@@ -1804,7 +1808,8 @@ static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1804 vfree(tx_ring->buffer_info); 1808 vfree(tx_ring->buffer_info);
1805 tx_ring->buffer_info = NULL; 1809 tx_ring->buffer_info = NULL;
1806 1810
1807 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 1811 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1812 tx_ring->dma);
1808 1813
1809 tx_ring->desc = NULL; 1814 tx_ring->desc = NULL;
1810} 1815}
@@ -1829,12 +1834,12 @@ static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1829{ 1834{
1830 if (buffer_info->dma) { 1835 if (buffer_info->dma) {
1831 if (buffer_info->mapped_as_page) 1836 if (buffer_info->mapped_as_page)
1832 pci_unmap_page(adapter->pdev, buffer_info->dma, 1837 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1833 buffer_info->length, PCI_DMA_TODEVICE); 1838 buffer_info->length, DMA_TO_DEVICE);
1834 else 1839 else
1835 pci_unmap_single(adapter->pdev, buffer_info->dma, 1840 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1836 buffer_info->length, 1841 buffer_info->length,
1837 PCI_DMA_TODEVICE); 1842 DMA_TO_DEVICE);
1838 buffer_info->dma = 0; 1843 buffer_info->dma = 0;
1839 } 1844 }
1840 if (buffer_info->skb) { 1845 if (buffer_info->skb) {
@@ -1912,7 +1917,8 @@ static void e1000_free_rx_resources(struct e1000_adapter *adapter,
1912 vfree(rx_ring->buffer_info); 1917 vfree(rx_ring->buffer_info);
1913 rx_ring->buffer_info = NULL; 1918 rx_ring->buffer_info = NULL;
1914 1919
1915 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 1920 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1921 rx_ring->dma);
1916 1922
1917 rx_ring->desc = NULL; 1923 rx_ring->desc = NULL;
1918} 1924}
@@ -1952,14 +1958,14 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
1952 buffer_info = &rx_ring->buffer_info[i]; 1958 buffer_info = &rx_ring->buffer_info[i];
1953 if (buffer_info->dma && 1959 if (buffer_info->dma &&
1954 adapter->clean_rx == e1000_clean_rx_irq) { 1960 adapter->clean_rx == e1000_clean_rx_irq) {
1955 pci_unmap_single(pdev, buffer_info->dma, 1961 dma_unmap_single(&pdev->dev, buffer_info->dma,
1956 buffer_info->length, 1962 buffer_info->length,
1957 PCI_DMA_FROMDEVICE); 1963 DMA_FROM_DEVICE);
1958 } else if (buffer_info->dma && 1964 } else if (buffer_info->dma &&
1959 adapter->clean_rx == e1000_clean_jumbo_rx_irq) { 1965 adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
1960 pci_unmap_page(pdev, buffer_info->dma, 1966 dma_unmap_page(&pdev->dev, buffer_info->dma,
1961 buffer_info->length, 1967 buffer_info->length,
1962 PCI_DMA_FROMDEVICE); 1968 DMA_FROM_DEVICE);
1963 } 1969 }
1964 1970
1965 buffer_info->dma = 0; 1971 buffer_info->dma = 0;
@@ -2098,7 +2104,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2098 struct e1000_hw *hw = &adapter->hw; 2104 struct e1000_hw *hw = &adapter->hw;
2099 struct netdev_hw_addr *ha; 2105 struct netdev_hw_addr *ha;
2100 bool use_uc = false; 2106 bool use_uc = false;
2101 struct dev_addr_list *mc_ptr;
2102 u32 rctl; 2107 u32 rctl;
2103 u32 hash_value; 2108 u32 hash_value;
2104 int i, rar_entries = E1000_RAR_ENTRIES; 2109 int i, rar_entries = E1000_RAR_ENTRIES;
@@ -2106,7 +2111,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2106 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); 2111 u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2107 2112
2108 if (!mcarray) { 2113 if (!mcarray) {
2109 DPRINTK(PROBE, ERR, "memory allocation failed\n"); 2114 e_err("memory allocation failed\n");
2110 return; 2115 return;
2111 } 2116 }
2112 2117
@@ -2156,19 +2161,17 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2156 e1000_rar_set(hw, ha->addr, i++); 2161 e1000_rar_set(hw, ha->addr, i++);
2157 } 2162 }
2158 2163
2159 WARN_ON(i == rar_entries); 2164 netdev_for_each_mc_addr(ha, netdev) {
2160
2161 netdev_for_each_mc_addr(mc_ptr, netdev) {
2162 if (i == rar_entries) { 2165 if (i == rar_entries) {
2163 /* load any remaining addresses into the hash table */ 2166 /* load any remaining addresses into the hash table */
2164 u32 hash_reg, hash_bit, mta; 2167 u32 hash_reg, hash_bit, mta;
2165 hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr); 2168 hash_value = e1000_hash_mc_addr(hw, ha->addr);
2166 hash_reg = (hash_value >> 5) & 0x7F; 2169 hash_reg = (hash_value >> 5) & 0x7F;
2167 hash_bit = hash_value & 0x1F; 2170 hash_bit = hash_value & 0x1F;
2168 mta = (1 << hash_bit); 2171 mta = (1 << hash_bit);
2169 mcarray[hash_reg] |= mta; 2172 mcarray[hash_reg] |= mta;
2170 } else { 2173 } else {
2171 e1000_rar_set(hw, mc_ptr->da_addr, i++); 2174 e1000_rar_set(hw, ha->addr, i++);
2172 } 2175 }
2173 } 2176 }
2174 2177
@@ -2302,16 +2305,16 @@ static void e1000_watchdog(unsigned long data)
2302 &adapter->link_duplex); 2305 &adapter->link_duplex);
2303 2306
2304 ctrl = er32(CTRL); 2307 ctrl = er32(CTRL);
2305 printk(KERN_INFO "e1000: %s NIC Link is Up %d Mbps %s, " 2308 pr_info("%s NIC Link is Up %d Mbps %s, "
2306 "Flow Control: %s\n", 2309 "Flow Control: %s\n",
2307 netdev->name, 2310 netdev->name,
2308 adapter->link_speed, 2311 adapter->link_speed,
2309 adapter->link_duplex == FULL_DUPLEX ? 2312 adapter->link_duplex == FULL_DUPLEX ?
2310 "Full Duplex" : "Half Duplex", 2313 "Full Duplex" : "Half Duplex",
2311 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2314 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2312 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2315 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2313 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2316 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2314 E1000_CTRL_TFCE) ? "TX" : "None" ))); 2317 E1000_CTRL_TFCE) ? "TX" : "None")));
2315 2318
2316 /* adjust timeout factor according to speed/duplex */ 2319 /* adjust timeout factor according to speed/duplex */
2317 adapter->tx_timeout_factor = 1; 2320 adapter->tx_timeout_factor = 1;
@@ -2341,8 +2344,8 @@ static void e1000_watchdog(unsigned long data)
2341 if (netif_carrier_ok(netdev)) { 2344 if (netif_carrier_ok(netdev)) {
2342 adapter->link_speed = 0; 2345 adapter->link_speed = 0;
2343 adapter->link_duplex = 0; 2346 adapter->link_duplex = 0;
2344 printk(KERN_INFO "e1000: %s NIC Link is Down\n", 2347 pr_info("%s NIC Link is Down\n",
2345 netdev->name); 2348 netdev->name);
2346 netif_carrier_off(netdev); 2349 netif_carrier_off(netdev);
2347 2350
2348 if (!test_bit(__E1000_DOWN, &adapter->flags)) 2351 if (!test_bit(__E1000_DOWN, &adapter->flags))
@@ -2381,6 +2384,22 @@ link_up:
2381 } 2384 }
2382 } 2385 }
2383 2386
2387 /* Simple mode for Interrupt Throttle Rate (ITR) */
2388 if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2389 /*
2390 * Symmetric Tx/Rx gets a reduced ITR=2000;
2391 * Total asymmetrical Tx or Rx gets ITR=8000;
2392 * everyone else is between 2000-8000.
2393 */
2394 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2395 u32 dif = (adapter->gotcl > adapter->gorcl ?
2396 adapter->gotcl - adapter->gorcl :
2397 adapter->gorcl - adapter->gotcl) / 10000;
2398 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2399
2400 ew32(ITR, 1000000000 / (itr * 256));
2401 }
2402
2384 /* Cause software interrupt to ensure rx ring is cleaned */ 2403 /* Cause software interrupt to ensure rx ring is cleaned */
2385 ew32(ICS, E1000_ICS_RXDMT0); 2404 ew32(ICS, E1000_ICS_RXDMT0);
2386 2405
@@ -2525,8 +2544,6 @@ set_itr_now:
2525 adapter->itr = new_itr; 2544 adapter->itr = new_itr;
2526 ew32(ITR, 1000000000 / (new_itr * 256)); 2545 ew32(ITR, 1000000000 / (new_itr * 256));
2527 } 2546 }
2528
2529 return;
2530} 2547}
2531 2548
2532#define E1000_TX_FLAGS_CSUM 0x00000001 2549#define E1000_TX_FLAGS_CSUM 0x00000001
@@ -2632,8 +2649,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
2632 break; 2649 break;
2633 default: 2650 default:
2634 if (unlikely(net_ratelimit())) 2651 if (unlikely(net_ratelimit()))
2635 DPRINTK(DRV, WARNING, 2652 e_warn("checksum_partial proto=%x!\n", skb->protocol);
2636 "checksum_partial proto=%x!\n", skb->protocol);
2637 break; 2653 break;
2638 } 2654 }
2639 2655
@@ -2715,9 +2731,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2715 /* set time_stamp *before* dma to help avoid a possible race */ 2731 /* set time_stamp *before* dma to help avoid a possible race */
2716 buffer_info->time_stamp = jiffies; 2732 buffer_info->time_stamp = jiffies;
2717 buffer_info->mapped_as_page = false; 2733 buffer_info->mapped_as_page = false;
2718 buffer_info->dma = pci_map_single(pdev, skb->data + offset, 2734 buffer_info->dma = dma_map_single(&pdev->dev,
2719 size, PCI_DMA_TODEVICE); 2735 skb->data + offset,
2720 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2736 size, DMA_TO_DEVICE);
2737 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2721 goto dma_error; 2738 goto dma_error;
2722 buffer_info->next_to_watch = i; 2739 buffer_info->next_to_watch = i;
2723 2740
@@ -2761,10 +2778,10 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
2761 buffer_info->length = size; 2778 buffer_info->length = size;
2762 buffer_info->time_stamp = jiffies; 2779 buffer_info->time_stamp = jiffies;
2763 buffer_info->mapped_as_page = true; 2780 buffer_info->mapped_as_page = true;
2764 buffer_info->dma = pci_map_page(pdev, frag->page, 2781 buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
2765 offset, size, 2782 offset, size,
2766 PCI_DMA_TODEVICE); 2783 DMA_TO_DEVICE);
2767 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2784 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2768 goto dma_error; 2785 goto dma_error;
2769 buffer_info->next_to_watch = i; 2786 buffer_info->next_to_watch = i;
2770 2787
@@ -2930,7 +2947,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
2930 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 2947 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
2931 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 2948 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
2932 unsigned int tx_flags = 0; 2949 unsigned int tx_flags = 0;
2933 unsigned int len = skb->len - skb->data_len; 2950 unsigned int len = skb_headlen(skb);
2934 unsigned int nr_frags; 2951 unsigned int nr_frags;
2935 unsigned int mss; 2952 unsigned int mss;
2936 int count = 0; 2953 int count = 0;
@@ -2976,12 +2993,11 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
2976 /* fall through */ 2993 /* fall through */
2977 pull_size = min((unsigned int)4, skb->data_len); 2994 pull_size = min((unsigned int)4, skb->data_len);
2978 if (!__pskb_pull_tail(skb, pull_size)) { 2995 if (!__pskb_pull_tail(skb, pull_size)) {
2979 DPRINTK(DRV, ERR, 2996 e_err("__pskb_pull_tail failed.\n");
2980 "__pskb_pull_tail failed.\n");
2981 dev_kfree_skb_any(skb); 2997 dev_kfree_skb_any(skb);
2982 return NETDEV_TX_OK; 2998 return NETDEV_TX_OK;
2983 } 2999 }
2984 len = skb->len - skb->data_len; 3000 len = skb_headlen(skb);
2985 break; 3001 break;
2986 default: 3002 default:
2987 /* do nothing */ 3003 /* do nothing */
@@ -3125,7 +3141,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3125 3141
3126 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || 3142 if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3127 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3143 (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3128 DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); 3144 e_err("Invalid MTU setting\n");
3129 return -EINVAL; 3145 return -EINVAL;
3130 } 3146 }
3131 3147
@@ -3133,7 +3149,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3133 switch (hw->mac_type) { 3149 switch (hw->mac_type) {
3134 case e1000_undefined ... e1000_82542_rev2_1: 3150 case e1000_undefined ... e1000_82542_rev2_1:
3135 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { 3151 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3136 DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n"); 3152 e_err("Jumbo Frames not supported.\n");
3137 return -EINVAL; 3153 return -EINVAL;
3138 } 3154 }
3139 break; 3155 break;
@@ -3171,8 +3187,8 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3171 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) 3187 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3172 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 3188 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3173 3189
3174 printk(KERN_INFO "e1000: %s changing MTU from %d to %d\n", 3190 pr_info("%s changing MTU from %d to %d\n",
3175 netdev->name, netdev->mtu, new_mtu); 3191 netdev->name, netdev->mtu, new_mtu);
3176 netdev->mtu = new_mtu; 3192 netdev->mtu = new_mtu;
3177 3193
3178 if (netif_running(netdev)) 3194 if (netif_running(netdev))
@@ -3485,17 +3501,17 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3485 !(er32(STATUS) & E1000_STATUS_TXOFF)) { 3501 !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3486 3502
3487 /* detected Tx unit hang */ 3503 /* detected Tx unit hang */
3488 DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" 3504 e_err("Detected Tx Unit Hang\n"
3489 " Tx Queue <%lu>\n" 3505 " Tx Queue <%lu>\n"
3490 " TDH <%x>\n" 3506 " TDH <%x>\n"
3491 " TDT <%x>\n" 3507 " TDT <%x>\n"
3492 " next_to_use <%x>\n" 3508 " next_to_use <%x>\n"
3493 " next_to_clean <%x>\n" 3509 " next_to_clean <%x>\n"
3494 "buffer_info[next_to_clean]\n" 3510 "buffer_info[next_to_clean]\n"
3495 " time_stamp <%lx>\n" 3511 " time_stamp <%lx>\n"
3496 " next_to_watch <%x>\n" 3512 " next_to_watch <%x>\n"
3497 " jiffies <%lx>\n" 3513 " jiffies <%lx>\n"
3498 " next_to_watch.status <%x>\n", 3514 " next_to_watch.status <%x>\n",
3499 (unsigned long)((tx_ring - adapter->tx_ring) / 3515 (unsigned long)((tx_ring - adapter->tx_ring) /
3500 sizeof(struct e1000_tx_ring)), 3516 sizeof(struct e1000_tx_ring)),
3501 readl(hw->hw_addr + tx_ring->tdh), 3517 readl(hw->hw_addr + tx_ring->tdh),
@@ -3635,8 +3651,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3635 3651
3636 cleaned = true; 3652 cleaned = true;
3637 cleaned_count++; 3653 cleaned_count++;
3638 pci_unmap_page(pdev, buffer_info->dma, buffer_info->length, 3654 dma_unmap_page(&pdev->dev, buffer_info->dma,
3639 PCI_DMA_FROMDEVICE); 3655 buffer_info->length, DMA_FROM_DEVICE);
3640 buffer_info->dma = 0; 3656 buffer_info->dma = 0;
3641 3657
3642 length = le16_to_cpu(rx_desc->length); 3658 length = le16_to_cpu(rx_desc->length);
@@ -3734,7 +3750,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
3734 3750
3735 /* eth type trans needs skb->data to point to something */ 3751 /* eth type trans needs skb->data to point to something */
3736 if (!pskb_may_pull(skb, ETH_HLEN)) { 3752 if (!pskb_may_pull(skb, ETH_HLEN)) {
3737 DPRINTK(DRV, ERR, "pskb_may_pull failed.\n"); 3753 e_err("pskb_may_pull failed.\n");
3738 dev_kfree_skb(skb); 3754 dev_kfree_skb(skb);
3739 goto next_desc; 3755 goto next_desc;
3740 } 3756 }
@@ -3769,6 +3785,31 @@ next_desc:
3769 return cleaned; 3785 return cleaned;
3770} 3786}
3771 3787
3788/*
3789 * this should improve performance for small packets with large amounts
3790 * of reassembly being done in the stack
3791 */
3792static void e1000_check_copybreak(struct net_device *netdev,
3793 struct e1000_buffer *buffer_info,
3794 u32 length, struct sk_buff **skb)
3795{
3796 struct sk_buff *new_skb;
3797
3798 if (length > copybreak)
3799 return;
3800
3801 new_skb = netdev_alloc_skb_ip_align(netdev, length);
3802 if (!new_skb)
3803 return;
3804
3805 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
3806 (*skb)->data - NET_IP_ALIGN,
3807 length + NET_IP_ALIGN);
3808 /* save the skb in buffer_info as good */
3809 buffer_info->skb = *skb;
3810 *skb = new_skb;
3811}
3812
3772/** 3813/**
3773 * e1000_clean_rx_irq - Send received data up the network stack; legacy 3814 * e1000_clean_rx_irq - Send received data up the network stack; legacy
3774 * @adapter: board private structure 3815 * @adapter: board private structure
@@ -3818,8 +3859,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3818 3859
3819 cleaned = true; 3860 cleaned = true;
3820 cleaned_count++; 3861 cleaned_count++;
3821 pci_unmap_single(pdev, buffer_info->dma, buffer_info->length, 3862 dma_unmap_single(&pdev->dev, buffer_info->dma,
3822 PCI_DMA_FROMDEVICE); 3863 buffer_info->length, DMA_FROM_DEVICE);
3823 buffer_info->dma = 0; 3864 buffer_info->dma = 0;
3824 3865
3825 length = le16_to_cpu(rx_desc->length); 3866 length = le16_to_cpu(rx_desc->length);
@@ -3834,8 +3875,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3834 3875
3835 if (adapter->discarding) { 3876 if (adapter->discarding) {
3836 /* All receives must fit into a single buffer */ 3877 /* All receives must fit into a single buffer */
3837 E1000_DBG("%s: Receive packet consumed multiple" 3878 e_info("Receive packet consumed multiple buffers\n");
3838 " buffers\n", netdev->name);
3839 /* recycle */ 3879 /* recycle */
3840 buffer_info->skb = skb; 3880 buffer_info->skb = skb;
3841 if (status & E1000_RXD_STAT_EOP) 3881 if (status & E1000_RXD_STAT_EOP)
@@ -3868,26 +3908,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3868 total_rx_bytes += length; 3908 total_rx_bytes += length;
3869 total_rx_packets++; 3909 total_rx_packets++;
3870 3910
3871 /* code added for copybreak, this should improve 3911 e1000_check_copybreak(netdev, buffer_info, length, &skb);
3872 * performance for small packets with large amounts 3912
3873 * of reassembly being done in the stack */
3874 if (length < copybreak) {
3875 struct sk_buff *new_skb =
3876 netdev_alloc_skb_ip_align(netdev, length);
3877 if (new_skb) {
3878 skb_copy_to_linear_data_offset(new_skb,
3879 -NET_IP_ALIGN,
3880 (skb->data -
3881 NET_IP_ALIGN),
3882 (length +
3883 NET_IP_ALIGN));
3884 /* save the skb in buffer_info as good */
3885 buffer_info->skb = skb;
3886 skb = new_skb;
3887 }
3888 /* else just continue with the old one */
3889 }
3890 /* end copybreak code */
3891 skb_put(skb, length); 3913 skb_put(skb, length);
3892 3914
3893 /* Receive Checksum Offload */ 3915 /* Receive Checksum Offload */
@@ -3965,8 +3987,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3965 /* Fix for errata 23, can't cross 64kB boundary */ 3987 /* Fix for errata 23, can't cross 64kB boundary */
3966 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 3988 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
3967 struct sk_buff *oldskb = skb; 3989 struct sk_buff *oldskb = skb;
3968 DPRINTK(PROBE, ERR, "skb align check failed: %u bytes " 3990 e_err("skb align check failed: %u bytes at %p\n",
3969 "at %p\n", bufsz, skb->data); 3991 bufsz, skb->data);
3970 /* Try again, without freeing the previous */ 3992 /* Try again, without freeing the previous */
3971 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 3993 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
3972 /* Failed allocation, critical failure */ 3994 /* Failed allocation, critical failure */
@@ -3999,11 +4021,11 @@ check_page:
3999 } 4021 }
4000 4022
4001 if (!buffer_info->dma) { 4023 if (!buffer_info->dma) {
4002 buffer_info->dma = pci_map_page(pdev, 4024 buffer_info->dma = dma_map_page(&pdev->dev,
4003 buffer_info->page, 0, 4025 buffer_info->page, 0,
4004 buffer_info->length, 4026 buffer_info->length,
4005 PCI_DMA_FROMDEVICE); 4027 DMA_FROM_DEVICE);
4006 if (pci_dma_mapping_error(pdev, buffer_info->dma)) { 4028 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4007 put_page(buffer_info->page); 4029 put_page(buffer_info->page);
4008 dev_kfree_skb(skb); 4030 dev_kfree_skb(skb);
4009 buffer_info->page = NULL; 4031 buffer_info->page = NULL;
@@ -4074,8 +4096,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4074 /* Fix for errata 23, can't cross 64kB boundary */ 4096 /* Fix for errata 23, can't cross 64kB boundary */
4075 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { 4097 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4076 struct sk_buff *oldskb = skb; 4098 struct sk_buff *oldskb = skb;
4077 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " 4099 e_err("skb align check failed: %u bytes at %p\n",
4078 "at %p\n", bufsz, skb->data); 4100 bufsz, skb->data);
4079 /* Try again, without freeing the previous */ 4101 /* Try again, without freeing the previous */
4080 skb = netdev_alloc_skb_ip_align(netdev, bufsz); 4102 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4081 /* Failed allocation, critical failure */ 4103 /* Failed allocation, critical failure */
@@ -4099,11 +4121,11 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4099 buffer_info->skb = skb; 4121 buffer_info->skb = skb;
4100 buffer_info->length = adapter->rx_buffer_len; 4122 buffer_info->length = adapter->rx_buffer_len;
4101map_skb: 4123map_skb:
4102 buffer_info->dma = pci_map_single(pdev, 4124 buffer_info->dma = dma_map_single(&pdev->dev,
4103 skb->data, 4125 skb->data,
4104 buffer_info->length, 4126 buffer_info->length,
4105 PCI_DMA_FROMDEVICE); 4127 DMA_FROM_DEVICE);
4106 if (pci_dma_mapping_error(pdev, buffer_info->dma)) { 4128 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4107 dev_kfree_skb(skb); 4129 dev_kfree_skb(skb);
4108 buffer_info->skb = NULL; 4130 buffer_info->skb = NULL;
4109 buffer_info->dma = 0; 4131 buffer_info->dma = 0;
@@ -4120,16 +4142,15 @@ map_skb:
4120 if (!e1000_check_64k_bound(adapter, 4142 if (!e1000_check_64k_bound(adapter,
4121 (void *)(unsigned long)buffer_info->dma, 4143 (void *)(unsigned long)buffer_info->dma,
4122 adapter->rx_buffer_len)) { 4144 adapter->rx_buffer_len)) {
4123 DPRINTK(RX_ERR, ERR, 4145 e_err("dma align check failed: %u bytes at %p\n",
4124 "dma align check failed: %u bytes at %p\n", 4146 adapter->rx_buffer_len,
4125 adapter->rx_buffer_len, 4147 (void *)(unsigned long)buffer_info->dma);
4126 (void *)(unsigned long)buffer_info->dma);
4127 dev_kfree_skb(skb); 4148 dev_kfree_skb(skb);
4128 buffer_info->skb = NULL; 4149 buffer_info->skb = NULL;
4129 4150
4130 pci_unmap_single(pdev, buffer_info->dma, 4151 dma_unmap_single(&pdev->dev, buffer_info->dma,
4131 adapter->rx_buffer_len, 4152 adapter->rx_buffer_len,
4132 PCI_DMA_FROMDEVICE); 4153 DMA_FROM_DEVICE);
4133 buffer_info->dma = 0; 4154 buffer_info->dma = 0;
4134 4155
4135 adapter->alloc_rx_buff_failed++; 4156 adapter->alloc_rx_buff_failed++;
@@ -4335,7 +4356,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw)
4335 int ret_val = pci_set_mwi(adapter->pdev); 4356 int ret_val = pci_set_mwi(adapter->pdev);
4336 4357
4337 if (ret_val) 4358 if (ret_val)
4338 DPRINTK(PROBE, ERR, "Error in setting MWI\n"); 4359 e_err("Error in setting MWI\n");
4339} 4360}
4340 4361
4341void e1000_pci_clear_mwi(struct e1000_hw *hw) 4362void e1000_pci_clear_mwi(struct e1000_hw *hw)
@@ -4466,7 +4487,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
4466 /* Fiber NICs only allow 1000 gbps Full duplex */ 4487 /* Fiber NICs only allow 1000 gbps Full duplex */
4467 if ((hw->media_type == e1000_media_type_fiber) && 4488 if ((hw->media_type == e1000_media_type_fiber) &&
4468 spddplx != (SPEED_1000 + DUPLEX_FULL)) { 4489 spddplx != (SPEED_1000 + DUPLEX_FULL)) {
4469 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); 4490 e_err("Unsupported Speed/Duplex configuration\n");
4470 return -EINVAL; 4491 return -EINVAL;
4471 } 4492 }
4472 4493
@@ -4489,7 +4510,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
4489 break; 4510 break;
4490 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 4511 case SPEED_1000 + DUPLEX_HALF: /* not supported */
4491 default: 4512 default:
4492 DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n"); 4513 e_err("Unsupported Speed/Duplex configuration\n");
4493 return -EINVAL; 4514 return -EINVAL;
4494 } 4515 }
4495 return 0; 4516 return 0;
@@ -4612,7 +4633,7 @@ static int e1000_resume(struct pci_dev *pdev)
4612 else 4633 else
4613 err = pci_enable_device_mem(pdev); 4634 err = pci_enable_device_mem(pdev);
4614 if (err) { 4635 if (err) {
4615 printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n"); 4636 pr_err("Cannot enable PCI device from suspend\n");
4616 return err; 4637 return err;
4617 } 4638 }
4618 pci_set_master(pdev); 4639 pci_set_master(pdev);
@@ -4715,7 +4736,7 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4715 else 4736 else
4716 err = pci_enable_device_mem(pdev); 4737 err = pci_enable_device_mem(pdev);
4717 if (err) { 4738 if (err) {
4718 printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n"); 4739 pr_err("Cannot re-enable PCI device after reset.\n");
4719 return PCI_ERS_RESULT_DISCONNECT; 4740 return PCI_ERS_RESULT_DISCONNECT;
4720 } 4741 }
4721 pci_set_master(pdev); 4742 pci_set_master(pdev);
@@ -4746,7 +4767,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
4746 4767
4747 if (netif_running(netdev)) { 4768 if (netif_running(netdev)) {
4748 if (e1000_up(adapter)) { 4769 if (e1000_up(adapter)) {
4749 printk("e1000: can't bring device back up after reset\n"); 4770 pr_info("can't bring device back up after reset\n");
4750 return; 4771 return;
4751 } 4772 }
4752 } 4773 }
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index d9298522f5ae..edd1c75aa895 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -41,20 +41,6 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/sched.h> 42#include <linux/sched.h>
43 43
44#ifdef DBG
45#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
46#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
47#else
48#define DEBUGOUT(S)
49#define DEBUGOUT1(S, A...)
50#endif
51
52#define DEBUGFUNC(F) DEBUGOUT(F "\n")
53#define DEBUGOUT2 DEBUGOUT1
54#define DEBUGOUT3 DEBUGOUT2
55#define DEBUGOUT7 DEBUGOUT3
56
57
58#define er32(reg) \ 44#define er32(reg) \
59 (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ 45 (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
60 ? E1000_##reg : E1000_82542_##reg))) 46 ? E1000_##reg : E1000_82542_##reg)))
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 38d2741ccae9..10d8d98bb797 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -188,14 +188,6 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
188 */ 188 */
189E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); 189E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
190 190
191/* Enable Kumeran Lock Loss workaround
192 *
193 * Valid Range: 0, 1
194 *
195 * Default Value: 1 (enabled)
196 */
197E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
198
199struct e1000_option { 191struct e1000_option {
200 enum { enable_option, range_option, list_option } type; 192 enum { enable_option, range_option, list_option } type;
201 const char *name; 193 const char *name;
@@ -226,17 +218,16 @@ static int __devinit e1000_validate_option(unsigned int *value,
226 case enable_option: 218 case enable_option:
227 switch (*value) { 219 switch (*value) {
228 case OPTION_ENABLED: 220 case OPTION_ENABLED:
229 DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name); 221 e_dev_info("%s Enabled\n", opt->name);
230 return 0; 222 return 0;
231 case OPTION_DISABLED: 223 case OPTION_DISABLED:
232 DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name); 224 e_dev_info("%s Disabled\n", opt->name);
233 return 0; 225 return 0;
234 } 226 }
235 break; 227 break;
236 case range_option: 228 case range_option:
237 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 229 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
238 DPRINTK(PROBE, INFO, 230 e_dev_info("%s set to %i\n", opt->name, *value);
239 "%s set to %i\n", opt->name, *value);
240 return 0; 231 return 0;
241 } 232 }
242 break; 233 break;
@@ -248,7 +239,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
248 ent = &opt->arg.l.p[i]; 239 ent = &opt->arg.l.p[i];
249 if (*value == ent->i) { 240 if (*value == ent->i) {
250 if (ent->str[0] != '\0') 241 if (ent->str[0] != '\0')
251 DPRINTK(PROBE, INFO, "%s\n", ent->str); 242 e_dev_info("%s\n", ent->str);
252 return 0; 243 return 0;
253 } 244 }
254 } 245 }
@@ -258,7 +249,7 @@ static int __devinit e1000_validate_option(unsigned int *value,
258 BUG(); 249 BUG();
259 } 250 }
260 251
261 DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n", 252 e_dev_info("Invalid %s value specified (%i) %s\n",
262 opt->name, *value, opt->err); 253 opt->name, *value, opt->err);
263 *value = opt->def; 254 *value = opt->def;
264 return -1; 255 return -1;
@@ -283,9 +274,8 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
283 int bd = adapter->bd_number; 274 int bd = adapter->bd_number;
284 275
285 if (bd >= E1000_MAX_NIC) { 276 if (bd >= E1000_MAX_NIC) {
286 DPRINTK(PROBE, NOTICE, 277 e_dev_warn("Warning: no configuration for board #%i "
287 "Warning: no configuration for board #%i\n", bd); 278 "using defaults for all values\n", bd);
288 DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
289 } 279 }
290 280
291 { /* Transmit Descriptor Count */ 281 { /* Transmit Descriptor Count */
@@ -472,27 +462,31 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
472 adapter->itr = InterruptThrottleRate[bd]; 462 adapter->itr = InterruptThrottleRate[bd];
473 switch (adapter->itr) { 463 switch (adapter->itr) {
474 case 0: 464 case 0:
475 DPRINTK(PROBE, INFO, "%s turned off\n", 465 e_dev_info("%s turned off\n", opt.name);
476 opt.name);
477 break; 466 break;
478 case 1: 467 case 1:
479 DPRINTK(PROBE, INFO, "%s set to dynamic mode\n", 468 e_dev_info("%s set to dynamic mode\n",
480 opt.name); 469 opt.name);
481 adapter->itr_setting = adapter->itr; 470 adapter->itr_setting = adapter->itr;
482 adapter->itr = 20000; 471 adapter->itr = 20000;
483 break; 472 break;
484 case 3: 473 case 3:
485 DPRINTK(PROBE, INFO, 474 e_dev_info("%s set to dynamic conservative "
486 "%s set to dynamic conservative mode\n", 475 "mode\n", opt.name);
487 opt.name);
488 adapter->itr_setting = adapter->itr; 476 adapter->itr_setting = adapter->itr;
489 adapter->itr = 20000; 477 adapter->itr = 20000;
490 break; 478 break;
479 case 4:
480 e_dev_info("%s set to simplified "
481 "(2000-8000) ints mode\n", opt.name);
482 adapter->itr_setting = adapter->itr;
483 break;
491 default: 484 default:
492 e1000_validate_option(&adapter->itr, &opt, 485 e1000_validate_option(&adapter->itr, &opt,
493 adapter); 486 adapter);
494 /* save the setting, because the dynamic bits change itr */ 487 /* save the setting, because the dynamic bits
495 /* clear the lower two bits because they are 488 * change itr.
489 * clear the lower two bits because they are
496 * used as control */ 490 * used as control */
497 adapter->itr_setting = adapter->itr & ~3; 491 adapter->itr_setting = adapter->itr & ~3;
498 break; 492 break;
@@ -543,19 +537,18 @@ static void __devinit e1000_check_fiber_options(struct e1000_adapter *adapter)
543{ 537{
544 int bd = adapter->bd_number; 538 int bd = adapter->bd_number;
545 if (num_Speed > bd) { 539 if (num_Speed > bd) {
546 DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, " 540 e_dev_info("Speed not valid for fiber adapters, parameter "
547 "parameter ignored\n"); 541 "ignored\n");
548 } 542 }
549 543
550 if (num_Duplex > bd) { 544 if (num_Duplex > bd) {
551 DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, " 545 e_dev_info("Duplex not valid for fiber adapters, parameter "
552 "parameter ignored\n"); 546 "ignored\n");
553 } 547 }
554 548
555 if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { 549 if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
556 DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is " 550 e_dev_info("AutoNeg other than 1000/Full is not valid for fiber"
557 "not valid for fiber adapters, " 551 "adapters, parameter ignored\n");
558 "parameter ignored\n");
559 } 552 }
560} 553}
561 554
@@ -619,9 +612,8 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
619 } 612 }
620 613
621 if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { 614 if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
622 DPRINTK(PROBE, INFO, 615 e_dev_info("AutoNeg specified along with Speed or Duplex, "
623 "AutoNeg specified along with Speed or Duplex, " 616 "parameter ignored\n");
624 "parameter ignored\n");
625 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; 617 adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
626 } else { /* Autoneg */ 618 } else { /* Autoneg */
627 static const struct e1000_opt_list an_list[] = 619 static const struct e1000_opt_list an_list[] =
@@ -680,79 +672,72 @@ static void __devinit e1000_check_copper_options(struct e1000_adapter *adapter)
680 case 0: 672 case 0:
681 adapter->hw.autoneg = adapter->fc_autoneg = 1; 673 adapter->hw.autoneg = adapter->fc_autoneg = 1;
682 if ((num_Speed > bd) && (speed != 0 || dplx != 0)) 674 if ((num_Speed > bd) && (speed != 0 || dplx != 0))
683 DPRINTK(PROBE, INFO, 675 e_dev_info("Speed and duplex autonegotiation "
684 "Speed and duplex autonegotiation enabled\n"); 676 "enabled\n");
685 break; 677 break;
686 case HALF_DUPLEX: 678 case HALF_DUPLEX:
687 DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n"); 679 e_dev_info("Half Duplex specified without Speed\n");
688 DPRINTK(PROBE, INFO, "Using Autonegotiation at " 680 e_dev_info("Using Autonegotiation at Half Duplex only\n");
689 "Half Duplex only\n");
690 adapter->hw.autoneg = adapter->fc_autoneg = 1; 681 adapter->hw.autoneg = adapter->fc_autoneg = 1;
691 adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | 682 adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
692 ADVERTISE_100_HALF; 683 ADVERTISE_100_HALF;
693 break; 684 break;
694 case FULL_DUPLEX: 685 case FULL_DUPLEX:
695 DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n"); 686 e_dev_info("Full Duplex specified without Speed\n");
696 DPRINTK(PROBE, INFO, "Using Autonegotiation at " 687 e_dev_info("Using Autonegotiation at Full Duplex only\n");
697 "Full Duplex only\n");
698 adapter->hw.autoneg = adapter->fc_autoneg = 1; 688 adapter->hw.autoneg = adapter->fc_autoneg = 1;
699 adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | 689 adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
700 ADVERTISE_100_FULL | 690 ADVERTISE_100_FULL |
701 ADVERTISE_1000_FULL; 691 ADVERTISE_1000_FULL;
702 break; 692 break;
703 case SPEED_10: 693 case SPEED_10:
704 DPRINTK(PROBE, INFO, "10 Mbps Speed specified " 694 e_dev_info("10 Mbps Speed specified without Duplex\n");
705 "without Duplex\n"); 695 e_dev_info("Using Autonegotiation at 10 Mbps only\n");
706 DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
707 adapter->hw.autoneg = adapter->fc_autoneg = 1; 696 adapter->hw.autoneg = adapter->fc_autoneg = 1;
708 adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | 697 adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
709 ADVERTISE_10_FULL; 698 ADVERTISE_10_FULL;
710 break; 699 break;
711 case SPEED_10 + HALF_DUPLEX: 700 case SPEED_10 + HALF_DUPLEX:
712 DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n"); 701 e_dev_info("Forcing to 10 Mbps Half Duplex\n");
713 adapter->hw.autoneg = adapter->fc_autoneg = 0; 702 adapter->hw.autoneg = adapter->fc_autoneg = 0;
714 adapter->hw.forced_speed_duplex = e1000_10_half; 703 adapter->hw.forced_speed_duplex = e1000_10_half;
715 adapter->hw.autoneg_advertised = 0; 704 adapter->hw.autoneg_advertised = 0;
716 break; 705 break;
717 case SPEED_10 + FULL_DUPLEX: 706 case SPEED_10 + FULL_DUPLEX:
718 DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n"); 707 e_dev_info("Forcing to 10 Mbps Full Duplex\n");
719 adapter->hw.autoneg = adapter->fc_autoneg = 0; 708 adapter->hw.autoneg = adapter->fc_autoneg = 0;
720 adapter->hw.forced_speed_duplex = e1000_10_full; 709 adapter->hw.forced_speed_duplex = e1000_10_full;
721 adapter->hw.autoneg_advertised = 0; 710 adapter->hw.autoneg_advertised = 0;
722 break; 711 break;
723 case SPEED_100: 712 case SPEED_100:
724 DPRINTK(PROBE, INFO, "100 Mbps Speed specified " 713 e_dev_info("100 Mbps Speed specified without Duplex\n");
725 "without Duplex\n"); 714 e_dev_info("Using Autonegotiation at 100 Mbps only\n");
726 DPRINTK(PROBE, INFO, "Using Autonegotiation at "
727 "100 Mbps only\n");
728 adapter->hw.autoneg = adapter->fc_autoneg = 1; 715 adapter->hw.autoneg = adapter->fc_autoneg = 1;
729 adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | 716 adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
730 ADVERTISE_100_FULL; 717 ADVERTISE_100_FULL;
731 break; 718 break;
732 case SPEED_100 + HALF_DUPLEX: 719 case SPEED_100 + HALF_DUPLEX:
733 DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n"); 720 e_dev_info("Forcing to 100 Mbps Half Duplex\n");
734 adapter->hw.autoneg = adapter->fc_autoneg = 0; 721 adapter->hw.autoneg = adapter->fc_autoneg = 0;
735 adapter->hw.forced_speed_duplex = e1000_100_half; 722 adapter->hw.forced_speed_duplex = e1000_100_half;
736 adapter->hw.autoneg_advertised = 0; 723 adapter->hw.autoneg_advertised = 0;
737 break; 724 break;
738 case SPEED_100 + FULL_DUPLEX: 725 case SPEED_100 + FULL_DUPLEX:
739 DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n"); 726 e_dev_info("Forcing to 100 Mbps Full Duplex\n");
740 adapter->hw.autoneg = adapter->fc_autoneg = 0; 727 adapter->hw.autoneg = adapter->fc_autoneg = 0;
741 adapter->hw.forced_speed_duplex = e1000_100_full; 728 adapter->hw.forced_speed_duplex = e1000_100_full;
742 adapter->hw.autoneg_advertised = 0; 729 adapter->hw.autoneg_advertised = 0;
743 break; 730 break;
744 case SPEED_1000: 731 case SPEED_1000:
745 DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without " 732 e_dev_info("1000 Mbps Speed specified without Duplex\n");
746 "Duplex\n");
747 goto full_duplex_only; 733 goto full_duplex_only;
748 case SPEED_1000 + HALF_DUPLEX: 734 case SPEED_1000 + HALF_DUPLEX:
749 DPRINTK(PROBE, INFO, 735 e_dev_info("Half Duplex is not supported at 1000 Mbps\n");
750 "Half Duplex is not supported at 1000 Mbps\n");
751 /* fall through */ 736 /* fall through */
752 case SPEED_1000 + FULL_DUPLEX: 737 case SPEED_1000 + FULL_DUPLEX:
753full_duplex_only: 738full_duplex_only:
754 DPRINTK(PROBE, INFO, 739 e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex "
755 "Using Autonegotiation at 1000 Mbps Full Duplex only\n"); 740 "only\n");
756 adapter->hw.autoneg = adapter->fc_autoneg = 1; 741 adapter->hw.autoneg = adapter->fc_autoneg = 1;
757 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; 742 adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
758 break; 743 break;
@@ -762,9 +747,8 @@ full_duplex_only:
762 747
763 /* Speed, AutoNeg and MDI/MDI-X must all play nice */ 748 /* Speed, AutoNeg and MDI/MDI-X must all play nice */
764 if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { 749 if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
765 DPRINTK(PROBE, INFO, 750 e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. "
766 "Speed, AutoNeg and MDI-X specifications are " 751 "Setting MDI-X to a compatible value.\n");
767 "incompatible. Setting MDI-X to a compatible value.\n");
768 } 752 }
769} 753}
770 754
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 712ccc66ba25..f654db9121de 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -234,9 +234,6 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
234 mac->mta_reg_count = 128; 234 mac->mta_reg_count = 128;
235 /* Set rar entry count */ 235 /* Set rar entry count */
236 mac->rar_entry_count = E1000_RAR_ENTRIES; 236 mac->rar_entry_count = E1000_RAR_ENTRIES;
237 /* Set if manageability features are enabled. */
238 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK)
239 ? true : false;
240 /* Adaptive IFS supported */ 237 /* Adaptive IFS supported */
241 mac->adaptive_ifs = true; 238 mac->adaptive_ifs = true;
242 239
@@ -271,6 +268,16 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
271 func->set_lan_id = e1000_set_lan_id_single_port; 268 func->set_lan_id = e1000_set_lan_id_single_port;
272 func->check_mng_mode = e1000e_check_mng_mode_generic; 269 func->check_mng_mode = e1000e_check_mng_mode_generic;
273 func->led_on = e1000e_led_on_generic; 270 func->led_on = e1000e_led_on_generic;
271
272 /* FWSM register */
273 mac->has_fwsm = true;
274 /*
275 * ARC supported; valid only if manageability features are
276 * enabled.
277 */
278 mac->arc_subsystem_valid =
279 (er32(FWSM) & E1000_FWSM_MODE_MASK)
280 ? true : false;
274 break; 281 break;
275 case e1000_82574: 282 case e1000_82574:
276 case e1000_82583: 283 case e1000_82583:
@@ -281,6 +288,9 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
281 default: 288 default:
282 func->check_mng_mode = e1000e_check_mng_mode_generic; 289 func->check_mng_mode = e1000e_check_mng_mode_generic;
283 func->led_on = e1000e_led_on_generic; 290 func->led_on = e1000e_led_on_generic;
291
292 /* FWSM register */
293 mac->has_fwsm = true;
284 break; 294 break;
285 } 295 }
286 296
@@ -323,7 +333,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
323 } 333 }
324 334
325 /* 335 /*
326 * Initialze device specific counter of SMBI acquisition 336 * Initialize device specific counter of SMBI acquisition
327 * timeouts. 337 * timeouts.
328 */ 338 */
329 hw->dev_spec.e82571.smb_counter = 0; 339 hw->dev_spec.e82571.smb_counter = 0;
@@ -336,7 +346,6 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
336 struct e1000_hw *hw = &adapter->hw; 346 struct e1000_hw *hw = &adapter->hw;
337 static int global_quad_port_a; /* global port a indication */ 347 static int global_quad_port_a; /* global port a indication */
338 struct pci_dev *pdev = adapter->pdev; 348 struct pci_dev *pdev = adapter->pdev;
339 u16 eeprom_data = 0;
340 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; 349 int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
341 s32 rc; 350 s32 rc;
342 351
@@ -387,16 +396,15 @@ static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
387 if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) 396 if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
388 adapter->flags &= ~FLAG_HAS_WOL; 397 adapter->flags &= ~FLAG_HAS_WOL;
389 break; 398 break;
390
391 case e1000_82573: 399 case e1000_82573:
400 case e1000_82574:
401 case e1000_82583:
402 /* Disable ASPM L0s due to hardware errata */
403 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L0S);
404
392 if (pdev->device == E1000_DEV_ID_82573L) { 405 if (pdev->device == E1000_DEV_ID_82573L) {
393 if (e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, 406 adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
394 &eeprom_data) < 0) 407 adapter->max_hw_frame_size = DEFAULT_JUMBO;
395 break;
396 if (!(eeprom_data & NVM_WORD1A_ASPM_MASK)) {
397 adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
398 adapter->max_hw_frame_size = DEFAULT_JUMBO;
399 }
400 } 408 }
401 break; 409 break;
402 default: 410 default:
@@ -995,9 +1003,10 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
995 /* ...for both queues. */ 1003 /* ...for both queues. */
996 switch (mac->type) { 1004 switch (mac->type) {
997 case e1000_82573: 1005 case e1000_82573:
1006 e1000e_enable_tx_pkt_filtering(hw);
1007 /* fall through */
998 case e1000_82574: 1008 case e1000_82574:
999 case e1000_82583: 1009 case e1000_82583:
1000 e1000e_enable_tx_pkt_filtering(hw);
1001 reg_data = er32(GCR); 1010 reg_data = er32(GCR);
1002 reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; 1011 reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
1003 ew32(GCR, reg_data); 1012 ew32(GCR, reg_data);
@@ -1139,8 +1148,6 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
1139 default: 1148 default:
1140 break; 1149 break;
1141 } 1150 }
1142
1143 return;
1144} 1151}
1145 1152
1146/** 1153/**
@@ -1644,8 +1651,6 @@ static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
1644 /* If the management interface is not enabled, then power down */ 1651 /* If the management interface is not enabled, then power down */
1645 if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) 1652 if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
1646 e1000_power_down_phy_copper(hw); 1653 e1000_power_down_phy_copper(hw);
1647
1648 return;
1649} 1654}
1650 1655
1651/** 1656/**
@@ -1792,6 +1797,7 @@ struct e1000_info e1000_82571_info = {
1792 | FLAG_RESET_OVERWRITES_LAA /* errata */ 1797 | FLAG_RESET_OVERWRITES_LAA /* errata */
1793 | FLAG_TARC_SPEED_MODE_BIT /* errata */ 1798 | FLAG_TARC_SPEED_MODE_BIT /* errata */
1794 | FLAG_APME_CHECK_PORT_B, 1799 | FLAG_APME_CHECK_PORT_B,
1800 .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
1795 .pba = 38, 1801 .pba = 38,
1796 .max_hw_frame_size = DEFAULT_JUMBO, 1802 .max_hw_frame_size = DEFAULT_JUMBO,
1797 .get_variants = e1000_get_variants_82571, 1803 .get_variants = e1000_get_variants_82571,
@@ -1809,6 +1815,7 @@ struct e1000_info e1000_82572_info = {
1809 | FLAG_RX_CSUM_ENABLED 1815 | FLAG_RX_CSUM_ENABLED
1810 | FLAG_HAS_CTRLEXT_ON_LOAD 1816 | FLAG_HAS_CTRLEXT_ON_LOAD
1811 | FLAG_TARC_SPEED_MODE_BIT, /* errata */ 1817 | FLAG_TARC_SPEED_MODE_BIT, /* errata */
1818 .flags2 = FLAG2_DISABLE_ASPM_L1, /* errata 13 */
1812 .pba = 38, 1819 .pba = 38,
1813 .max_hw_frame_size = DEFAULT_JUMBO, 1820 .max_hw_frame_size = DEFAULT_JUMBO,
1814 .get_variants = e1000_get_variants_82571, 1821 .get_variants = e1000_get_variants_82571,
@@ -1820,13 +1827,11 @@ struct e1000_info e1000_82572_info = {
1820struct e1000_info e1000_82573_info = { 1827struct e1000_info e1000_82573_info = {
1821 .mac = e1000_82573, 1828 .mac = e1000_82573,
1822 .flags = FLAG_HAS_HW_VLAN_FILTER 1829 .flags = FLAG_HAS_HW_VLAN_FILTER
1823 | FLAG_HAS_JUMBO_FRAMES
1824 | FLAG_HAS_WOL 1830 | FLAG_HAS_WOL
1825 | FLAG_APME_IN_CTRL3 1831 | FLAG_APME_IN_CTRL3
1826 | FLAG_RX_CSUM_ENABLED 1832 | FLAG_RX_CSUM_ENABLED
1827 | FLAG_HAS_SMART_POWER_DOWN 1833 | FLAG_HAS_SMART_POWER_DOWN
1828 | FLAG_HAS_AMT 1834 | FLAG_HAS_AMT
1829 | FLAG_HAS_ERT
1830 | FLAG_HAS_SWSM_ON_LOAD, 1835 | FLAG_HAS_SWSM_ON_LOAD,
1831 .pba = 20, 1836 .pba = 20,
1832 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 1837 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
@@ -1847,7 +1852,7 @@ struct e1000_info e1000_82574_info = {
1847 | FLAG_HAS_SMART_POWER_DOWN 1852 | FLAG_HAS_SMART_POWER_DOWN
1848 | FLAG_HAS_AMT 1853 | FLAG_HAS_AMT
1849 | FLAG_HAS_CTRLEXT_ON_LOAD, 1854 | FLAG_HAS_CTRLEXT_ON_LOAD,
1850 .pba = 20, 1855 .pba = 36,
1851 .max_hw_frame_size = DEFAULT_JUMBO, 1856 .max_hw_frame_size = DEFAULT_JUMBO,
1852 .get_variants = e1000_get_variants_82571, 1857 .get_variants = e1000_get_variants_82571,
1853 .mac_ops = &e82571_mac_ops, 1858 .mac_ops = &e82571_mac_ops,
@@ -1864,7 +1869,7 @@ struct e1000_info e1000_82583_info = {
1864 | FLAG_HAS_SMART_POWER_DOWN 1869 | FLAG_HAS_SMART_POWER_DOWN
1865 | FLAG_HAS_AMT 1870 | FLAG_HAS_AMT
1866 | FLAG_HAS_CTRLEXT_ON_LOAD, 1871 | FLAG_HAS_CTRLEXT_ON_LOAD,
1867 .pba = 20, 1872 .pba = 36,
1868 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 1873 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
1869 .get_variants = e1000_get_variants_82571, 1874 .get_variants = e1000_get_variants_82571,
1870 .mac_ops = &e82571_mac_ops, 1875 .mac_ops = &e82571_mac_ops,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e301e26d6897..4dc02c71ffd6 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -138,6 +138,11 @@
138/* Enable MNG packets to host memory */ 138/* Enable MNG packets to host memory */
139#define E1000_MANC_EN_MNG2HOST 0x00200000 139#define E1000_MANC_EN_MNG2HOST 0x00200000
140 140
141#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */
142#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */
143#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */
144#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */
145
141/* Receive Control */ 146/* Receive Control */
142#define E1000_RCTL_EN 0x00000002 /* enable */ 147#define E1000_RCTL_EN 0x00000002 /* enable */
143#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ 148#define E1000_RCTL_SBP 0x00000004 /* store bad packet */
@@ -214,6 +219,8 @@
214#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ 219#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */
215#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ 220#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */
216#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ 221#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */
222#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
223#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */
217#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 224#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
218#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 225#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
219#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ 226#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */
@@ -622,6 +629,8 @@
622#define NVM_ALT_MAC_ADDR_PTR 0x0037 629#define NVM_ALT_MAC_ADDR_PTR 0x0037
623#define NVM_CHECKSUM_REG 0x003F 630#define NVM_CHECKSUM_REG 0x003F
624 631
632#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
633
625#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ 634#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */
626#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ 635#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */
627 636
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 118bdf483593..c0b3db40bd73 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -37,30 +37,22 @@
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/netdevice.h> 38#include <linux/netdevice.h>
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/pci-aspm.h>
40 41
41#include "hw.h" 42#include "hw.h"
42 43
43struct e1000_info; 44struct e1000_info;
44 45
45#define e_printk(level, adapter, format, arg...) \
46 printk(level "%s: %s: " format, pci_name(adapter->pdev), \
47 adapter->netdev->name, ## arg)
48
49#ifdef DEBUG
50#define e_dbg(format, arg...) \ 46#define e_dbg(format, arg...) \
51 e_printk(KERN_DEBUG , hw->adapter, format, ## arg) 47 netdev_dbg(hw->adapter->netdev, format, ## arg)
52#else
53#define e_dbg(format, arg...) do { (void)(hw); } while (0)
54#endif
55
56#define e_err(format, arg...) \ 48#define e_err(format, arg...) \
57 e_printk(KERN_ERR, adapter, format, ## arg) 49 netdev_err(adapter->netdev, format, ## arg)
58#define e_info(format, arg...) \ 50#define e_info(format, arg...) \
59 e_printk(KERN_INFO, adapter, format, ## arg) 51 netdev_info(adapter->netdev, format, ## arg)
60#define e_warn(format, arg...) \ 52#define e_warn(format, arg...) \
61 e_printk(KERN_WARNING, adapter, format, ## arg) 53 netdev_warn(adapter->netdev, format, ## arg)
62#define e_notice(format, arg...) \ 54#define e_notice(format, arg...) \
63 e_printk(KERN_NOTICE, adapter, format, ## arg) 55 netdev_notice(adapter->netdev, format, ## arg)
64 56
65 57
66/* Interrupt modes, as used by the IntMode parameter */ 58/* Interrupt modes, as used by the IntMode parameter */
@@ -158,6 +150,9 @@ struct e1000_info;
158#define HV_M_STATUS_SPEED_1000 0x0200 150#define HV_M_STATUS_SPEED_1000 0x0200
159#define HV_M_STATUS_LINK_UP 0x0040 151#define HV_M_STATUS_LINK_UP 0x0040
160 152
153/* Time to wait before putting the device into D3 if there's no link (in ms). */
154#define LINK_TIMEOUT 100
155
161enum e1000_boards { 156enum e1000_boards {
162 board_82571, 157 board_82571,
163 board_82572, 158 board_82572,
@@ -194,6 +189,8 @@ struct e1000_buffer {
194 unsigned long time_stamp; 189 unsigned long time_stamp;
195 u16 length; 190 u16 length;
196 u16 next_to_watch; 191 u16 next_to_watch;
192 unsigned int segs;
193 unsigned int bytecount;
197 u16 mapped_as_page; 194 u16 mapped_as_page;
198 }; 195 };
199 /* Rx */ 196 /* Rx */
@@ -369,12 +366,14 @@ struct e1000_adapter {
369 struct work_struct update_phy_task; 366 struct work_struct update_phy_task;
370 struct work_struct led_blink_task; 367 struct work_struct led_blink_task;
371 struct work_struct print_hang_task; 368 struct work_struct print_hang_task;
369
370 bool idle_check;
372}; 371};
373 372
374struct e1000_info { 373struct e1000_info {
375 enum e1000_mac_type mac; 374 enum e1000_mac_type mac;
376 unsigned int flags; 375 unsigned int flags;
377 unsigned int flags2; 376 unsigned int flags2;
378 u32 pba; 377 u32 pba;
379 u32 max_hw_frame_size; 378 u32 max_hw_frame_size;
380 s32 (*get_variants)(struct e1000_adapter *); 379 s32 (*get_variants)(struct e1000_adapter *);
@@ -421,6 +420,7 @@ struct e1000_info {
421#define FLAG2_CRC_STRIPPING (1 << 0) 420#define FLAG2_CRC_STRIPPING (1 << 0)
422#define FLAG2_HAS_PHY_WAKEUP (1 << 1) 421#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
423#define FLAG2_IS_DISCARDING (1 << 2) 422#define FLAG2_IS_DISCARDING (1 << 2)
423#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
424 424
425#define E1000_RX_DESC_PS(R, i) \ 425#define E1000_RX_DESC_PS(R, i) \
426 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) 426 (&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -461,6 +461,7 @@ extern void e1000e_update_stats(struct e1000_adapter *adapter);
461extern bool e1000e_has_link(struct e1000_adapter *adapter); 461extern bool e1000e_has_link(struct e1000_adapter *adapter);
462extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 462extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
463extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 463extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
464extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
464 465
465extern unsigned int copybreak; 466extern unsigned int copybreak;
466 467
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 27d21589a69a..38d79a669059 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -221,9 +221,12 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
221 mac->mta_reg_count = 128; 221 mac->mta_reg_count = 128;
222 /* Set rar entry count */ 222 /* Set rar entry count */
223 mac->rar_entry_count = E1000_RAR_ENTRIES; 223 mac->rar_entry_count = E1000_RAR_ENTRIES;
224 /* Set if manageability features are enabled. */ 224 /* FWSM register */
225 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) 225 mac->has_fwsm = true;
226 ? true : false; 226 /* ARC supported; valid only if manageability features are enabled. */
227 mac->arc_subsystem_valid =
228 (er32(FWSM) & E1000_FWSM_MODE_MASK)
229 ? true : false;
227 /* Adaptive IFS not supported */ 230 /* Adaptive IFS not supported */
228 mac->adaptive_ifs = false; 231 mac->adaptive_ifs = false;
229 232
@@ -1380,8 +1383,6 @@ static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
1380 if (!(hw->mac.ops.check_mng_mode(hw) || 1383 if (!(hw->mac.ops.check_mng_mode(hw) ||
1381 hw->phy.ops.check_reset_block(hw))) 1384 hw->phy.ops.check_reset_block(hw)))
1382 e1000_power_down_phy_copper(hw); 1385 e1000_power_down_phy_copper(hw);
1383
1384 return;
1385} 1386}
1386 1387
1387/** 1388/**
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 983493f2330c..2c521218102b 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -412,7 +412,6 @@ static int e1000_set_tso(struct net_device *netdev, u32 data)
412 netdev->features &= ~NETIF_F_TSO6; 412 netdev->features &= ~NETIF_F_TSO6;
413 } 413 }
414 414
415 e_info("TSO is %s\n", data ? "Enabled" : "Disabled");
416 adapter->flags |= FLAG_TSO_FORCE; 415 adapter->flags |= FLAG_TSO_FORCE;
417 return 0; 416 return 0;
418} 417}
@@ -1069,10 +1068,10 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
1069 if (tx_ring->desc && tx_ring->buffer_info) { 1068 if (tx_ring->desc && tx_ring->buffer_info) {
1070 for (i = 0; i < tx_ring->count; i++) { 1069 for (i = 0; i < tx_ring->count; i++) {
1071 if (tx_ring->buffer_info[i].dma) 1070 if (tx_ring->buffer_info[i].dma)
1072 pci_unmap_single(pdev, 1071 dma_unmap_single(&pdev->dev,
1073 tx_ring->buffer_info[i].dma, 1072 tx_ring->buffer_info[i].dma,
1074 tx_ring->buffer_info[i].length, 1073 tx_ring->buffer_info[i].length,
1075 PCI_DMA_TODEVICE); 1074 DMA_TO_DEVICE);
1076 if (tx_ring->buffer_info[i].skb) 1075 if (tx_ring->buffer_info[i].skb)
1077 dev_kfree_skb(tx_ring->buffer_info[i].skb); 1076 dev_kfree_skb(tx_ring->buffer_info[i].skb);
1078 } 1077 }
@@ -1081,9 +1080,9 @@ static void e1000_free_desc_rings(struct e1000_adapter *adapter)
1081 if (rx_ring->desc && rx_ring->buffer_info) { 1080 if (rx_ring->desc && rx_ring->buffer_info) {
1082 for (i = 0; i < rx_ring->count; i++) { 1081 for (i = 0; i < rx_ring->count; i++) {
1083 if (rx_ring->buffer_info[i].dma) 1082 if (rx_ring->buffer_info[i].dma)
1084 pci_unmap_single(pdev, 1083 dma_unmap_single(&pdev->dev,
1085 rx_ring->buffer_info[i].dma, 1084 rx_ring->buffer_info[i].dma,
1086 2048, PCI_DMA_FROMDEVICE); 1085 2048, DMA_FROM_DEVICE);
1087 if (rx_ring->buffer_info[i].skb) 1086 if (rx_ring->buffer_info[i].skb)
1088 dev_kfree_skb(rx_ring->buffer_info[i].skb); 1087 dev_kfree_skb(rx_ring->buffer_info[i].skb);
1089 } 1088 }
@@ -1163,9 +1162,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1163 tx_ring->buffer_info[i].skb = skb; 1162 tx_ring->buffer_info[i].skb = skb;
1164 tx_ring->buffer_info[i].length = skb->len; 1163 tx_ring->buffer_info[i].length = skb->len;
1165 tx_ring->buffer_info[i].dma = 1164 tx_ring->buffer_info[i].dma =
1166 pci_map_single(pdev, skb->data, skb->len, 1165 dma_map_single(&pdev->dev, skb->data, skb->len,
1167 PCI_DMA_TODEVICE); 1166 DMA_TO_DEVICE);
1168 if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) { 1167 if (dma_mapping_error(&pdev->dev,
1168 tx_ring->buffer_info[i].dma)) {
1169 ret_val = 4; 1169 ret_val = 4;
1170 goto err_nomem; 1170 goto err_nomem;
1171 } 1171 }
@@ -1226,9 +1226,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
1226 skb_reserve(skb, NET_IP_ALIGN); 1226 skb_reserve(skb, NET_IP_ALIGN);
1227 rx_ring->buffer_info[i].skb = skb; 1227 rx_ring->buffer_info[i].skb = skb;
1228 rx_ring->buffer_info[i].dma = 1228 rx_ring->buffer_info[i].dma =
1229 pci_map_single(pdev, skb->data, 2048, 1229 dma_map_single(&pdev->dev, skb->data, 2048,
1230 PCI_DMA_FROMDEVICE); 1230 DMA_FROM_DEVICE);
1231 if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) { 1231 if (dma_mapping_error(&pdev->dev,
1232 rx_ring->buffer_info[i].dma)) {
1232 ret_val = 8; 1233 ret_val = 8;
1233 goto err_nomem; 1234 goto err_nomem;
1234 } 1235 }
@@ -1556,10 +1557,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1556 for (i = 0; i < 64; i++) { /* send the packets */ 1557 for (i = 0; i < 64; i++) { /* send the packets */
1557 e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb, 1558 e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1558 1024); 1559 1024);
1559 pci_dma_sync_single_for_device(pdev, 1560 dma_sync_single_for_device(&pdev->dev,
1560 tx_ring->buffer_info[k].dma, 1561 tx_ring->buffer_info[k].dma,
1561 tx_ring->buffer_info[k].length, 1562 tx_ring->buffer_info[k].length,
1562 PCI_DMA_TODEVICE); 1563 DMA_TO_DEVICE);
1563 k++; 1564 k++;
1564 if (k == tx_ring->count) 1565 if (k == tx_ring->count)
1565 k = 0; 1566 k = 0;
@@ -1569,9 +1570,9 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
1569 time = jiffies; /* set the start time for the receive */ 1570 time = jiffies; /* set the start time for the receive */
1570 good_cnt = 0; 1571 good_cnt = 0;
1571 do { /* receive the sent packets */ 1572 do { /* receive the sent packets */
1572 pci_dma_sync_single_for_cpu(pdev, 1573 dma_sync_single_for_cpu(&pdev->dev,
1573 rx_ring->buffer_info[l].dma, 2048, 1574 rx_ring->buffer_info[l].dma, 2048,
1574 PCI_DMA_FROMDEVICE); 1575 DMA_FROM_DEVICE);
1575 1576
1576 ret_val = e1000_check_lbtest_frame( 1577 ret_val = e1000_check_lbtest_frame(
1577 rx_ring->buffer_info[l].skb, 1024); 1578 rx_ring->buffer_info[l].skb, 1024);
@@ -1736,6 +1737,12 @@ static void e1000_diag_test(struct net_device *netdev,
1736 if (if_running) 1737 if (if_running)
1737 dev_open(netdev); 1738 dev_open(netdev);
1738 } else { 1739 } else {
1740 if (!if_running && (adapter->flags & FLAG_HAS_AMT)) {
1741 clear_bit(__E1000_TESTING, &adapter->state);
1742 dev_open(netdev);
1743 set_bit(__E1000_TESTING, &adapter->state);
1744 }
1745
1739 e_info("online testing starting\n"); 1746 e_info("online testing starting\n");
1740 /* Online tests */ 1747 /* Online tests */
1741 if (e1000_link_test(adapter, &data[4])) 1748 if (e1000_link_test(adapter, &data[4]))
@@ -1747,6 +1754,9 @@ static void e1000_diag_test(struct net_device *netdev,
1747 data[2] = 0; 1754 data[2] = 0;
1748 data[3] = 0; 1755 data[3] = 0;
1749 1756
1757 if (!if_running && (adapter->flags & FLAG_HAS_AMT))
1758 dev_close(netdev);
1759
1750 clear_bit(__E1000_TESTING, &adapter->state); 1760 clear_bit(__E1000_TESTING, &adapter->state);
1751 } 1761 }
1752 msleep_interruptible(4 * 1000); 1762 msleep_interruptible(4 * 1000);
@@ -1889,7 +1899,7 @@ static int e1000_get_coalesce(struct net_device *netdev,
1889{ 1899{
1890 struct e1000_adapter *adapter = netdev_priv(netdev); 1900 struct e1000_adapter *adapter = netdev_priv(netdev);
1891 1901
1892 if (adapter->itr_setting <= 3) 1902 if (adapter->itr_setting <= 4)
1893 ec->rx_coalesce_usecs = adapter->itr_setting; 1903 ec->rx_coalesce_usecs = adapter->itr_setting;
1894 else 1904 else
1895 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; 1905 ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
@@ -1904,12 +1914,14 @@ static int e1000_set_coalesce(struct net_device *netdev,
1904 struct e1000_hw *hw = &adapter->hw; 1914 struct e1000_hw *hw = &adapter->hw;
1905 1915
1906 if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || 1916 if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
1907 ((ec->rx_coalesce_usecs > 3) && 1917 ((ec->rx_coalesce_usecs > 4) &&
1908 (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || 1918 (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
1909 (ec->rx_coalesce_usecs == 2)) 1919 (ec->rx_coalesce_usecs == 2))
1910 return -EINVAL; 1920 return -EINVAL;
1911 1921
1912 if (ec->rx_coalesce_usecs <= 3) { 1922 if (ec->rx_coalesce_usecs == 4) {
1923 adapter->itr = adapter->itr_setting = 4;
1924 } else if (ec->rx_coalesce_usecs <= 3) {
1913 adapter->itr = 20000; 1925 adapter->itr = 20000;
1914 adapter->itr_setting = ec->rx_coalesce_usecs; 1926 adapter->itr_setting = ec->rx_coalesce_usecs;
1915 } else { 1927 } else {
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index 8bdcd5f24eff..5d1220d188d4 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -208,6 +208,8 @@ enum e1e_registers {
208 208
209 E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */ 209 E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
210 E1000_MANC2H = 0x05860, /* Management Control To Host - RW */ 210 E1000_MANC2H = 0x05860, /* Management Control To Host - RW */
211 E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
212#define E1000_MDEF(_n) (E1000_MDEF_BASE + ((_n) * 4))
211 E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */ 213 E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
212 E1000_GCR = 0x05B00, /* PCI-Ex Control */ 214 E1000_GCR = 0x05B00, /* PCI-Ex Control */
213 E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */ 215 E1000_GCR2 = 0x05B64, /* PCI-Ex Control #2 */
@@ -380,6 +382,7 @@ enum e1e_registers {
380#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE 382#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE
381#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE 383#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE
382#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF 384#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF
385#define E1000_DEV_ID_ICH10_D_BM_V 0x1525
383#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA 386#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA
384#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB 387#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB
385#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF 388#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF
@@ -828,6 +831,7 @@ struct e1000_mac_info {
828 u8 forced_speed_duplex; 831 u8 forced_speed_duplex;
829 832
830 bool adaptive_ifs; 833 bool adaptive_ifs;
834 bool has_fwsm;
831 bool arc_subsystem_valid; 835 bool arc_subsystem_valid;
832 bool autoneg; 836 bool autoneg;
833 bool autoneg_failed; 837 bool autoneg_failed;
@@ -898,6 +902,7 @@ struct e1000_fc_info {
898 u32 high_water; /* Flow control high-water mark */ 902 u32 high_water; /* Flow control high-water mark */
899 u32 low_water; /* Flow control low-water mark */ 903 u32 low_water; /* Flow control low-water mark */
900 u16 pause_time; /* Flow control pause timer */ 904 u16 pause_time; /* Flow control pause timer */
905 u16 refresh_time; /* Flow control refresh timer */
901 bool send_xon; /* Flow control send XON */ 906 bool send_xon; /* Flow control send XON */
902 bool strict_ieee; /* Strict IEEE mode */ 907 bool strict_ieee; /* Strict IEEE mode */
903 enum e1000_fc_mode current_mode; /* FC mode in effect */ 908 enum e1000_fc_mode current_mode; /* FC mode in effect */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8b5e157e9c87..b2507d93de99 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -83,6 +83,8 @@
83 83
84 84
85#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ 85#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */
86/* FW established a valid mode */
87#define E1000_ICH_FWSM_FW_VALID 0x00008000
86 88
87#define E1000_ICH_MNG_IAMT_MODE 0x2 89#define E1000_ICH_MNG_IAMT_MODE 0x2
88 90
@@ -259,6 +261,7 @@ static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
259static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) 261static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
260{ 262{
261 struct e1000_phy_info *phy = &hw->phy; 263 struct e1000_phy_info *phy = &hw->phy;
264 u32 ctrl;
262 s32 ret_val = 0; 265 s32 ret_val = 0;
263 266
264 phy->addr = 1; 267 phy->addr = 1;
@@ -274,6 +277,33 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
274 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; 277 phy->ops.power_down = e1000_power_down_phy_copper_ich8lan;
275 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; 278 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
276 279
280 if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
281 /*
282 * The MAC-PHY interconnect may still be in SMBus mode
283 * after Sx->S0. Toggle the LANPHYPC Value bit to force
284 * the interconnect to PCIe mode, but only if there is no
285 * firmware present otherwise firmware will have done it.
286 */
287 ctrl = er32(CTRL);
288 ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
289 ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
290 ew32(CTRL, ctrl);
291 udelay(10);
292 ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
293 ew32(CTRL, ctrl);
294 msleep(50);
295 }
296
297 /*
298 * Reset the PHY before any acccess to it. Doing so, ensures that
299 * the PHY is in a known good state before we read/write PHY registers.
300 * The generic reset is sufficient here, because we haven't determined
301 * the PHY type yet.
302 */
303 ret_val = e1000e_phy_hw_reset_generic(hw);
304 if (ret_val)
305 goto out;
306
277 phy->id = e1000_phy_unknown; 307 phy->id = e1000_phy_unknown;
278 ret_val = e1000e_get_phy_id(hw); 308 ret_val = e1000e_get_phy_id(hw);
279 if (ret_val) 309 if (ret_val)
@@ -300,6 +330,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
300 phy->ops.get_cable_length = e1000_get_cable_length_82577; 330 phy->ops.get_cable_length = e1000_get_cable_length_82577;
301 phy->ops.get_info = e1000_get_phy_info_82577; 331 phy->ops.get_info = e1000_get_phy_info_82577;
302 phy->ops.commit = e1000e_phy_sw_reset; 332 phy->ops.commit = e1000e_phy_sw_reset;
333 break;
303 case e1000_phy_82578: 334 case e1000_phy_82578:
304 phy->ops.check_polarity = e1000_check_polarity_m88; 335 phy->ops.check_polarity = e1000_check_polarity_m88;
305 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; 336 phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
@@ -472,8 +503,10 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
472 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; 503 mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
473 if (mac->type == e1000_ich8lan) 504 if (mac->type == e1000_ich8lan)
474 mac->rar_entry_count--; 505 mac->rar_entry_count--;
475 /* Set if manageability features are enabled. */ 506 /* FWSM register */
476 mac->arc_subsystem_valid = true; 507 mac->has_fwsm = true;
508 /* ARC subsystem not supported */
509 mac->arc_subsystem_valid = false;
477 /* Adaptive IFS supported */ 510 /* Adaptive IFS supported */
478 mac->adaptive_ifs = true; 511 mac->adaptive_ifs = true;
479 512
@@ -657,8 +690,6 @@ static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
657static void e1000_release_nvm_ich8lan(struct e1000_hw *hw) 690static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
658{ 691{
659 mutex_unlock(&nvm_mutex); 692 mutex_unlock(&nvm_mutex);
660
661 return;
662} 693}
663 694
664static DEFINE_MUTEX(swflag_mutex); 695static DEFINE_MUTEX(swflag_mutex);
@@ -737,8 +768,6 @@ static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
737 ew32(EXTCNF_CTRL, extcnf_ctrl); 768 ew32(EXTCNF_CTRL, extcnf_ctrl);
738 769
739 mutex_unlock(&swflag_mutex); 770 mutex_unlock(&swflag_mutex);
740
741 return;
742} 771}
743 772
744/** 773/**
@@ -785,11 +814,16 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
785 **/ 814 **/
786static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) 815static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
787{ 816{
817 struct e1000_adapter *adapter = hw->adapter;
788 struct e1000_phy_info *phy = &hw->phy; 818 struct e1000_phy_info *phy = &hw->phy;
789 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; 819 u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
790 s32 ret_val; 820 s32 ret_val = 0;
791 u16 word_addr, reg_data, reg_addr, phy_page = 0; 821 u16 word_addr, reg_data, reg_addr, phy_page = 0;
792 822
823 if (!(hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) &&
824 !(hw->mac.type == e1000_pchlan))
825 return ret_val;
826
793 ret_val = hw->phy.ops.acquire(hw); 827 ret_val = hw->phy.ops.acquire(hw);
794 if (ret_val) 828 if (ret_val)
795 return ret_val; 829 return ret_val;
@@ -801,97 +835,87 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
801 * Therefore, after each PHY reset, we will load the 835 * Therefore, after each PHY reset, we will load the
802 * configuration data out of the NVM manually. 836 * configuration data out of the NVM manually.
803 */ 837 */
804 if ((hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) || 838 if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
805 (hw->mac.type == e1000_pchlan)) { 839 (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
806 struct e1000_adapter *adapter = hw->adapter; 840 (hw->mac.type == e1000_pchlan))
807 841 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
808 /* Check if SW needs to configure the PHY */ 842 else
809 if ((adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M_AMT) || 843 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
810 (adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_M) ||
811 (hw->mac.type == e1000_pchlan))
812 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
813 else
814 sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
815 844
816 data = er32(FEXTNVM); 845 data = er32(FEXTNVM);
817 if (!(data & sw_cfg_mask)) 846 if (!(data & sw_cfg_mask))
818 goto out; 847 goto out;
819 848
820 /* Wait for basic configuration completes before proceeding */ 849 /*
821 e1000_lan_init_done_ich8lan(hw); 850 * Make sure HW does not configure LCD from PHY
851 * extended configuration before SW configuration
852 */
853 data = er32(EXTCNF_CTRL);
854 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
855 goto out;
856
857 cnf_size = er32(EXTCNF_SIZE);
858 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
859 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
860 if (!cnf_size)
861 goto out;
862
863 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
864 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
822 865
866 if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
867 (hw->mac.type == e1000_pchlan)) {
823 /* 868 /*
824 * Make sure HW does not configure LCD from PHY 869 * HW configures the SMBus address and LEDs when the
825 * extended configuration before SW configuration 870 * OEM and LCD Write Enable bits are set in the NVM.
871 * When both NVM bits are cleared, SW will configure
872 * them instead.
826 */ 873 */
827 data = er32(EXTCNF_CTRL); 874 data = er32(STRAP);
828 if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) 875 data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
876 reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
877 reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
878 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
879 reg_data);
880 if (ret_val)
829 goto out; 881 goto out;
830 882
831 cnf_size = er32(EXTCNF_SIZE); 883 data = er32(LEDCTL);
832 cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; 884 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
833 cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; 885 (u16)data);
834 if (!cnf_size) 886 if (ret_val)
835 goto out; 887 goto out;
888 }
836 889
837 cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; 890 /* Configure LCD from extended configuration region. */
838 cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
839
840 if (!(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) &&
841 (hw->mac.type == e1000_pchlan)) {
842 /*
843 * HW configures the SMBus address and LEDs when the
844 * OEM and LCD Write Enable bits are set in the NVM.
845 * When both NVM bits are cleared, SW will configure
846 * them instead.
847 */
848 data = er32(STRAP);
849 data &= E1000_STRAP_SMBUS_ADDRESS_MASK;
850 reg_data = data >> E1000_STRAP_SMBUS_ADDRESS_SHIFT;
851 reg_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
852 ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR,
853 reg_data);
854 if (ret_val)
855 goto out;
856
857 data = er32(LEDCTL);
858 ret_val = e1000_write_phy_reg_hv_locked(hw,
859 HV_LED_CONFIG,
860 (u16)data);
861 if (ret_val)
862 goto out;
863 }
864 /* Configure LCD from extended configuration region. */
865 891
866 /* cnf_base_addr is in DWORD */ 892 /* cnf_base_addr is in DWORD */
867 word_addr = (u16)(cnf_base_addr << 1); 893 word_addr = (u16)(cnf_base_addr << 1);
868 894
869 for (i = 0; i < cnf_size; i++) { 895 for (i = 0; i < cnf_size; i++) {
870 ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, 896 ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
871 &reg_data); 897 &reg_data);
872 if (ret_val) 898 if (ret_val)
873 goto out; 899 goto out;
874 900
875 ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), 901 ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
876 1, &reg_addr); 902 1, &reg_addr);
877 if (ret_val) 903 if (ret_val)
878 goto out; 904 goto out;
879 905
880 /* Save off the PHY page for future writes. */ 906 /* Save off the PHY page for future writes. */
881 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { 907 if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
882 phy_page = reg_data; 908 phy_page = reg_data;
883 continue; 909 continue;
884 } 910 }
885 911
886 reg_addr &= PHY_REG_MASK; 912 reg_addr &= PHY_REG_MASK;
887 reg_addr |= phy_page; 913 reg_addr |= phy_page;
888 914
889 ret_val = phy->ops.write_reg_locked(hw, 915 ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
890 (u32)reg_addr, 916 reg_data);
891 reg_data); 917 if (ret_val)
892 if (ret_val) 918 goto out;
893 goto out;
894 }
895 } 919 }
896 920
897out: 921out:
@@ -1229,30 +1253,26 @@ static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
1229} 1253}
1230 1254
1231/** 1255/**
1232 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset 1256 * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
1233 * @hw: pointer to the HW structure 1257 * @hw: pointer to the HW structure
1234 *
1235 * Resets the PHY
1236 * This is a function pointer entry point called by drivers
1237 * or other shared routines.
1238 **/ 1258 **/
1239static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) 1259static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
1240{ 1260{
1241 s32 ret_val = 0; 1261 s32 ret_val = 0;
1242 u16 reg; 1262 u16 reg;
1243 1263
1244 ret_val = e1000e_phy_hw_reset_generic(hw); 1264 if (e1000_check_reset_block(hw))
1245 if (ret_val) 1265 goto out;
1246 return ret_val;
1247
1248 /* Allow time for h/w to get to a quiescent state after reset */
1249 mdelay(10);
1250 1266
1251 /* Perform any necessary post-reset workarounds */ 1267 /* Perform any necessary post-reset workarounds */
1252 if (hw->mac.type == e1000_pchlan) { 1268 switch (hw->mac.type) {
1269 case e1000_pchlan:
1253 ret_val = e1000_hv_phy_workarounds_ich8lan(hw); 1270 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
1254 if (ret_val) 1271 if (ret_val)
1255 return ret_val; 1272 goto out;
1273 break;
1274 default:
1275 break;
1256 } 1276 }
1257 1277
1258 /* Dummy read to clear the phy wakeup bit after lcd reset */ 1278 /* Dummy read to clear the phy wakeup bit after lcd reset */
@@ -1265,11 +1285,32 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1265 goto out; 1285 goto out;
1266 1286
1267 /* Configure the LCD with the OEM bits in NVM */ 1287 /* Configure the LCD with the OEM bits in NVM */
1268 if (hw->mac.type == e1000_pchlan) 1288 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1269 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
1270 1289
1271out: 1290out:
1272 return 0; 1291 return ret_val;
1292}
1293
1294/**
1295 * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
1296 * @hw: pointer to the HW structure
1297 *
1298 * Resets the PHY
1299 * This is a function pointer entry point called by drivers
1300 * or other shared routines.
1301 **/
1302static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
1303{
1304 s32 ret_val = 0;
1305
1306 ret_val = e1000e_phy_hw_reset_generic(hw);
1307 if (ret_val)
1308 goto out;
1309
1310 ret_val = e1000_post_phy_reset_ich8lan(hw);
1311
1312out:
1313 return ret_val;
1273} 1314}
1274 1315
1275/** 1316/**
@@ -1622,7 +1663,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
1622 /* Check if the flash descriptor is valid */ 1663 /* Check if the flash descriptor is valid */
1623 if (hsfsts.hsf_status.fldesvalid == 0) { 1664 if (hsfsts.hsf_status.fldesvalid == 0) {
1624 e_dbg("Flash descriptor invalid. " 1665 e_dbg("Flash descriptor invalid. "
1625 "SW Sequencing must be used."); 1666 "SW Sequencing must be used.\n");
1626 return -E1000_ERR_NVM; 1667 return -E1000_ERR_NVM;
1627 } 1668 }
1628 1669
@@ -1671,7 +1712,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
1671 hsfsts.hsf_status.flcdone = 1; 1712 hsfsts.hsf_status.flcdone = 1;
1672 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 1713 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
1673 } else { 1714 } else {
1674 e_dbg("Flash controller busy, cannot get access"); 1715 e_dbg("Flash controller busy, cannot get access\n");
1675 } 1716 }
1676 } 1717 }
1677 1718
@@ -1822,7 +1863,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
1822 continue; 1863 continue;
1823 } else if (hsfsts.hsf_status.flcdone == 0) { 1864 } else if (hsfsts.hsf_status.flcdone == 0) {
1824 e_dbg("Timeout error - flash cycle " 1865 e_dbg("Timeout error - flash cycle "
1825 "did not complete."); 1866 "did not complete.\n");
1826 break; 1867 break;
1827 } 1868 }
1828 } 1869 }
@@ -1908,18 +1949,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1908 new_bank_offset = nvm->flash_bank_size; 1949 new_bank_offset = nvm->flash_bank_size;
1909 old_bank_offset = 0; 1950 old_bank_offset = 0;
1910 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); 1951 ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
1911 if (ret_val) { 1952 if (ret_val)
1912 nvm->ops.release(hw); 1953 goto release;
1913 goto out;
1914 }
1915 } else { 1954 } else {
1916 old_bank_offset = nvm->flash_bank_size; 1955 old_bank_offset = nvm->flash_bank_size;
1917 new_bank_offset = 0; 1956 new_bank_offset = 0;
1918 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); 1957 ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
1919 if (ret_val) { 1958 if (ret_val)
1920 nvm->ops.release(hw); 1959 goto release;
1921 goto out;
1922 }
1923 } 1960 }
1924 1961
1925 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { 1962 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
@@ -1975,8 +2012,7 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1975 if (ret_val) { 2012 if (ret_val) {
1976 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ 2013 /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
1977 e_dbg("Flash commit failed.\n"); 2014 e_dbg("Flash commit failed.\n");
1978 nvm->ops.release(hw); 2015 goto release;
1979 goto out;
1980 } 2016 }
1981 2017
1982 /* 2018 /*
@@ -1987,18 +2023,15 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
1987 */ 2023 */
1988 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; 2024 act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
1989 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); 2025 ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
1990 if (ret_val) { 2026 if (ret_val)
1991 nvm->ops.release(hw); 2027 goto release;
1992 goto out; 2028
1993 }
1994 data &= 0xBFFF; 2029 data &= 0xBFFF;
1995 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, 2030 ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
1996 act_offset * 2 + 1, 2031 act_offset * 2 + 1,
1997 (u8)(data >> 8)); 2032 (u8)(data >> 8));
1998 if (ret_val) { 2033 if (ret_val)
1999 nvm->ops.release(hw); 2034 goto release;
2000 goto out;
2001 }
2002 2035
2003 /* 2036 /*
2004 * And invalidate the previously valid segment by setting 2037 * And invalidate the previously valid segment by setting
@@ -2008,10 +2041,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2008 */ 2041 */
2009 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; 2042 act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
2010 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); 2043 ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
2011 if (ret_val) { 2044 if (ret_val)
2012 nvm->ops.release(hw); 2045 goto release;
2013 goto out;
2014 }
2015 2046
2016 /* Great! Everything worked, we can now clear the cached entries. */ 2047 /* Great! Everything worked, we can now clear the cached entries. */
2017 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { 2048 for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
@@ -2019,14 +2050,17 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
2019 dev_spec->shadow_ram[i].value = 0xFFFF; 2050 dev_spec->shadow_ram[i].value = 0xFFFF;
2020 } 2051 }
2021 2052
2053release:
2022 nvm->ops.release(hw); 2054 nvm->ops.release(hw);
2023 2055
2024 /* 2056 /*
2025 * Reload the EEPROM, or else modifications will not appear 2057 * Reload the EEPROM, or else modifications will not appear
2026 * until after the next adapter reset. 2058 * until after the next adapter reset.
2027 */ 2059 */
2028 e1000e_reload_nvm(hw); 2060 if (!ret_val) {
2029 msleep(10); 2061 e1000e_reload_nvm(hw);
2062 msleep(10);
2063 }
2030 2064
2031out: 2065out:
2032 if (ret_val) 2066 if (ret_val)
@@ -2487,9 +2521,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2487 * on the last TLP read/write transaction when MAC is reset. 2521 * on the last TLP read/write transaction when MAC is reset.
2488 */ 2522 */
2489 ret_val = e1000e_disable_pcie_master(hw); 2523 ret_val = e1000e_disable_pcie_master(hw);
2490 if (ret_val) { 2524 if (ret_val)
2491 e_dbg("PCI-E Master disable polling has failed.\n"); 2525 e_dbg("PCI-E Master disable polling has failed.\n");
2492 }
2493 2526
2494 e_dbg("Masking off all interrupts\n"); 2527 e_dbg("Masking off all interrupts\n");
2495 ew32(IMC, 0xffffffff); 2528 ew32(IMC, 0xffffffff);
@@ -2528,14 +2561,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2528 ctrl = er32(CTRL); 2561 ctrl = er32(CTRL);
2529 2562
2530 if (!e1000_check_reset_block(hw)) { 2563 if (!e1000_check_reset_block(hw)) {
2531 /* Clear PHY Reset Asserted bit */
2532 if (hw->mac.type >= e1000_pchlan) {
2533 u32 status = er32(STATUS);
2534 ew32(STATUS, status & ~E1000_STATUS_PHYRA);
2535 }
2536
2537 /* 2564 /*
2538 * PHY HW reset requires MAC CORE reset at the same 2565 * Full-chip reset requires MAC and PHY reset at the same
2539 * time to make sure the interface between MAC and the 2566 * time to make sure the interface between MAC and the
2540 * external PHY is reset. 2567 * external PHY is reset.
2541 */ 2568 */
@@ -2549,39 +2576,16 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2549 if (!ret_val) 2576 if (!ret_val)
2550 e1000_release_swflag_ich8lan(hw); 2577 e1000_release_swflag_ich8lan(hw);
2551 2578
2552 /* Perform any necessary post-reset workarounds */ 2579 if (ctrl & E1000_CTRL_PHY_RST) {
2553 if (hw->mac.type == e1000_pchlan)
2554 ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
2555
2556 if (ctrl & E1000_CTRL_PHY_RST)
2557 ret_val = hw->phy.ops.get_cfg_done(hw); 2580 ret_val = hw->phy.ops.get_cfg_done(hw);
2581 if (ret_val)
2582 goto out;
2558 2583
2559 if (hw->mac.type >= e1000_ich10lan) { 2584 ret_val = e1000_post_phy_reset_ich8lan(hw);
2560 e1000_lan_init_done_ich8lan(hw);
2561 } else {
2562 ret_val = e1000e_get_auto_rd_done(hw);
2563 if (ret_val) {
2564 /*
2565 * When auto config read does not complete, do not
2566 * return with an error. This can happen in situations
2567 * where there is no eeprom and prevents getting link.
2568 */
2569 e_dbg("Auto Read Done did not complete\n");
2570 }
2571 }
2572 /* Dummy read to clear the phy wakeup bit after lcd reset */
2573 if (hw->mac.type == e1000_pchlan)
2574 e1e_rphy(hw, BM_WUC, &reg);
2575
2576 ret_val = e1000_sw_lcd_config_ich8lan(hw);
2577 if (ret_val)
2578 goto out;
2579
2580 if (hw->mac.type == e1000_pchlan) {
2581 ret_val = e1000_oem_bits_config_ich8lan(hw, true);
2582 if (ret_val) 2585 if (ret_val)
2583 goto out; 2586 goto out;
2584 } 2587 }
2588
2585 /* 2589 /*
2586 * For PCH, this write will make sure that any noise 2590 * For PCH, this write will make sure that any noise
2587 * will be detected as a CRC error and be dropped rather than show up 2591 * will be detected as a CRC error and be dropped rather than show up
@@ -2748,8 +2752,6 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
2748 reg = er32(RFCTL); 2752 reg = er32(RFCTL);
2749 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); 2753 reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
2750 ew32(RFCTL, reg); 2754 ew32(RFCTL, reg);
2751
2752 return;
2753} 2755}
2754 2756
2755/** 2757/**
@@ -2799,6 +2801,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
2799 ew32(FCTTV, hw->fc.pause_time); 2801 ew32(FCTTV, hw->fc.pause_time);
2800 if ((hw->phy.type == e1000_phy_82578) || 2802 if ((hw->phy.type == e1000_phy_82578) ||
2801 (hw->phy.type == e1000_phy_82577)) { 2803 (hw->phy.type == e1000_phy_82577)) {
2804 ew32(FCRTV_PCH, hw->fc.refresh_time);
2805
2802 ret_val = hw->phy.ops.write_reg(hw, 2806 ret_val = hw->phy.ops.write_reg(hw,
2803 PHY_REG(BM_PORT_CTRL_PAGE, 27), 2807 PHY_REG(BM_PORT_CTRL_PAGE, 27),
2804 hw->fc.pause_time); 2808 hw->fc.pause_time);
@@ -3127,8 +3131,6 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3127 default: 3131 default:
3128 break; 3132 break;
3129 } 3133 }
3130
3131 return;
3132} 3134}
3133 3135
3134/** 3136/**
@@ -3265,33 +3267,50 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
3265} 3267}
3266 3268
3267/** 3269/**
3268 * e1000_get_cfg_done_ich8lan - Read config done bit 3270 * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
3269 * @hw: pointer to the HW structure 3271 * @hw: pointer to the HW structure
3270 * 3272 *
3271 * Read the management control register for the config done bit for 3273 * Read appropriate register for the config done bit for completion status
3272 * completion status. NOTE: silicon which is EEPROM-less will fail trying 3274 * and configure the PHY through s/w for EEPROM-less parts.
3273 * to read the config done bit, so an error is *ONLY* logged and returns 3275 *
3274 * 0. If we were to return with error, EEPROM-less silicon 3276 * NOTE: some silicon which is EEPROM-less will fail trying to read the
3275 * would not be able to be reset or change link. 3277 * config done bit, so only an error is logged and continues. If we were
3278 * to return with error, EEPROM-less silicon would not be able to be reset
3279 * or change link.
3276 **/ 3280 **/
3277static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) 3281static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
3278{ 3282{
3283 s32 ret_val = 0;
3279 u32 bank = 0; 3284 u32 bank = 0;
3285 u32 status;
3280 3286
3281 if (hw->mac.type >= e1000_pchlan) { 3287 e1000e_get_cfg_done(hw);
3282 u32 status = er32(STATUS);
3283 3288
3284 if (status & E1000_STATUS_PHYRA) 3289 /* Wait for indication from h/w that it has completed basic config */
3285 ew32(STATUS, status & ~E1000_STATUS_PHYRA); 3290 if (hw->mac.type >= e1000_ich10lan) {
3286 else 3291 e1000_lan_init_done_ich8lan(hw);
3287 e_dbg("PHY Reset Asserted not set - needs delay\n"); 3292 } else {
3293 ret_val = e1000e_get_auto_rd_done(hw);
3294 if (ret_val) {
3295 /*
3296 * When auto config read does not complete, do not
3297 * return with an error. This can happen in situations
3298 * where there is no eeprom and prevents getting link.
3299 */
3300 e_dbg("Auto Read Done did not complete\n");
3301 ret_val = 0;
3302 }
3288 } 3303 }
3289 3304
3290 e1000e_get_cfg_done(hw); 3305 /* Clear PHY Reset Asserted bit */
3306 status = er32(STATUS);
3307 if (status & E1000_STATUS_PHYRA)
3308 ew32(STATUS, status & ~E1000_STATUS_PHYRA);
3309 else
3310 e_dbg("PHY Reset Asserted not set - needs delay\n");
3291 3311
3292 /* If EEPROM is not marked present, init the IGP 3 PHY manually */ 3312 /* If EEPROM is not marked present, init the IGP 3 PHY manually */
3293 if ((hw->mac.type != e1000_ich10lan) && 3313 if (hw->mac.type <= e1000_ich9lan) {
3294 (hw->mac.type != e1000_pchlan)) {
3295 if (((er32(EECD) & E1000_EECD_PRES) == 0) && 3314 if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
3296 (hw->phy.type == e1000_phy_igp_3)) { 3315 (hw->phy.type == e1000_phy_igp_3)) {
3297 e1000e_phy_init_script_igp3(hw); 3316 e1000e_phy_init_script_igp3(hw);
@@ -3300,11 +3319,11 @@ static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
3300 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { 3319 if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
3301 /* Maybe we should do a basic PHY config */ 3320 /* Maybe we should do a basic PHY config */
3302 e_dbg("EEPROM not present\n"); 3321 e_dbg("EEPROM not present\n");
3303 return -E1000_ERR_CONFIG; 3322 ret_val = -E1000_ERR_CONFIG;
3304 } 3323 }
3305 } 3324 }
3306 3325
3307 return 0; 3326 return ret_val;
3308} 3327}
3309 3328
3310/** 3329/**
@@ -3320,8 +3339,6 @@ static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
3320 if (!(hw->mac.ops.check_mng_mode(hw) || 3339 if (!(hw->mac.ops.check_mng_mode(hw) ||
3321 hw->phy.ops.check_reset_block(hw))) 3340 hw->phy.ops.check_reset_block(hw)))
3322 e1000_power_down_phy_copper(hw); 3341 e1000_power_down_phy_copper(hw);
3323
3324 return;
3325} 3342}
3326 3343
3327/** 3344/**
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a8b2c0de27c4..a968e3a416ac 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1262,24 +1262,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
1262 u32 status; 1262 u32 status;
1263 1263
1264 status = er32(STATUS); 1264 status = er32(STATUS);
1265 if (status & E1000_STATUS_SPEED_1000) { 1265 if (status & E1000_STATUS_SPEED_1000)
1266 *speed = SPEED_1000; 1266 *speed = SPEED_1000;
1267 e_dbg("1000 Mbs, "); 1267 else if (status & E1000_STATUS_SPEED_100)
1268 } else if (status & E1000_STATUS_SPEED_100) {
1269 *speed = SPEED_100; 1268 *speed = SPEED_100;
1270 e_dbg("100 Mbs, "); 1269 else
1271 } else {
1272 *speed = SPEED_10; 1270 *speed = SPEED_10;
1273 e_dbg("10 Mbs, ");
1274 }
1275 1271
1276 if (status & E1000_STATUS_FD) { 1272 if (status & E1000_STATUS_FD)
1277 *duplex = FULL_DUPLEX; 1273 *duplex = FULL_DUPLEX;
1278 e_dbg("Full Duplex\n"); 1274 else
1279 } else {
1280 *duplex = HALF_DUPLEX; 1275 *duplex = HALF_DUPLEX;
1281 e_dbg("Half Duplex\n"); 1276
1282 } 1277 e_dbg("%u Mbps, %s Duplex\n",
1278 *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
1279 *duplex == FULL_DUPLEX ? "Full" : "Half");
1283 1280
1284 return 0; 1281 return 0;
1285} 1282}
@@ -2275,6 +2272,11 @@ static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
2275 u32 hicr; 2272 u32 hicr;
2276 u8 i; 2273 u8 i;
2277 2274
2275 if (!(hw->mac.arc_subsystem_valid)) {
2276 e_dbg("ARC subsystem not valid.\n");
2277 return -E1000_ERR_HOST_INTERFACE_COMMAND;
2278 }
2279
2278 /* Check that the host interface is enabled. */ 2280 /* Check that the host interface is enabled. */
2279 hicr = er32(HICR); 2281 hicr = er32(HICR);
2280 if ((hicr & E1000_HICR_EN) == 0) { 2282 if ((hicr & E1000_HICR_EN) == 0) {
@@ -2518,10 +2520,11 @@ s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
2518} 2520}
2519 2521
2520/** 2522/**
2521 * e1000e_enable_mng_pass_thru - Enable processing of ARP's 2523 * e1000e_enable_mng_pass_thru - Check if management passthrough is needed
2522 * @hw: pointer to the HW structure 2524 * @hw: pointer to the HW structure
2523 * 2525 *
2524 * Verifies the hardware needs to allow ARPs to be processed by the host. 2526 * Verifies the hardware needs to leave interface enabled so that frames can
2527 * be directed to and from the management interface.
2525 **/ 2528 **/
2526bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) 2529bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2527{ 2530{
@@ -2531,11 +2534,10 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2531 2534
2532 manc = er32(MANC); 2535 manc = er32(MANC);
2533 2536
2534 if (!(manc & E1000_MANC_RCV_TCO_EN) || 2537 if (!(manc & E1000_MANC_RCV_TCO_EN))
2535 !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) 2538 goto out;
2536 return ret_val;
2537 2539
2538 if (hw->mac.arc_subsystem_valid) { 2540 if (hw->mac.has_fwsm) {
2539 fwsm = er32(FWSM); 2541 fwsm = er32(FWSM);
2540 factps = er32(FACTPS); 2542 factps = er32(FACTPS);
2541 2543
@@ -2543,16 +2545,28 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2543 ((fwsm & E1000_FWSM_MODE_MASK) == 2545 ((fwsm & E1000_FWSM_MODE_MASK) ==
2544 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) { 2546 (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
2545 ret_val = true; 2547 ret_val = true;
2546 return ret_val; 2548 goto out;
2547 } 2549 }
2548 } else { 2550 } else if ((hw->mac.type == e1000_82574) ||
2549 if ((manc & E1000_MANC_SMBUS_EN) && 2551 (hw->mac.type == e1000_82583)) {
2550 !(manc & E1000_MANC_ASF_EN)) { 2552 u16 data;
2553
2554 factps = er32(FACTPS);
2555 e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
2556
2557 if (!(factps & E1000_FACTPS_MNGCG) &&
2558 ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
2559 (e1000_mng_mode_pt << 13))) {
2551 ret_val = true; 2560 ret_val = true;
2552 return ret_val; 2561 goto out;
2553 } 2562 }
2563 } else if ((manc & E1000_MANC_SMBUS_EN) &&
2564 !(manc & E1000_MANC_ASF_EN)) {
2565 ret_val = true;
2566 goto out;
2554 } 2567 }
2555 2568
2569out:
2556 return ret_val; 2570 return ret_val;
2557} 2571}
2558 2572
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 73d43c53015a..24507f3b8b17 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -26,6 +26,8 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include <linux/module.h> 31#include <linux/module.h>
30#include <linux/types.h> 32#include <linux/types.h>
31#include <linux/init.h> 33#include <linux/init.h>
@@ -45,11 +47,12 @@
45#include <linux/cpu.h> 47#include <linux/cpu.h>
46#include <linux/smp.h> 48#include <linux/smp.h>
47#include <linux/pm_qos_params.h> 49#include <linux/pm_qos_params.h>
50#include <linux/pm_runtime.h>
48#include <linux/aer.h> 51#include <linux/aer.h>
49 52
50#include "e1000.h" 53#include "e1000.h"
51 54
52#define DRV_VERSION "1.0.2-k2" 55#define DRV_VERSION "1.0.2-k4"
53char e1000e_driver_name[] = "e1000e"; 56char e1000e_driver_name[] = "e1000e";
54const char e1000e_driver_version[] = DRV_VERSION; 57const char e1000e_driver_version[] = DRV_VERSION;
55 58
@@ -66,6 +69,361 @@ static const struct e1000_info *e1000_info_tbl[] = {
66 [board_pchlan] = &e1000_pch_info, 69 [board_pchlan] = &e1000_pch_info,
67}; 70};
68 71
72struct e1000_reg_info {
73 u32 ofs;
74 char *name;
75};
76
77#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */
78#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */
79#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */
80#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */
81#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */
82
83#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
84#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
85#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
86#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
87#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
88
89static const struct e1000_reg_info e1000_reg_info_tbl[] = {
90
91 /* General Registers */
92 {E1000_CTRL, "CTRL"},
93 {E1000_STATUS, "STATUS"},
94 {E1000_CTRL_EXT, "CTRL_EXT"},
95
96 /* Interrupt Registers */
97 {E1000_ICR, "ICR"},
98
99 /* RX Registers */
100 {E1000_RCTL, "RCTL"},
101 {E1000_RDLEN, "RDLEN"},
102 {E1000_RDH, "RDH"},
103 {E1000_RDT, "RDT"},
104 {E1000_RDTR, "RDTR"},
105 {E1000_RXDCTL(0), "RXDCTL"},
106 {E1000_ERT, "ERT"},
107 {E1000_RDBAL, "RDBAL"},
108 {E1000_RDBAH, "RDBAH"},
109 {E1000_RDFH, "RDFH"},
110 {E1000_RDFT, "RDFT"},
111 {E1000_RDFHS, "RDFHS"},
112 {E1000_RDFTS, "RDFTS"},
113 {E1000_RDFPC, "RDFPC"},
114
115 /* TX Registers */
116 {E1000_TCTL, "TCTL"},
117 {E1000_TDBAL, "TDBAL"},
118 {E1000_TDBAH, "TDBAH"},
119 {E1000_TDLEN, "TDLEN"},
120 {E1000_TDH, "TDH"},
121 {E1000_TDT, "TDT"},
122 {E1000_TIDV, "TIDV"},
123 {E1000_TXDCTL(0), "TXDCTL"},
124 {E1000_TADV, "TADV"},
125 {E1000_TARC(0), "TARC"},
126 {E1000_TDFH, "TDFH"},
127 {E1000_TDFT, "TDFT"},
128 {E1000_TDFHS, "TDFHS"},
129 {E1000_TDFTS, "TDFTS"},
130 {E1000_TDFPC, "TDFPC"},
131
132 /* List Terminator */
133 {}
134};
135
136/*
137 * e1000_regdump - register printout routine
138 */
139static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
140{
141 int n = 0;
142 char rname[16];
143 u32 regs[8];
144
145 switch (reginfo->ofs) {
146 case E1000_RXDCTL(0):
147 for (n = 0; n < 2; n++)
148 regs[n] = __er32(hw, E1000_RXDCTL(n));
149 break;
150 case E1000_TXDCTL(0):
151 for (n = 0; n < 2; n++)
152 regs[n] = __er32(hw, E1000_TXDCTL(n));
153 break;
154 case E1000_TARC(0):
155 for (n = 0; n < 2; n++)
156 regs[n] = __er32(hw, E1000_TARC(n));
157 break;
158 default:
159 printk(KERN_INFO "%-15s %08x\n",
160 reginfo->name, __er32(hw, reginfo->ofs));
161 return;
162 }
163
164 snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
165 printk(KERN_INFO "%-15s ", rname);
166 for (n = 0; n < 2; n++)
167 printk(KERN_CONT "%08x ", regs[n]);
168 printk(KERN_CONT "\n");
169}
170
171
172/*
173 * e1000e_dump - Print registers, tx-ring and rx-ring
174 */
175static void e1000e_dump(struct e1000_adapter *adapter)
176{
177 struct net_device *netdev = adapter->netdev;
178 struct e1000_hw *hw = &adapter->hw;
179 struct e1000_reg_info *reginfo;
180 struct e1000_ring *tx_ring = adapter->tx_ring;
181 struct e1000_tx_desc *tx_desc;
182 struct my_u0 { u64 a; u64 b; } *u0;
183 struct e1000_buffer *buffer_info;
184 struct e1000_ring *rx_ring = adapter->rx_ring;
185 union e1000_rx_desc_packet_split *rx_desc_ps;
186 struct e1000_rx_desc *rx_desc;
187 struct my_u1 { u64 a; u64 b; u64 c; u64 d; } *u1;
188 u32 staterr;
189 int i = 0;
190
191 if (!netif_msg_hw(adapter))
192 return;
193
194 /* Print netdevice Info */
195 if (netdev) {
196 dev_info(&adapter->pdev->dev, "Net device Info\n");
197 printk(KERN_INFO "Device Name state "
198 "trans_start last_rx\n");
199 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
200 netdev->name,
201 netdev->state,
202 netdev->trans_start,
203 netdev->last_rx);
204 }
205
206 /* Print Registers */
207 dev_info(&adapter->pdev->dev, "Register Dump\n");
208 printk(KERN_INFO " Register Name Value\n");
209 for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
210 reginfo->name; reginfo++) {
211 e1000_regdump(hw, reginfo);
212 }
213
214 /* Print TX Ring Summary */
215 if (!netdev || !netif_running(netdev))
216 goto exit;
217
218 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
219 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
220 " leng ntw timestamp\n");
221 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
222 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
223 0, tx_ring->next_to_use, tx_ring->next_to_clean,
224 (u64)buffer_info->dma,
225 buffer_info->length,
226 buffer_info->next_to_watch,
227 (u64)buffer_info->time_stamp);
228
229 /* Print TX Rings */
230 if (!netif_msg_tx_done(adapter))
231 goto rx_ring_summary;
232
233 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
234
235 /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
236 *
237 * Legacy Transmit Descriptor
238 * +--------------------------------------------------------------+
239 * 0 | Buffer Address [63:0] (Reserved on Write Back) |
240 * +--------------------------------------------------------------+
241 * 8 | Special | CSS | Status | CMD | CSO | Length |
242 * +--------------------------------------------------------------+
243 * 63 48 47 36 35 32 31 24 23 16 15 0
244 *
245 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
246 * 63 48 47 40 39 32 31 16 15 8 7 0
247 * +----------------------------------------------------------------+
248 * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS |
249 * +----------------------------------------------------------------+
250 * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN |
251 * +----------------------------------------------------------------+
252 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
253 *
254 * Extended Data Descriptor (DTYP=0x1)
255 * +----------------------------------------------------------------+
256 * 0 | Buffer Address [63:0] |
257 * +----------------------------------------------------------------+
258 * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN |
259 * +----------------------------------------------------------------+
260 * 63 48 47 40 39 36 35 32 31 24 23 20 19 0
261 */
262 printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
263 " [bi->dma ] leng ntw timestamp bi->skb "
264 "<-- Legacy format\n");
265 printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
266 " [bi->dma ] leng ntw timestamp bi->skb "
267 "<-- Ext Context format\n");
268 printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
269 " [bi->dma ] leng ntw timestamp bi->skb "
270 "<-- Ext Data format\n");
271 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
272 tx_desc = E1000_TX_DESC(*tx_ring, i);
273 buffer_info = &tx_ring->buffer_info[i];
274 u0 = (struct my_u0 *)tx_desc;
275 printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
276 "%04X %3X %016llX %p",
277 (!(le64_to_cpu(u0->b) & (1<<29)) ? 'l' :
278 ((le64_to_cpu(u0->b) & (1<<20)) ? 'd' : 'c')), i,
279 le64_to_cpu(u0->a), le64_to_cpu(u0->b),
280 (u64)buffer_info->dma, buffer_info->length,
281 buffer_info->next_to_watch, (u64)buffer_info->time_stamp,
282 buffer_info->skb);
283 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
284 printk(KERN_CONT " NTC/U\n");
285 else if (i == tx_ring->next_to_use)
286 printk(KERN_CONT " NTU\n");
287 else if (i == tx_ring->next_to_clean)
288 printk(KERN_CONT " NTC\n");
289 else
290 printk(KERN_CONT "\n");
291
292 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
293 print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
294 16, 1, phys_to_virt(buffer_info->dma),
295 buffer_info->length, true);
296 }
297
298 /* Print RX Rings Summary */
299rx_ring_summary:
300 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
301 printk(KERN_INFO "Queue [NTU] [NTC]\n");
302 printk(KERN_INFO " %5d %5X %5X\n", 0,
303 rx_ring->next_to_use, rx_ring->next_to_clean);
304
305 /* Print RX Rings */
306 if (!netif_msg_rx_status(adapter))
307 goto exit;
308
309 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
310 switch (adapter->rx_ps_pages) {
311 case 1:
312 case 2:
313 case 3:
314 /* [Extended] Packet Split Receive Descriptor Format
315 *
316 * +-----------------------------------------------------+
317 * 0 | Buffer Address 0 [63:0] |
318 * +-----------------------------------------------------+
319 * 8 | Buffer Address 1 [63:0] |
320 * +-----------------------------------------------------+
321 * 16 | Buffer Address 2 [63:0] |
322 * +-----------------------------------------------------+
323 * 24 | Buffer Address 3 [63:0] |
324 * +-----------------------------------------------------+
325 */
326 printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
327 "[buffer 1 63:0 ] "
328 "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
329 "[bi->skb] <-- Ext Pkt Split format\n");
330 /* [Extended] Receive Descriptor (Write-Back) Format
331 *
332 * 63 48 47 32 31 13 12 8 7 4 3 0
333 * +------------------------------------------------------+
334 * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS |
335 * | Checksum | Ident | | Queue | | Type |
336 * +------------------------------------------------------+
337 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
338 * +------------------------------------------------------+
339 * 63 48 47 32 31 20 19 0
340 */
341 printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
342 "[vl l0 ee es] "
343 "[ l3 l2 l1 hs] [reserved ] ---------------- "
344 "[bi->skb] <-- Ext Rx Write-Back format\n");
345 for (i = 0; i < rx_ring->count; i++) {
346 buffer_info = &rx_ring->buffer_info[i];
347 rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
348 u1 = (struct my_u1 *)rx_desc_ps;
349 staterr =
350 le32_to_cpu(rx_desc_ps->wb.middle.status_error);
351 if (staterr & E1000_RXD_STAT_DD) {
352 /* Descriptor Done */
353 printk(KERN_INFO "RWB[0x%03X] %016llX "
354 "%016llX %016llX %016llX "
355 "---------------- %p", i,
356 le64_to_cpu(u1->a),
357 le64_to_cpu(u1->b),
358 le64_to_cpu(u1->c),
359 le64_to_cpu(u1->d),
360 buffer_info->skb);
361 } else {
362 printk(KERN_INFO "R [0x%03X] %016llX "
363 "%016llX %016llX %016llX %016llX %p", i,
364 le64_to_cpu(u1->a),
365 le64_to_cpu(u1->b),
366 le64_to_cpu(u1->c),
367 le64_to_cpu(u1->d),
368 (u64)buffer_info->dma,
369 buffer_info->skb);
370
371 if (netif_msg_pktdata(adapter))
372 print_hex_dump(KERN_INFO, "",
373 DUMP_PREFIX_ADDRESS, 16, 1,
374 phys_to_virt(buffer_info->dma),
375 adapter->rx_ps_bsize0, true);
376 }
377
378 if (i == rx_ring->next_to_use)
379 printk(KERN_CONT " NTU\n");
380 else if (i == rx_ring->next_to_clean)
381 printk(KERN_CONT " NTC\n");
382 else
383 printk(KERN_CONT "\n");
384 }
385 break;
386 default:
387 case 0:
388 /* Legacy Receive Descriptor Format
389 *
390 * +-----------------------------------------------------+
391 * | Buffer Address [63:0] |
392 * +-----------------------------------------------------+
393 * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
394 * +-----------------------------------------------------+
395 * 63 48 47 40 39 32 31 16 15 0
396 */
397 printk(KERN_INFO "Rl[desc] [address 63:0 ] "
398 "[vl er S cks ln] [bi->dma ] [bi->skb] "
399 "<-- Legacy format\n");
400 for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
401 rx_desc = E1000_RX_DESC(*rx_ring, i);
402 buffer_info = &rx_ring->buffer_info[i];
403 u0 = (struct my_u0 *)rx_desc;
404 printk(KERN_INFO "Rl[0x%03X] %016llX %016llX "
405 "%016llX %p",
406 i, le64_to_cpu(u0->a), le64_to_cpu(u0->b),
407 (u64)buffer_info->dma, buffer_info->skb);
408 if (i == rx_ring->next_to_use)
409 printk(KERN_CONT " NTU\n");
410 else if (i == rx_ring->next_to_clean)
411 printk(KERN_CONT " NTC\n");
412 else
413 printk(KERN_CONT "\n");
414
415 if (netif_msg_pktdata(adapter))
416 print_hex_dump(KERN_INFO, "",
417 DUMP_PREFIX_ADDRESS,
418 16, 1, phys_to_virt(buffer_info->dma),
419 adapter->rx_buffer_len, true);
420 }
421 }
422
423exit:
424 return;
425}
426
69/** 427/**
70 * e1000_desc_unused - calculate if we have unused descriptors 428 * e1000_desc_unused - calculate if we have unused descriptors
71 **/ 429 **/
@@ -178,10 +536,10 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
178 536
179 buffer_info->skb = skb; 537 buffer_info->skb = skb;
180map_skb: 538map_skb:
181 buffer_info->dma = pci_map_single(pdev, skb->data, 539 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
182 adapter->rx_buffer_len, 540 adapter->rx_buffer_len,
183 PCI_DMA_FROMDEVICE); 541 DMA_FROM_DEVICE);
184 if (pci_dma_mapping_error(pdev, buffer_info->dma)) { 542 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
185 dev_err(&pdev->dev, "RX DMA map failed\n"); 543 dev_err(&pdev->dev, "RX DMA map failed\n");
186 adapter->rx_dma_failed++; 544 adapter->rx_dma_failed++;
187 break; 545 break;
@@ -190,26 +548,23 @@ map_skb:
190 rx_desc = E1000_RX_DESC(*rx_ring, i); 548 rx_desc = E1000_RX_DESC(*rx_ring, i);
191 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 549 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
192 550
551 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
552 /*
553 * Force memory writes to complete before letting h/w
554 * know there are new descriptors to fetch. (Only
555 * applicable for weak-ordered memory model archs,
556 * such as IA-64).
557 */
558 wmb();
559 writel(i, adapter->hw.hw_addr + rx_ring->tail);
560 }
193 i++; 561 i++;
194 if (i == rx_ring->count) 562 if (i == rx_ring->count)
195 i = 0; 563 i = 0;
196 buffer_info = &rx_ring->buffer_info[i]; 564 buffer_info = &rx_ring->buffer_info[i];
197 } 565 }
198 566
199 if (rx_ring->next_to_use != i) { 567 rx_ring->next_to_use = i;
200 rx_ring->next_to_use = i;
201 if (i-- == 0)
202 i = (rx_ring->count - 1);
203
204 /*
205 * Force memory writes to complete before letting h/w
206 * know there are new descriptors to fetch. (Only
207 * applicable for weak-ordered memory model archs,
208 * such as IA-64).
209 */
210 wmb();
211 writel(i, adapter->hw.hw_addr + rx_ring->tail);
212 }
213} 568}
214 569
215/** 570/**
@@ -247,11 +602,12 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
247 adapter->alloc_rx_buff_failed++; 602 adapter->alloc_rx_buff_failed++;
248 goto no_buffers; 603 goto no_buffers;
249 } 604 }
250 ps_page->dma = pci_map_page(pdev, 605 ps_page->dma = dma_map_page(&pdev->dev,
251 ps_page->page, 606 ps_page->page,
252 0, PAGE_SIZE, 607 0, PAGE_SIZE,
253 PCI_DMA_FROMDEVICE); 608 DMA_FROM_DEVICE);
254 if (pci_dma_mapping_error(pdev, ps_page->dma)) { 609 if (dma_mapping_error(&pdev->dev,
610 ps_page->dma)) {
255 dev_err(&adapter->pdev->dev, 611 dev_err(&adapter->pdev->dev,
256 "RX DMA page map failed\n"); 612 "RX DMA page map failed\n");
257 adapter->rx_dma_failed++; 613 adapter->rx_dma_failed++;
@@ -276,10 +632,10 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
276 } 632 }
277 633
278 buffer_info->skb = skb; 634 buffer_info->skb = skb;
279 buffer_info->dma = pci_map_single(pdev, skb->data, 635 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
280 adapter->rx_ps_bsize0, 636 adapter->rx_ps_bsize0,
281 PCI_DMA_FROMDEVICE); 637 DMA_FROM_DEVICE);
282 if (pci_dma_mapping_error(pdev, buffer_info->dma)) { 638 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
283 dev_err(&pdev->dev, "RX DMA map failed\n"); 639 dev_err(&pdev->dev, "RX DMA map failed\n");
284 adapter->rx_dma_failed++; 640 adapter->rx_dma_failed++;
285 /* cleanup skb */ 641 /* cleanup skb */
@@ -290,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
290 646
291 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); 647 rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
292 648
649 if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
650 /*
651 * Force memory writes to complete before letting h/w
652 * know there are new descriptors to fetch. (Only
653 * applicable for weak-ordered memory model archs,
654 * such as IA-64).
655 */
656 wmb();
657 writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
658 }
659
293 i++; 660 i++;
294 if (i == rx_ring->count) 661 if (i == rx_ring->count)
295 i = 0; 662 i = 0;
@@ -297,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
297 } 664 }
298 665
299no_buffers: 666no_buffers:
300 if (rx_ring->next_to_use != i) { 667 rx_ring->next_to_use = i;
301 rx_ring->next_to_use = i;
302
303 if (!(i--))
304 i = (rx_ring->count - 1);
305
306 /*
307 * Force memory writes to complete before letting h/w
308 * know there are new descriptors to fetch. (Only
309 * applicable for weak-ordered memory model archs,
310 * such as IA-64).
311 */
312 wmb();
313 /*
314 * Hardware increments by 16 bytes, but packet split
315 * descriptors are 32 bytes...so we increment tail
316 * twice as much.
317 */
318 writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
319 }
320} 668}
321 669
322/** 670/**
@@ -366,10 +714,10 @@ check_page:
366 } 714 }
367 715
368 if (!buffer_info->dma) 716 if (!buffer_info->dma)
369 buffer_info->dma = pci_map_page(pdev, 717 buffer_info->dma = dma_map_page(&pdev->dev,
370 buffer_info->page, 0, 718 buffer_info->page, 0,
371 PAGE_SIZE, 719 PAGE_SIZE,
372 PCI_DMA_FROMDEVICE); 720 DMA_FROM_DEVICE);
373 721
374 rx_desc = E1000_RX_DESC(*rx_ring, i); 722 rx_desc = E1000_RX_DESC(*rx_ring, i);
375 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 723 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -443,10 +791,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
443 791
444 cleaned = 1; 792 cleaned = 1;
445 cleaned_count++; 793 cleaned_count++;
446 pci_unmap_single(pdev, 794 dma_unmap_single(&pdev->dev,
447 buffer_info->dma, 795 buffer_info->dma,
448 adapter->rx_buffer_len, 796 adapter->rx_buffer_len,
449 PCI_DMA_FROMDEVICE); 797 DMA_FROM_DEVICE);
450 buffer_info->dma = 0; 798 buffer_info->dma = 0;
451 799
452 length = le16_to_cpu(rx_desc->length); 800 length = le16_to_cpu(rx_desc->length);
@@ -547,12 +895,11 @@ static void e1000_put_txbuf(struct e1000_adapter *adapter,
547{ 895{
548 if (buffer_info->dma) { 896 if (buffer_info->dma) {
549 if (buffer_info->mapped_as_page) 897 if (buffer_info->mapped_as_page)
550 pci_unmap_page(adapter->pdev, buffer_info->dma, 898 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
551 buffer_info->length, PCI_DMA_TODEVICE); 899 buffer_info->length, DMA_TO_DEVICE);
552 else 900 else
553 pci_unmap_single(adapter->pdev, buffer_info->dma, 901 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
554 buffer_info->length, 902 buffer_info->length, DMA_TO_DEVICE);
555 PCI_DMA_TODEVICE);
556 buffer_info->dma = 0; 903 buffer_info->dma = 0;
557 } 904 }
558 if (buffer_info->skb) { 905 if (buffer_info->skb) {
@@ -643,14 +990,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
643 cleaned = (i == eop); 990 cleaned = (i == eop);
644 991
645 if (cleaned) { 992 if (cleaned) {
646 struct sk_buff *skb = buffer_info->skb; 993 total_tx_packets += buffer_info->segs;
647 unsigned int segs, bytecount; 994 total_tx_bytes += buffer_info->bytecount;
648 segs = skb_shinfo(skb)->gso_segs ?: 1;
649 /* multiply data chunks by size of headers */
650 bytecount = ((segs - 1) * skb_headlen(skb)) +
651 skb->len;
652 total_tx_packets += segs;
653 total_tx_bytes += bytecount;
654 } 995 }
655 996
656 e1000_put_txbuf(adapter, buffer_info); 997 e1000_put_txbuf(adapter, buffer_info);
@@ -753,9 +1094,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
753 1094
754 cleaned = 1; 1095 cleaned = 1;
755 cleaned_count++; 1096 cleaned_count++;
756 pci_unmap_single(pdev, buffer_info->dma, 1097 dma_unmap_single(&pdev->dev, buffer_info->dma,
757 adapter->rx_ps_bsize0, 1098 adapter->rx_ps_bsize0,
758 PCI_DMA_FROMDEVICE); 1099 DMA_FROM_DEVICE);
759 buffer_info->dma = 0; 1100 buffer_info->dma = 0;
760 1101
761 /* see !EOP comment in other rx routine */ 1102 /* see !EOP comment in other rx routine */
@@ -811,13 +1152,13 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
811 * kmap_atomic, so we can't hold the mapping 1152 * kmap_atomic, so we can't hold the mapping
812 * very long 1153 * very long
813 */ 1154 */
814 pci_dma_sync_single_for_cpu(pdev, ps_page->dma, 1155 dma_sync_single_for_cpu(&pdev->dev, ps_page->dma,
815 PAGE_SIZE, PCI_DMA_FROMDEVICE); 1156 PAGE_SIZE, DMA_FROM_DEVICE);
816 vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ); 1157 vaddr = kmap_atomic(ps_page->page, KM_SKB_DATA_SOFTIRQ);
817 memcpy(skb_tail_pointer(skb), vaddr, l1); 1158 memcpy(skb_tail_pointer(skb), vaddr, l1);
818 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); 1159 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
819 pci_dma_sync_single_for_device(pdev, ps_page->dma, 1160 dma_sync_single_for_device(&pdev->dev, ps_page->dma,
820 PAGE_SIZE, PCI_DMA_FROMDEVICE); 1161 PAGE_SIZE, DMA_FROM_DEVICE);
821 1162
822 /* remove the CRC */ 1163 /* remove the CRC */
823 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) 1164 if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
@@ -834,8 +1175,8 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
834 break; 1175 break;
835 1176
836 ps_page = &buffer_info->ps_pages[j]; 1177 ps_page = &buffer_info->ps_pages[j];
837 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, 1178 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
838 PCI_DMA_FROMDEVICE); 1179 DMA_FROM_DEVICE);
839 ps_page->dma = 0; 1180 ps_page->dma = 0;
840 skb_fill_page_desc(skb, j, ps_page->page, 0, length); 1181 skb_fill_page_desc(skb, j, ps_page->page, 0, length);
841 ps_page->page = NULL; 1182 ps_page->page = NULL;
@@ -953,8 +1294,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
953 1294
954 cleaned = true; 1295 cleaned = true;
955 cleaned_count++; 1296 cleaned_count++;
956 pci_unmap_page(pdev, buffer_info->dma, PAGE_SIZE, 1297 dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
957 PCI_DMA_FROMDEVICE); 1298 DMA_FROM_DEVICE);
958 buffer_info->dma = 0; 1299 buffer_info->dma = 0;
959 1300
960 length = le16_to_cpu(rx_desc->length); 1301 length = le16_to_cpu(rx_desc->length);
@@ -1090,17 +1431,17 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1090 buffer_info = &rx_ring->buffer_info[i]; 1431 buffer_info = &rx_ring->buffer_info[i];
1091 if (buffer_info->dma) { 1432 if (buffer_info->dma) {
1092 if (adapter->clean_rx == e1000_clean_rx_irq) 1433 if (adapter->clean_rx == e1000_clean_rx_irq)
1093 pci_unmap_single(pdev, buffer_info->dma, 1434 dma_unmap_single(&pdev->dev, buffer_info->dma,
1094 adapter->rx_buffer_len, 1435 adapter->rx_buffer_len,
1095 PCI_DMA_FROMDEVICE); 1436 DMA_FROM_DEVICE);
1096 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) 1437 else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
1097 pci_unmap_page(pdev, buffer_info->dma, 1438 dma_unmap_page(&pdev->dev, buffer_info->dma,
1098 PAGE_SIZE, 1439 PAGE_SIZE,
1099 PCI_DMA_FROMDEVICE); 1440 DMA_FROM_DEVICE);
1100 else if (adapter->clean_rx == e1000_clean_rx_irq_ps) 1441 else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
1101 pci_unmap_single(pdev, buffer_info->dma, 1442 dma_unmap_single(&pdev->dev, buffer_info->dma,
1102 adapter->rx_ps_bsize0, 1443 adapter->rx_ps_bsize0,
1103 PCI_DMA_FROMDEVICE); 1444 DMA_FROM_DEVICE);
1104 buffer_info->dma = 0; 1445 buffer_info->dma = 0;
1105 } 1446 }
1106 1447
@@ -1118,8 +1459,8 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
1118 ps_page = &buffer_info->ps_pages[j]; 1459 ps_page = &buffer_info->ps_pages[j];
1119 if (!ps_page->page) 1460 if (!ps_page->page)
1120 break; 1461 break;
1121 pci_unmap_page(pdev, ps_page->dma, PAGE_SIZE, 1462 dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
1122 PCI_DMA_FROMDEVICE); 1463 DMA_FROM_DEVICE);
1123 ps_page->dma = 0; 1464 ps_page->dma = 0;
1124 put_page(ps_page->page); 1465 put_page(ps_page->page);
1125 ps_page->page = NULL; 1466 ps_page->page = NULL;
@@ -1426,8 +1767,6 @@ void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
1426 pci_disable_msi(adapter->pdev); 1767 pci_disable_msi(adapter->pdev);
1427 adapter->flags &= ~FLAG_MSI_ENABLED; 1768 adapter->flags &= ~FLAG_MSI_ENABLED;
1428 } 1769 }
1429
1430 return;
1431} 1770}
1432 1771
1433/** 1772/**
@@ -1479,8 +1818,6 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1479 /* Don't do anything; this is the system default */ 1818 /* Don't do anything; this is the system default */
1480 break; 1819 break;
1481 } 1820 }
1482
1483 return;
1484} 1821}
1485 1822
1486/** 1823/**
@@ -2185,10 +2522,10 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
2185 } 2522 }
2186} 2523}
2187 2524
2188static void e1000_init_manageability(struct e1000_adapter *adapter) 2525static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2189{ 2526{
2190 struct e1000_hw *hw = &adapter->hw; 2527 struct e1000_hw *hw = &adapter->hw;
2191 u32 manc, manc2h; 2528 u32 manc, manc2h, mdef, i, j;
2192 2529
2193 if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) 2530 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2194 return; 2531 return;
@@ -2202,10 +2539,49 @@ static void e1000_init_manageability(struct e1000_adapter *adapter)
2202 */ 2539 */
2203 manc |= E1000_MANC_EN_MNG2HOST; 2540 manc |= E1000_MANC_EN_MNG2HOST;
2204 manc2h = er32(MANC2H); 2541 manc2h = er32(MANC2H);
2205#define E1000_MNG2HOST_PORT_623 (1 << 5) 2542
2206#define E1000_MNG2HOST_PORT_664 (1 << 6) 2543 switch (hw->mac.type) {
2207 manc2h |= E1000_MNG2HOST_PORT_623; 2544 default:
2208 manc2h |= E1000_MNG2HOST_PORT_664; 2545 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2546 break;
2547 case e1000_82574:
2548 case e1000_82583:
2549 /*
2550 * Check if IPMI pass-through decision filter already exists;
2551 * if so, enable it.
2552 */
2553 for (i = 0, j = 0; i < 8; i++) {
2554 mdef = er32(MDEF(i));
2555
2556 /* Ignore filters with anything other than IPMI ports */
2557 if (mdef & !(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2558 continue;
2559
2560 /* Enable this decision filter in MANC2H */
2561 if (mdef)
2562 manc2h |= (1 << i);
2563
2564 j |= mdef;
2565 }
2566
2567 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2568 break;
2569
2570 /* Create new decision filter in an empty filter */
2571 for (i = 0, j = 0; i < 8; i++)
2572 if (er32(MDEF(i)) == 0) {
2573 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2574 E1000_MDEF_PORT_664));
2575 manc2h |= (1 << 1);
2576 j++;
2577 break;
2578 }
2579
2580 if (!j)
2581 e_warn("Unable to create IPMI pass-through filter\n");
2582 break;
2583 }
2584
2209 ew32(MANC2H, manc2h); 2585 ew32(MANC2H, manc2h);
2210 ew32(MANC, manc); 2586 ew32(MANC, manc);
2211} 2587}
@@ -2524,12 +2900,12 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
2524 * excessive C-state transition latencies result in 2900 * excessive C-state transition latencies result in
2525 * dropped transactions. 2901 * dropped transactions.
2526 */ 2902 */
2527 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, 2903 pm_qos_update_request(
2528 adapter->netdev->name, 55); 2904 adapter->netdev->pm_qos_req, 55);
2529 } else { 2905 } else {
2530 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, 2906 pm_qos_update_request(
2531 adapter->netdev->name, 2907 adapter->netdev->pm_qos_req,
2532 PM_QOS_DEFAULT_VALUE); 2908 PM_QOS_DEFAULT_VALUE);
2533 } 2909 }
2534 } 2910 }
2535 2911
@@ -2565,7 +2941,7 @@ static void e1000_set_multi(struct net_device *netdev)
2565{ 2941{
2566 struct e1000_adapter *adapter = netdev_priv(netdev); 2942 struct e1000_adapter *adapter = netdev_priv(netdev);
2567 struct e1000_hw *hw = &adapter->hw; 2943 struct e1000_hw *hw = &adapter->hw;
2568 struct dev_mc_list *mc_ptr; 2944 struct netdev_hw_addr *ha;
2569 u8 *mta_list; 2945 u8 *mta_list;
2570 u32 rctl; 2946 u32 rctl;
2571 int i; 2947 int i;
@@ -2597,9 +2973,8 @@ static void e1000_set_multi(struct net_device *netdev)
2597 2973
2598 /* prepare a packed array of only addresses. */ 2974 /* prepare a packed array of only addresses. */
2599 i = 0; 2975 i = 0;
2600 netdev_for_each_mc_addr(mc_ptr, netdev) 2976 netdev_for_each_mc_addr(ha, netdev)
2601 memcpy(mta_list + (i++ * ETH_ALEN), 2977 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
2602 mc_ptr->dmi_addr, ETH_ALEN);
2603 2978
2604 e1000_update_mc_addr_list(hw, mta_list, i); 2979 e1000_update_mc_addr_list(hw, mta_list, i);
2605 kfree(mta_list); 2980 kfree(mta_list);
@@ -2621,7 +2996,7 @@ static void e1000_configure(struct e1000_adapter *adapter)
2621 e1000_set_multi(adapter->netdev); 2996 e1000_set_multi(adapter->netdev);
2622 2997
2623 e1000_restore_vlan(adapter); 2998 e1000_restore_vlan(adapter);
2624 e1000_init_manageability(adapter); 2999 e1000_init_manageability_pt(adapter);
2625 3000
2626 e1000_configure_tx(adapter); 3001 e1000_configure_tx(adapter);
2627 e1000_setup_rctl(adapter); 3002 e1000_setup_rctl(adapter);
@@ -2755,6 +3130,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
2755 fc->high_water = 0x5000; 3130 fc->high_water = 0x5000;
2756 fc->low_water = 0x3000; 3131 fc->low_water = 0x3000;
2757 } 3132 }
3133 fc->refresh_time = 0x1000;
2758 } else { 3134 } else {
2759 if ((adapter->flags & FLAG_HAS_ERT) && 3135 if ((adapter->flags & FLAG_HAS_ERT) &&
2760 (adapter->netdev->mtu > ETH_DATA_LEN)) 3136 (adapter->netdev->mtu > ETH_DATA_LEN))
@@ -2792,10 +3168,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
2792 if (mac->ops.init_hw(hw)) 3168 if (mac->ops.init_hw(hw))
2793 e_err("Hardware Error\n"); 3169 e_err("Hardware Error\n");
2794 3170
2795 /* additional part of the flow-control workaround above */
2796 if (hw->mac.type == e1000_pchlan)
2797 ew32(FCRTV_PCH, 0x1000);
2798
2799 e1000_update_mng_vlan(adapter); 3171 e1000_update_mng_vlan(adapter);
2800 3172
2801 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 3173 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -2824,8 +3196,8 @@ int e1000e_up(struct e1000_adapter *adapter)
2824 3196
2825 /* DMA latency requirement to workaround early-receive/jumbo issue */ 3197 /* DMA latency requirement to workaround early-receive/jumbo issue */
2826 if (adapter->flags & FLAG_HAS_ERT) 3198 if (adapter->flags & FLAG_HAS_ERT)
2827 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, 3199 adapter->netdev->pm_qos_req =
2828 adapter->netdev->name, 3200 pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
2829 PM_QOS_DEFAULT_VALUE); 3201 PM_QOS_DEFAULT_VALUE);
2830 3202
2831 /* hardware has been reset, we need to reload some things */ 3203 /* hardware has been reset, we need to reload some things */
@@ -2841,7 +3213,11 @@ int e1000e_up(struct e1000_adapter *adapter)
2841 netif_wake_queue(adapter->netdev); 3213 netif_wake_queue(adapter->netdev);
2842 3214
2843 /* fire a link change interrupt to start the watchdog */ 3215 /* fire a link change interrupt to start the watchdog */
2844 ew32(ICS, E1000_ICS_LSC); 3216 if (adapter->msix_entries)
3217 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3218 else
3219 ew32(ICS, E1000_ICS_LSC);
3220
2845 return 0; 3221 return 0;
2846} 3222}
2847 3223
@@ -2887,9 +3263,11 @@ void e1000e_down(struct e1000_adapter *adapter)
2887 e1000_clean_tx_ring(adapter); 3263 e1000_clean_tx_ring(adapter);
2888 e1000_clean_rx_ring(adapter); 3264 e1000_clean_rx_ring(adapter);
2889 3265
2890 if (adapter->flags & FLAG_HAS_ERT) 3266 if (adapter->flags & FLAG_HAS_ERT) {
2891 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, 3267 pm_qos_remove_request(
2892 adapter->netdev->name); 3268 adapter->netdev->pm_qos_req);
3269 adapter->netdev->pm_qos_req = NULL;
3270 }
2893 3271
2894 /* 3272 /*
2895 * TODO: for power management, we could drop the link and 3273 * TODO: for power management, we could drop the link and
@@ -3083,12 +3461,15 @@ static int e1000_open(struct net_device *netdev)
3083{ 3461{
3084 struct e1000_adapter *adapter = netdev_priv(netdev); 3462 struct e1000_adapter *adapter = netdev_priv(netdev);
3085 struct e1000_hw *hw = &adapter->hw; 3463 struct e1000_hw *hw = &adapter->hw;
3464 struct pci_dev *pdev = adapter->pdev;
3086 int err; 3465 int err;
3087 3466
3088 /* disallow open during test */ 3467 /* disallow open during test */
3089 if (test_bit(__E1000_TESTING, &adapter->state)) 3468 if (test_bit(__E1000_TESTING, &adapter->state))
3090 return -EBUSY; 3469 return -EBUSY;
3091 3470
3471 pm_runtime_get_sync(&pdev->dev);
3472
3092 netif_carrier_off(netdev); 3473 netif_carrier_off(netdev);
3093 3474
3094 /* allocate transmit descriptors */ 3475 /* allocate transmit descriptors */
@@ -3101,6 +3482,15 @@ static int e1000_open(struct net_device *netdev)
3101 if (err) 3482 if (err)
3102 goto err_setup_rx; 3483 goto err_setup_rx;
3103 3484
3485 /*
3486 * If AMT is enabled, let the firmware know that the network
3487 * interface is now open and reset the part to a known state.
3488 */
3489 if (adapter->flags & FLAG_HAS_AMT) {
3490 e1000_get_hw_control(adapter);
3491 e1000e_reset(adapter);
3492 }
3493
3104 e1000e_power_up_phy(adapter); 3494 e1000e_power_up_phy(adapter);
3105 3495
3106 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; 3496 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@@ -3109,13 +3499,6 @@ static int e1000_open(struct net_device *netdev)
3109 e1000_update_mng_vlan(adapter); 3499 e1000_update_mng_vlan(adapter);
3110 3500
3111 /* 3501 /*
3112 * If AMT is enabled, let the firmware know that the network
3113 * interface is now open
3114 */
3115 if (adapter->flags & FLAG_HAS_AMT)
3116 e1000_get_hw_control(adapter);
3117
3118 /*
3119 * before we allocate an interrupt, we must be ready to handle it. 3502 * before we allocate an interrupt, we must be ready to handle it.
3120 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 3503 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
3121 * as soon as we call pci_request_irq, so we have to setup our 3504 * as soon as we call pci_request_irq, so we have to setup our
@@ -3149,8 +3532,14 @@ static int e1000_open(struct net_device *netdev)
3149 3532
3150 netif_start_queue(netdev); 3533 netif_start_queue(netdev);
3151 3534
3535 adapter->idle_check = true;
3536 pm_runtime_put(&pdev->dev);
3537
3152 /* fire a link status change interrupt to start the watchdog */ 3538 /* fire a link status change interrupt to start the watchdog */
3153 ew32(ICS, E1000_ICS_LSC); 3539 if (adapter->msix_entries)
3540 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
3541 else
3542 ew32(ICS, E1000_ICS_LSC);
3154 3543
3155 return 0; 3544 return 0;
3156 3545
@@ -3162,6 +3551,7 @@ err_setup_rx:
3162 e1000e_free_tx_resources(adapter); 3551 e1000e_free_tx_resources(adapter);
3163err_setup_tx: 3552err_setup_tx:
3164 e1000e_reset(adapter); 3553 e1000e_reset(adapter);
3554 pm_runtime_put_sync(&pdev->dev);
3165 3555
3166 return err; 3556 return err;
3167} 3557}
@@ -3180,11 +3570,17 @@ err_setup_tx:
3180static int e1000_close(struct net_device *netdev) 3570static int e1000_close(struct net_device *netdev)
3181{ 3571{
3182 struct e1000_adapter *adapter = netdev_priv(netdev); 3572 struct e1000_adapter *adapter = netdev_priv(netdev);
3573 struct pci_dev *pdev = adapter->pdev;
3183 3574
3184 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 3575 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3185 e1000e_down(adapter); 3576
3577 pm_runtime_get_sync(&pdev->dev);
3578
3579 if (!test_bit(__E1000_DOWN, &adapter->state)) {
3580 e1000e_down(adapter);
3581 e1000_free_irq(adapter);
3582 }
3186 e1000_power_down_phy(adapter); 3583 e1000_power_down_phy(adapter);
3187 e1000_free_irq(adapter);
3188 3584
3189 e1000e_free_tx_resources(adapter); 3585 e1000e_free_tx_resources(adapter);
3190 e1000e_free_rx_resources(adapter); 3586 e1000e_free_rx_resources(adapter);
@@ -3206,6 +3602,8 @@ static int e1000_close(struct net_device *netdev)
3206 if (adapter->flags & FLAG_HAS_AMT) 3602 if (adapter->flags & FLAG_HAS_AMT)
3207 e1000_release_hw_control(adapter); 3603 e1000_release_hw_control(adapter);
3208 3604
3605 pm_runtime_put_sync(&pdev->dev);
3606
3209 return 0; 3607 return 0;
3210} 3608}
3211/** 3609/**
@@ -3550,6 +3948,9 @@ static void e1000_watchdog_task(struct work_struct *work)
3550 3948
3551 link = e1000e_has_link(adapter); 3949 link = e1000e_has_link(adapter);
3552 if ((netif_carrier_ok(netdev)) && link) { 3950 if ((netif_carrier_ok(netdev)) && link) {
3951 /* Cancel scheduled suspend requests. */
3952 pm_runtime_resume(netdev->dev.parent);
3953
3553 e1000e_enable_receives(adapter); 3954 e1000e_enable_receives(adapter);
3554 goto link_up; 3955 goto link_up;
3555 } 3956 }
@@ -3561,6 +3962,10 @@ static void e1000_watchdog_task(struct work_struct *work)
3561 if (link) { 3962 if (link) {
3562 if (!netif_carrier_ok(netdev)) { 3963 if (!netif_carrier_ok(netdev)) {
3563 bool txb2b = 1; 3964 bool txb2b = 1;
3965
3966 /* Cancel scheduled suspend requests. */
3967 pm_runtime_resume(netdev->dev.parent);
3968
3564 /* update snapshot of PHY registers on LSC */ 3969 /* update snapshot of PHY registers on LSC */
3565 e1000_phy_read_status(adapter); 3970 e1000_phy_read_status(adapter);
3566 mac->ops.get_link_up_info(&adapter->hw, 3971 mac->ops.get_link_up_info(&adapter->hw,
@@ -3670,6 +4075,9 @@ static void e1000_watchdog_task(struct work_struct *work)
3670 4075
3671 if (adapter->flags & FLAG_RX_NEEDS_RESTART) 4076 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
3672 schedule_work(&adapter->reset_task); 4077 schedule_work(&adapter->reset_task);
4078 else
4079 pm_schedule_suspend(netdev->dev.parent,
4080 LINK_TIMEOUT);
3673 } 4081 }
3674 } 4082 }
3675 4083
@@ -3705,6 +4113,22 @@ link_up:
3705 } 4113 }
3706 } 4114 }
3707 4115
4116 /* Simple mode for Interrupt Throttle Rate (ITR) */
4117 if (adapter->itr_setting == 4) {
4118 /*
4119 * Symmetric Tx/Rx gets a reduced ITR=2000;
4120 * Total asymmetrical Tx or Rx gets ITR=8000;
4121 * everyone else is between 2000-8000.
4122 */
4123 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
4124 u32 dif = (adapter->gotc > adapter->gorc ?
4125 adapter->gotc - adapter->gorc :
4126 adapter->gorc - adapter->gotc) / 10000;
4127 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
4128
4129 ew32(ITR, 1000000000 / (itr * 256));
4130 }
4131
3708 /* Cause software interrupt to ensure Rx ring is cleaned */ 4132 /* Cause software interrupt to ensure Rx ring is cleaned */
3709 if (adapter->msix_entries) 4133 if (adapter->msix_entries)
3710 ew32(ICS, adapter->rx_ring->ims_val); 4134 ew32(ICS, adapter->rx_ring->ims_val);
@@ -3879,7 +4303,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3879 struct e1000_buffer *buffer_info; 4303 struct e1000_buffer *buffer_info;
3880 unsigned int len = skb_headlen(skb); 4304 unsigned int len = skb_headlen(skb);
3881 unsigned int offset = 0, size, count = 0, i; 4305 unsigned int offset = 0, size, count = 0, i;
3882 unsigned int f; 4306 unsigned int f, bytecount, segs;
3883 4307
3884 i = tx_ring->next_to_use; 4308 i = tx_ring->next_to_use;
3885 4309
@@ -3890,10 +4314,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3890 buffer_info->length = size; 4314 buffer_info->length = size;
3891 buffer_info->time_stamp = jiffies; 4315 buffer_info->time_stamp = jiffies;
3892 buffer_info->next_to_watch = i; 4316 buffer_info->next_to_watch = i;
3893 buffer_info->dma = pci_map_single(pdev, skb->data + offset, 4317 buffer_info->dma = dma_map_single(&pdev->dev,
3894 size, PCI_DMA_TODEVICE); 4318 skb->data + offset,
4319 size, DMA_TO_DEVICE);
3895 buffer_info->mapped_as_page = false; 4320 buffer_info->mapped_as_page = false;
3896 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 4321 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
3897 goto dma_error; 4322 goto dma_error;
3898 4323
3899 len -= size; 4324 len -= size;
@@ -3925,11 +4350,11 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3925 buffer_info->length = size; 4350 buffer_info->length = size;
3926 buffer_info->time_stamp = jiffies; 4351 buffer_info->time_stamp = jiffies;
3927 buffer_info->next_to_watch = i; 4352 buffer_info->next_to_watch = i;
3928 buffer_info->dma = pci_map_page(pdev, frag->page, 4353 buffer_info->dma = dma_map_page(&pdev->dev, frag->page,
3929 offset, size, 4354 offset, size,
3930 PCI_DMA_TODEVICE); 4355 DMA_TO_DEVICE);
3931 buffer_info->mapped_as_page = true; 4356 buffer_info->mapped_as_page = true;
3932 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 4357 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
3933 goto dma_error; 4358 goto dma_error;
3934 4359
3935 len -= size; 4360 len -= size;
@@ -3938,7 +4363,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
3938 } 4363 }
3939 } 4364 }
3940 4365
4366 segs = skb_shinfo(skb)->gso_segs ?: 1;
4367 /* multiply data chunks by size of headers */
4368 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
4369
3941 tx_ring->buffer_info[i].skb = skb; 4370 tx_ring->buffer_info[i].skb = skb;
4371 tx_ring->buffer_info[i].segs = segs;
4372 tx_ring->buffer_info[i].bytecount = bytecount;
3942 tx_ring->buffer_info[first].next_to_watch = i; 4373 tx_ring->buffer_info[first].next_to_watch = i;
3943 4374
3944 return count; 4375 return count;
@@ -4105,7 +4536,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4105 unsigned int max_per_txd = E1000_MAX_PER_TXD; 4536 unsigned int max_per_txd = E1000_MAX_PER_TXD;
4106 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 4537 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
4107 unsigned int tx_flags = 0; 4538 unsigned int tx_flags = 0;
4108 unsigned int len = skb->len - skb->data_len; 4539 unsigned int len = skb_headlen(skb);
4109 unsigned int nr_frags; 4540 unsigned int nr_frags;
4110 unsigned int mss; 4541 unsigned int mss;
4111 int count = 0; 4542 int count = 0;
@@ -4155,7 +4586,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4155 dev_kfree_skb_any(skb); 4586 dev_kfree_skb_any(skb);
4156 return NETDEV_TX_OK; 4587 return NETDEV_TX_OK;
4157 } 4588 }
4158 len = skb->len - skb->data_len; 4589 len = skb_headlen(skb);
4159 } 4590 }
4160 } 4591 }
4161 4592
@@ -4241,6 +4672,8 @@ static void e1000_reset_task(struct work_struct *work)
4241 struct e1000_adapter *adapter; 4672 struct e1000_adapter *adapter;
4242 adapter = container_of(work, struct e1000_adapter, reset_task); 4673 adapter = container_of(work, struct e1000_adapter, reset_task);
4243 4674
4675 e1000e_dump(adapter);
4676 e_err("Reset adapter\n");
4244 e1000e_reinit_locked(adapter); 4677 e1000e_reinit_locked(adapter);
4245} 4678}
4246 4679
@@ -4283,6 +4716,14 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4283 return -EINVAL; 4716 return -EINVAL;
4284 } 4717 }
4285 4718
4719 /* 82573 Errata 17 */
4720 if (((adapter->hw.mac.type == e1000_82573) ||
4721 (adapter->hw.mac.type == e1000_82574)) &&
4722 (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN)) {
4723 adapter->flags2 |= FLAG2_DISABLE_ASPM_L1;
4724 e1000e_disable_aspm(adapter->pdev, PCIE_LINK_STATE_L1);
4725 }
4726
4286 while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) 4727 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4287 msleep(1); 4728 msleep(1);
4288 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ 4729 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
@@ -4467,13 +4908,15 @@ out:
4467 return retval; 4908 return retval;
4468} 4909}
4469 4910
4470static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) 4911static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
4912 bool runtime)
4471{ 4913{
4472 struct net_device *netdev = pci_get_drvdata(pdev); 4914 struct net_device *netdev = pci_get_drvdata(pdev);
4473 struct e1000_adapter *adapter = netdev_priv(netdev); 4915 struct e1000_adapter *adapter = netdev_priv(netdev);
4474 struct e1000_hw *hw = &adapter->hw; 4916 struct e1000_hw *hw = &adapter->hw;
4475 u32 ctrl, ctrl_ext, rctl, status; 4917 u32 ctrl, ctrl_ext, rctl, status;
4476 u32 wufc = adapter->wol; 4918 /* Runtime suspend should only enable wakeup for link changes */
4919 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
4477 int retval = 0; 4920 int retval = 0;
4478 4921
4479 netif_device_detach(netdev); 4922 netif_device_detach(netdev);
@@ -4605,45 +5048,51 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
4605 } 5048 }
4606} 5049}
4607 5050
4608static void e1000e_disable_l1aspm(struct pci_dev *pdev) 5051#ifdef CONFIG_PCIEASPM
5052static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
5053{
5054 pci_disable_link_state(pdev, state);
5055}
5056#else
5057static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
4609{ 5058{
4610 int pos; 5059 int pos;
4611 u16 val; 5060 u16 reg16;
4612 5061
4613 /* 5062 /*
4614 * 82573 workaround - disable L1 ASPM on mobile chipsets 5063 * Both device and parent should have the same ASPM setting.
4615 * 5064 * Disable ASPM in downstream component first and then upstream.
4616 * L1 ASPM on various mobile (ich7) chipsets do not behave properly
4617 * resulting in lost data or garbage information on the pci-e link
4618 * level. This could result in (false) bad EEPROM checksum errors,
4619 * long ping times (up to 2s) or even a system freeze/hang.
4620 *
4621 * Unfortunately this feature saves about 1W power consumption when
4622 * active.
4623 */ 5065 */
4624 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 5066 pos = pci_pcie_cap(pdev);
4625 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &val); 5067 pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
4626 if (val & 0x2) { 5068 reg16 &= ~state;
4627 dev_warn(&pdev->dev, "Disabling L1 ASPM\n"); 5069 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
4628 val &= ~0x2; 5070
4629 pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, val); 5071 if (!pdev->bus->self)
4630 } 5072 return;
4631}
4632 5073
4633#ifdef CONFIG_PM 5074 pos = pci_pcie_cap(pdev->bus->self);
4634static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) 5075 pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
5076 reg16 &= ~state;
5077 pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
5078}
5079#endif
5080void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
4635{ 5081{
4636 int retval; 5082 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
4637 bool wake; 5083 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
5084 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
4638 5085
4639 retval = __e1000_shutdown(pdev, &wake); 5086 __e1000e_disable_aspm(pdev, state);
4640 if (!retval) 5087}
4641 e1000_complete_shutdown(pdev, true, wake);
4642 5088
4643 return retval; 5089#ifdef CONFIG_PM_OPS
5090static bool e1000e_pm_ready(struct e1000_adapter *adapter)
5091{
5092 return !!adapter->tx_ring->buffer_info;
4644} 5093}
4645 5094
4646static int e1000_resume(struct pci_dev *pdev) 5095static int __e1000_resume(struct pci_dev *pdev)
4647{ 5096{
4648 struct net_device *netdev = pci_get_drvdata(pdev); 5097 struct net_device *netdev = pci_get_drvdata(pdev);
4649 struct e1000_adapter *adapter = netdev_priv(netdev); 5098 struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -4653,19 +5102,8 @@ static int e1000_resume(struct pci_dev *pdev)
4653 pci_set_power_state(pdev, PCI_D0); 5102 pci_set_power_state(pdev, PCI_D0);
4654 pci_restore_state(pdev); 5103 pci_restore_state(pdev);
4655 pci_save_state(pdev); 5104 pci_save_state(pdev);
4656 e1000e_disable_l1aspm(pdev); 5105 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
4657 5106 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4658 err = pci_enable_device_mem(pdev);
4659 if (err) {
4660 dev_err(&pdev->dev,
4661 "Cannot enable PCI device from suspend\n");
4662 return err;
4663 }
4664
4665 pci_set_master(pdev);
4666
4667 pci_enable_wake(pdev, PCI_D3hot, 0);
4668 pci_enable_wake(pdev, PCI_D3cold, 0);
4669 5107
4670 e1000e_set_interrupt_capability(adapter); 5108 e1000e_set_interrupt_capability(adapter);
4671 if (netif_running(netdev)) { 5109 if (netif_running(netdev)) {
@@ -4707,7 +5145,7 @@ static int e1000_resume(struct pci_dev *pdev)
4707 5145
4708 e1000e_reset(adapter); 5146 e1000e_reset(adapter);
4709 5147
4710 e1000_init_manageability(adapter); 5148 e1000_init_manageability_pt(adapter);
4711 5149
4712 if (netif_running(netdev)) 5150 if (netif_running(netdev))
4713 e1000e_up(adapter); 5151 e1000e_up(adapter);
@@ -4724,13 +5162,88 @@ static int e1000_resume(struct pci_dev *pdev)
4724 5162
4725 return 0; 5163 return 0;
4726} 5164}
4727#endif 5165
5166#ifdef CONFIG_PM_SLEEP
5167static int e1000_suspend(struct device *dev)
5168{
5169 struct pci_dev *pdev = to_pci_dev(dev);
5170 int retval;
5171 bool wake;
5172
5173 retval = __e1000_shutdown(pdev, &wake, false);
5174 if (!retval)
5175 e1000_complete_shutdown(pdev, true, wake);
5176
5177 return retval;
5178}
5179
5180static int e1000_resume(struct device *dev)
5181{
5182 struct pci_dev *pdev = to_pci_dev(dev);
5183 struct net_device *netdev = pci_get_drvdata(pdev);
5184 struct e1000_adapter *adapter = netdev_priv(netdev);
5185
5186 if (e1000e_pm_ready(adapter))
5187 adapter->idle_check = true;
5188
5189 return __e1000_resume(pdev);
5190}
5191#endif /* CONFIG_PM_SLEEP */
5192
5193#ifdef CONFIG_PM_RUNTIME
5194static int e1000_runtime_suspend(struct device *dev)
5195{
5196 struct pci_dev *pdev = to_pci_dev(dev);
5197 struct net_device *netdev = pci_get_drvdata(pdev);
5198 struct e1000_adapter *adapter = netdev_priv(netdev);
5199
5200 if (e1000e_pm_ready(adapter)) {
5201 bool wake;
5202
5203 __e1000_shutdown(pdev, &wake, true);
5204 }
5205
5206 return 0;
5207}
5208
5209static int e1000_idle(struct device *dev)
5210{
5211 struct pci_dev *pdev = to_pci_dev(dev);
5212 struct net_device *netdev = pci_get_drvdata(pdev);
5213 struct e1000_adapter *adapter = netdev_priv(netdev);
5214
5215 if (!e1000e_pm_ready(adapter))
5216 return 0;
5217
5218 if (adapter->idle_check) {
5219 adapter->idle_check = false;
5220 if (!e1000e_has_link(adapter))
5221 pm_schedule_suspend(dev, MSEC_PER_SEC);
5222 }
5223
5224 return -EBUSY;
5225}
5226
5227static int e1000_runtime_resume(struct device *dev)
5228{
5229 struct pci_dev *pdev = to_pci_dev(dev);
5230 struct net_device *netdev = pci_get_drvdata(pdev);
5231 struct e1000_adapter *adapter = netdev_priv(netdev);
5232
5233 if (!e1000e_pm_ready(adapter))
5234 return 0;
5235
5236 adapter->idle_check = !dev->power.runtime_auto;
5237 return __e1000_resume(pdev);
5238}
5239#endif /* CONFIG_PM_RUNTIME */
5240#endif /* CONFIG_PM_OPS */
4728 5241
4729static void e1000_shutdown(struct pci_dev *pdev) 5242static void e1000_shutdown(struct pci_dev *pdev)
4730{ 5243{
4731 bool wake = false; 5244 bool wake = false;
4732 5245
4733 __e1000_shutdown(pdev, &wake); 5246 __e1000_shutdown(pdev, &wake, false);
4734 5247
4735 if (system_state == SYSTEM_POWER_OFF) 5248 if (system_state == SYSTEM_POWER_OFF)
4736 e1000_complete_shutdown(pdev, false, wake); 5249 e1000_complete_shutdown(pdev, false, wake);
@@ -4795,7 +5308,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4795 int err; 5308 int err;
4796 pci_ers_result_t result; 5309 pci_ers_result_t result;
4797 5310
4798 e1000e_disable_l1aspm(pdev); 5311 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
5312 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4799 err = pci_enable_device_mem(pdev); 5313 err = pci_enable_device_mem(pdev);
4800 if (err) { 5314 if (err) {
4801 dev_err(&pdev->dev, 5315 dev_err(&pdev->dev,
@@ -4803,8 +5317,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4803 result = PCI_ERS_RESULT_DISCONNECT; 5317 result = PCI_ERS_RESULT_DISCONNECT;
4804 } else { 5318 } else {
4805 pci_set_master(pdev); 5319 pci_set_master(pdev);
5320 pdev->state_saved = true;
4806 pci_restore_state(pdev); 5321 pci_restore_state(pdev);
4807 pci_save_state(pdev);
4808 5322
4809 pci_enable_wake(pdev, PCI_D3hot, 0); 5323 pci_enable_wake(pdev, PCI_D3hot, 0);
4810 pci_enable_wake(pdev, PCI_D3cold, 0); 5324 pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -4832,7 +5346,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
4832 struct net_device *netdev = pci_get_drvdata(pdev); 5346 struct net_device *netdev = pci_get_drvdata(pdev);
4833 struct e1000_adapter *adapter = netdev_priv(netdev); 5347 struct e1000_adapter *adapter = netdev_priv(netdev);
4834 5348
4835 e1000_init_manageability(adapter); 5349 e1000_init_manageability_pt(adapter);
4836 5350
4837 if (netif_running(netdev)) { 5351 if (netif_running(netdev)) {
4838 if (e1000e_up(adapter)) { 5352 if (e1000e_up(adapter)) {
@@ -4889,13 +5403,6 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
4889 dev_warn(&adapter->pdev->dev, 5403 dev_warn(&adapter->pdev->dev,
4890 "Warning: detected DSPD enabled in EEPROM\n"); 5404 "Warning: detected DSPD enabled in EEPROM\n");
4891 } 5405 }
4892
4893 ret_val = e1000_read_nvm(hw, NVM_INIT_3GIO_3, 1, &buf);
4894 if (!ret_val && (le16_to_cpu(buf) & (3 << 2))) {
4895 /* ASPM enable */
4896 dev_warn(&adapter->pdev->dev,
4897 "Warning: detected ASPM enabled in EEPROM\n");
4898 }
4899} 5406}
4900 5407
4901static const struct net_device_ops e1000e_netdev_ops = { 5408static const struct net_device_ops e1000e_netdev_ops = {
@@ -4944,23 +5451,24 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4944 u16 eeprom_data = 0; 5451 u16 eeprom_data = 0;
4945 u16 eeprom_apme_mask = E1000_EEPROM_APME; 5452 u16 eeprom_apme_mask = E1000_EEPROM_APME;
4946 5453
4947 e1000e_disable_l1aspm(pdev); 5454 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
5455 e1000e_disable_aspm(pdev, PCIE_LINK_STATE_L1);
4948 5456
4949 err = pci_enable_device_mem(pdev); 5457 err = pci_enable_device_mem(pdev);
4950 if (err) 5458 if (err)
4951 return err; 5459 return err;
4952 5460
4953 pci_using_dac = 0; 5461 pci_using_dac = 0;
4954 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 5462 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4955 if (!err) { 5463 if (!err) {
4956 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 5464 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4957 if (!err) 5465 if (!err)
4958 pci_using_dac = 1; 5466 pci_using_dac = 1;
4959 } else { 5467 } else {
4960 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 5468 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4961 if (err) { 5469 if (err) {
4962 err = pci_set_consistent_dma_mask(pdev, 5470 err = dma_set_coherent_mask(&pdev->dev,
4963 DMA_BIT_MASK(32)); 5471 DMA_BIT_MASK(32));
4964 if (err) { 5472 if (err) {
4965 dev_err(&pdev->dev, "No usable DMA " 5473 dev_err(&pdev->dev, "No usable DMA "
4966 "configuration, aborting\n"); 5474 "configuration, aborting\n");
@@ -4991,6 +5499,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4991 5499
4992 SET_NETDEV_DEV(netdev, &pdev->dev); 5500 SET_NETDEV_DEV(netdev, &pdev->dev);
4993 5501
5502 netdev->irq = pdev->irq;
5503
4994 pci_set_drvdata(pdev, netdev); 5504 pci_set_drvdata(pdev, netdev);
4995 adapter = netdev_priv(netdev); 5505 adapter = netdev_priv(netdev);
4996 hw = &adapter->hw; 5506 hw = &adapter->hw;
@@ -5211,6 +5721,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5211 5721
5212 e1000_print_device_info(adapter); 5722 e1000_print_device_info(adapter);
5213 5723
5724 if (pci_dev_run_wake(pdev)) {
5725 pm_runtime_set_active(&pdev->dev);
5726 pm_runtime_enable(&pdev->dev);
5727 }
5728 pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
5729
5214 return 0; 5730 return 0;
5215 5731
5216err_register: 5732err_register:
@@ -5253,12 +5769,16 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5253{ 5769{
5254 struct net_device *netdev = pci_get_drvdata(pdev); 5770 struct net_device *netdev = pci_get_drvdata(pdev);
5255 struct e1000_adapter *adapter = netdev_priv(netdev); 5771 struct e1000_adapter *adapter = netdev_priv(netdev);
5772 bool down = test_bit(__E1000_DOWN, &adapter->state);
5773
5774 pm_runtime_get_sync(&pdev->dev);
5256 5775
5257 /* 5776 /*
5258 * flush_scheduled work may reschedule our watchdog task, so 5777 * flush_scheduled work may reschedule our watchdog task, so
5259 * explicitly disable watchdog tasks from being rescheduled 5778 * explicitly disable watchdog tasks from being rescheduled
5260 */ 5779 */
5261 set_bit(__E1000_DOWN, &adapter->state); 5780 if (!down)
5781 set_bit(__E1000_DOWN, &adapter->state);
5262 del_timer_sync(&adapter->watchdog_timer); 5782 del_timer_sync(&adapter->watchdog_timer);
5263 del_timer_sync(&adapter->phy_info_timer); 5783 del_timer_sync(&adapter->phy_info_timer);
5264 5784
@@ -5272,8 +5792,17 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5272 if (!(netdev->flags & IFF_UP)) 5792 if (!(netdev->flags & IFF_UP))
5273 e1000_power_down_phy(adapter); 5793 e1000_power_down_phy(adapter);
5274 5794
5795 /* Don't lie to e1000_close() down the road. */
5796 if (!down)
5797 clear_bit(__E1000_DOWN, &adapter->state);
5275 unregister_netdev(netdev); 5798 unregister_netdev(netdev);
5276 5799
5800 if (pci_dev_run_wake(pdev)) {
5801 pm_runtime_disable(&pdev->dev);
5802 pm_runtime_set_suspended(&pdev->dev);
5803 }
5804 pm_runtime_put_noidle(&pdev->dev);
5805
5277 /* 5806 /*
5278 * Release control of h/w to f/w. If f/w is AMT enabled, this 5807 * Release control of h/w to f/w. If f/w is AMT enabled, this
5279 * would have already happened in close and is redundant. 5808 * would have already happened in close and is redundant.
@@ -5363,6 +5892,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
5363 5892
5364 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, 5893 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
5365 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, 5894 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
5895 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
5366 5896
5367 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, 5897 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
5368 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, 5898 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
@@ -5373,16 +5903,22 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
5373}; 5903};
5374MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 5904MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
5375 5905
5906#ifdef CONFIG_PM_OPS
5907static const struct dev_pm_ops e1000_pm_ops = {
5908 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
5909 SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
5910 e1000_runtime_resume, e1000_idle)
5911};
5912#endif
5913
5376/* PCI Device API Driver */ 5914/* PCI Device API Driver */
5377static struct pci_driver e1000_driver = { 5915static struct pci_driver e1000_driver = {
5378 .name = e1000e_driver_name, 5916 .name = e1000e_driver_name,
5379 .id_table = e1000_pci_tbl, 5917 .id_table = e1000_pci_tbl,
5380 .probe = e1000_probe, 5918 .probe = e1000_probe,
5381 .remove = __devexit_p(e1000_remove), 5919 .remove = __devexit_p(e1000_remove),
5382#ifdef CONFIG_PM 5920#ifdef CONFIG_PM_OPS
5383 /* Power Management Hooks */ 5921 .driver.pm = &e1000_pm_ops,
5384 .suspend = e1000_suspend,
5385 .resume = e1000_resume,
5386#endif 5922#endif
5387 .shutdown = e1000_shutdown, 5923 .shutdown = e1000_shutdown,
5388 .err_handler = &e1000_err_handler 5924 .err_handler = &e1000_err_handler
@@ -5397,10 +5933,9 @@ static struct pci_driver e1000_driver = {
5397static int __init e1000_init_module(void) 5933static int __init e1000_init_module(void)
5398{ 5934{
5399 int ret; 5935 int ret;
5400 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", 5936 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
5401 e1000e_driver_name, e1000e_driver_version); 5937 e1000e_driver_version);
5402 printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n", 5938 pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
5403 e1000e_driver_name);
5404 ret = pci_register_driver(&e1000_driver); 5939 ret = pci_register_driver(&e1000_driver);
5405 5940
5406 return ret; 5941 return ret;
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 2e399778cae5..a150e48a117f 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -248,7 +248,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
248 } 248 }
249 249
250 { /* Transmit Interrupt Delay */ 250 { /* Transmit Interrupt Delay */
251 const struct e1000_option opt = { 251 static const struct e1000_option opt = {
252 .type = range_option, 252 .type = range_option,
253 .name = "Transmit Interrupt Delay", 253 .name = "Transmit Interrupt Delay",
254 .err = "using default of " 254 .err = "using default of "
@@ -267,7 +267,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
267 } 267 }
268 } 268 }
269 { /* Transmit Absolute Interrupt Delay */ 269 { /* Transmit Absolute Interrupt Delay */
270 const struct e1000_option opt = { 270 static const struct e1000_option opt = {
271 .type = range_option, 271 .type = range_option,
272 .name = "Transmit Absolute Interrupt Delay", 272 .name = "Transmit Absolute Interrupt Delay",
273 .err = "using default of " 273 .err = "using default of "
@@ -286,7 +286,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
286 } 286 }
287 } 287 }
288 { /* Receive Interrupt Delay */ 288 { /* Receive Interrupt Delay */
289 struct e1000_option opt = { 289 static struct e1000_option opt = {
290 .type = range_option, 290 .type = range_option,
291 .name = "Receive Interrupt Delay", 291 .name = "Receive Interrupt Delay",
292 .err = "using default of " 292 .err = "using default of "
@@ -305,7 +305,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
305 } 305 }
306 } 306 }
307 { /* Receive Absolute Interrupt Delay */ 307 { /* Receive Absolute Interrupt Delay */
308 const struct e1000_option opt = { 308 static const struct e1000_option opt = {
309 .type = range_option, 309 .type = range_option,
310 .name = "Receive Absolute Interrupt Delay", 310 .name = "Receive Absolute Interrupt Delay",
311 .err = "using default of " 311 .err = "using default of "
@@ -324,7 +324,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
324 } 324 }
325 } 325 }
326 { /* Interrupt Throttling Rate */ 326 { /* Interrupt Throttling Rate */
327 const struct e1000_option opt = { 327 static const struct e1000_option opt = {
328 .type = range_option, 328 .type = range_option,
329 .name = "Interrupt Throttling Rate (ints/sec)", 329 .name = "Interrupt Throttling Rate (ints/sec)",
330 .err = "using default of " 330 .err = "using default of "
@@ -351,6 +351,11 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
351 adapter->itr_setting = adapter->itr; 351 adapter->itr_setting = adapter->itr;
352 adapter->itr = 20000; 352 adapter->itr = 20000;
353 break; 353 break;
354 case 4:
355 e_info("%s set to simplified (2000-8000 ints) "
356 "mode\n", opt.name);
357 adapter->itr_setting = 4;
358 break;
354 default: 359 default:
355 /* 360 /*
356 * Save the setting, because the dynamic bits 361 * Save the setting, because the dynamic bits
@@ -381,7 +386,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
381 } 386 }
382 } 387 }
383 { /* Interrupt Mode */ 388 { /* Interrupt Mode */
384 struct e1000_option opt = { 389 static struct e1000_option opt = {
385 .type = range_option, 390 .type = range_option,
386 .name = "Interrupt Mode", 391 .name = "Interrupt Mode",
387 .err = "defaulting to 2 (MSI-X)", 392 .err = "defaulting to 2 (MSI-X)",
@@ -399,7 +404,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
399 } 404 }
400 } 405 }
401 { /* Smart Power Down */ 406 { /* Smart Power Down */
402 const struct e1000_option opt = { 407 static const struct e1000_option opt = {
403 .type = enable_option, 408 .type = enable_option,
404 .name = "PHY Smart Power Down", 409 .name = "PHY Smart Power Down",
405 .err = "defaulting to Disabled", 410 .err = "defaulting to Disabled",
@@ -415,7 +420,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
415 } 420 }
416 } 421 }
417 { /* CRC Stripping */ 422 { /* CRC Stripping */
418 const struct e1000_option opt = { 423 static const struct e1000_option opt = {
419 .type = enable_option, 424 .type = enable_option,
420 .name = "CRC Stripping", 425 .name = "CRC Stripping",
421 .err = "defaulting to enabled", 426 .err = "defaulting to enabled",
@@ -432,7 +437,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
432 } 437 }
433 } 438 }
434 { /* Kumeran Lock Loss Workaround */ 439 { /* Kumeran Lock Loss Workaround */
435 const struct e1000_option opt = { 440 static const struct e1000_option opt = {
436 .type = enable_option, 441 .type = enable_option,
437 .name = "Kumeran Lock Loss Workaround", 442 .name = "Kumeran Lock Loss Workaround",
438 .err = "defaulting to Enabled", 443 .err = "defaulting to Enabled",
@@ -452,7 +457,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
452 } 457 }
453 } 458 }
454 { /* Write-protect NVM */ 459 { /* Write-protect NVM */
455 const struct e1000_option opt = { 460 static const struct e1000_option opt = {
456 .type = enable_option, 461 .type = enable_option,
457 .name = "Write-protect NVM", 462 .name = "Write-protect NVM",
458 .err = "defaulting to Enabled", 463 .err = "defaulting to Enabled",
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 7f3ceb9dad6a..b4ac82d51b20 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -3116,9 +3116,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
3116 * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY 3116 * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
3117 * @hw: pointer to the HW structure 3117 * @hw: pointer to the HW structure
3118 * 3118 *
3119 * Calls the PHY setup function to force speed and duplex. Clears the 3119 * Calls the PHY setup function to force speed and duplex.
3120 * auto-crossover to force MDI manually. Waits for link and returns
3121 * successful if link up is successful, else -E1000_ERR_PHY (-2).
3122 **/ 3120 **/
3123s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) 3121s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
3124{ 3122{
@@ -3137,23 +3135,6 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
3137 if (ret_val) 3135 if (ret_val)
3138 goto out; 3136 goto out;
3139 3137
3140 /*
3141 * Clear Auto-Crossover to force MDI manually. 82577 requires MDI
3142 * forced whenever speed and duplex are forced.
3143 */
3144 ret_val = phy->ops.read_reg(hw, I82577_PHY_CTRL_2, &phy_data);
3145 if (ret_val)
3146 goto out;
3147
3148 phy_data &= ~I82577_PHY_CTRL2_AUTO_MDIX;
3149 phy_data &= ~I82577_PHY_CTRL2_FORCE_MDI_MDIX;
3150
3151 ret_val = phy->ops.write_reg(hw, I82577_PHY_CTRL_2, phy_data);
3152 if (ret_val)
3153 goto out;
3154
3155 e_dbg("I82577_PHY_CTRL_2: %X\n", phy_data);
3156
3157 udelay(1); 3138 udelay(1);
3158 3139
3159 if (phy->autoneg_wait_to_complete) { 3140 if (phy->autoneg_wait_to_complete) {
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index ca93c9a9d372..06e72fbef862 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -328,7 +328,6 @@ e21_reset_8390(struct net_device *dev)
328 /* Set up the ASIC registers, just in case something changed them. */ 328 /* Set up the ASIC registers, just in case something changed them. */
329 329
330 if (ei_debug > 1) printk("reset done\n"); 330 if (ei_debug > 1) printk("reset done\n");
331 return;
332} 331}
333 332
334/* Grab the 8390 specific header. We put the 2k window so the header page 333/* Grab the 8390 specific header. We put the 2k window so the header page
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 27c7bdbfa003..8d97f168f018 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -645,7 +645,7 @@ static void __init printEEPROMInfo(struct net_device *dev)
645 if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE "); 645 if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE ");
646 if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC "); 646 if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC ");
647 if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI "); 647 if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI ");
648 printk(KERN_DEBUG "port(s) \n"); 648 printk(KERN_DEBUG "port(s)\n");
649 649
650 Word = lp->word[6]; 650 Word = lp->word[6];
651 printk(KERN_DEBUG "Word6:\n"); 651 printk(KERN_DEBUG "Word6:\n");
@@ -765,7 +765,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
765 /* Grab the region so we can find another board if autoIRQ fails. */ 765 /* Grab the region so we can find another board if autoIRQ fails. */
766 if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) { 766 if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
767 if (!autoprobe) 767 if (!autoprobe)
768 printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n", 768 printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n",
769 ioaddr); 769 ioaddr);
770 return -EBUSY; 770 return -EBUSY;
771 } 771 }
@@ -1161,8 +1161,7 @@ static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
1161 /* we won't wake queue here because we're out of space */ 1161 /* we won't wake queue here because we're out of space */
1162 dev->stats.tx_dropped++; 1162 dev->stats.tx_dropped++;
1163 else { 1163 else {
1164 dev->stats.tx_bytes+=skb->len; 1164 dev->stats.tx_bytes+=skb->len;
1165 dev->trans_start = jiffies;
1166 netif_wake_queue(dev); 1165 netif_wake_queue(dev);
1167 } 1166 }
1168 1167
@@ -1286,7 +1285,7 @@ set_multicast_list(struct net_device *dev)
1286 struct eepro_local *lp = netdev_priv(dev); 1285 struct eepro_local *lp = netdev_priv(dev);
1287 short ioaddr = dev->base_addr; 1286 short ioaddr = dev->base_addr;
1288 unsigned short mode; 1287 unsigned short mode;
1289 struct dev_mc_list *dmi; 1288 struct netdev_hw_addr *ha;
1290 int mc_count = netdev_mc_count(dev); 1289 int mc_count = netdev_mc_count(dev);
1291 1290
1292 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63) 1291 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
@@ -1331,8 +1330,8 @@ set_multicast_list(struct net_device *dev)
1331 outw(0, ioaddr + IO_PORT); 1330 outw(0, ioaddr + IO_PORT);
1332 outw(6 * (mc_count + 1), ioaddr + IO_PORT); 1331 outw(6 * (mc_count + 1), ioaddr + IO_PORT);
1333 1332
1334 netdev_for_each_mc_addr(dmi, dev) { 1333 netdev_for_each_mc_addr(ha, dev) {
1335 eaddrs = (unsigned short *) dmi->dmi_addr; 1334 eaddrs = (unsigned short *) ha->addr;
1336 outw(*eaddrs++, ioaddr + IO_PORT); 1335 outw(*eaddrs++, ioaddr + IO_PORT);
1337 outw(*eaddrs++, ioaddr + IO_PORT); 1336 outw(*eaddrs++, ioaddr + IO_PORT);
1338 outw(*eaddrs++, ioaddr + IO_PORT); 1337 outw(*eaddrs++, ioaddr + IO_PORT);
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 1a7322b80ea7..12c37d264108 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -543,7 +543,7 @@ static void unstick_cu(struct net_device *dev)
543 543
544 if (lp->started) 544 if (lp->started)
545 { 545 {
546 if (time_after(jiffies, dev->trans_start + 50)) 546 if (time_after(jiffies, dev_trans_start(dev) + HZ/2))
547 { 547 {
548 if (lp->tx_link==lp->last_tx_restart) 548 if (lp->tx_link==lp->last_tx_restart)
549 { 549 {
@@ -1018,7 +1018,7 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
1018 outw(lp->tx_head+0x16, ioaddr + DATAPORT); 1018 outw(lp->tx_head+0x16, ioaddr + DATAPORT);
1019 outw(0, ioaddr + DATAPORT); 1019 outw(0, ioaddr + DATAPORT);
1020 1020
1021 outsw(ioaddr + DATAPORT, buf, (len+1)>>1); 1021 outsw(ioaddr + DATAPORT, buf, (len+1)>>1);
1022 1022
1023 outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR); 1023 outw(lp->tx_tail+0xc, ioaddr + WRITE_PTR);
1024 outw(lp->tx_head, ioaddr + DATAPORT); 1024 outw(lp->tx_head, ioaddr + DATAPORT);
@@ -1570,12 +1570,11 @@ static void eexp_hw_init586(struct net_device *dev)
1570#if NET_DEBUG > 6 1570#if NET_DEBUG > 6
1571 printk("%s: leaving eexp_hw_init586()\n", dev->name); 1571 printk("%s: leaving eexp_hw_init586()\n", dev->name);
1572#endif 1572#endif
1573 return;
1574} 1573}
1575 1574
1576static void eexp_setup_filter(struct net_device *dev) 1575static void eexp_setup_filter(struct net_device *dev)
1577{ 1576{
1578 struct dev_mc_list *dmi; 1577 struct netdev_hw_addr *ha;
1579 unsigned short ioaddr = dev->base_addr; 1578 unsigned short ioaddr = dev->base_addr;
1580 int count = netdev_mc_count(dev); 1579 int count = netdev_mc_count(dev);
1581 int i; 1580 int i;
@@ -1588,8 +1587,8 @@ static void eexp_setup_filter(struct net_device *dev)
1588 outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); 1587 outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
1589 outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST)); 1588 outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
1590 i = 0; 1589 i = 0;
1591 netdev_for_each_mc_addr(dmi, dev) { 1590 netdev_for_each_mc_addr(ha, dev) {
1592 unsigned short *data = (unsigned short *) dmi->dmi_addr; 1591 unsigned short *data = (unsigned short *) ha->addr;
1593 1592
1594 if (i == count) 1593 if (i == count)
1595 break; 1594 break;
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index fa311a950996..0630980a2722 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0102" 43#define DRV_VERSION "EHEA_0103"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b23173864c60..f547894ff48f 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -794,11 +794,17 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
794 cqe_counter++; 794 cqe_counter++;
795 rmb(); 795 rmb();
796 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { 796 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
797 ehea_error("Send Completion Error: Resetting port"); 797 ehea_error("Bad send completion status=0x%04X",
798 cqe->status);
799
798 if (netif_msg_tx_err(pr->port)) 800 if (netif_msg_tx_err(pr->port))
799 ehea_dump(cqe, sizeof(*cqe), "Send CQE"); 801 ehea_dump(cqe, sizeof(*cqe), "Send CQE");
800 ehea_schedule_port_reset(pr->port); 802
801 break; 803 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
804 ehea_error("Resetting port");
805 ehea_schedule_port_reset(pr->port);
806 break;
807 }
802 } 808 }
803 809
804 if (netif_msg_tx_done(pr->port)) 810 if (netif_msg_tx_done(pr->port))
@@ -817,7 +823,7 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
817 quota--; 823 quota--;
818 824
819 cqe = ehea_poll_cq(send_cq); 825 cqe = ehea_poll_cq(send_cq);
820 }; 826 }
821 827
822 ehea_update_feca(send_cq, cqe_counter); 828 ehea_update_feca(send_cq, cqe_counter);
823 atomic_add(swqe_av, &pr->swqe_avail); 829 atomic_add(swqe_av, &pr->swqe_avail);
@@ -904,6 +910,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
904 struct ehea_eqe *eqe; 910 struct ehea_eqe *eqe;
905 struct ehea_qp *qp; 911 struct ehea_qp *qp;
906 u32 qp_token; 912 u32 qp_token;
913 u64 resource_type, aer, aerr;
914 int reset_port = 0;
907 915
908 eqe = ehea_poll_eq(port->qp_eq); 916 eqe = ehea_poll_eq(port->qp_eq);
909 917
@@ -913,11 +921,24 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
913 eqe->entry, qp_token); 921 eqe->entry, qp_token);
914 922
915 qp = port->port_res[qp_token].qp; 923 qp = port->port_res[qp_token].qp;
916 ehea_error_data(port->adapter, qp->fw_handle); 924
925 resource_type = ehea_error_data(port->adapter, qp->fw_handle,
926 &aer, &aerr);
927
928 if (resource_type == EHEA_AER_RESTYPE_QP) {
929 if ((aer & EHEA_AER_RESET_MASK) ||
930 (aerr & EHEA_AERR_RESET_MASK))
931 reset_port = 1;
932 } else
933 reset_port = 1; /* Reset in case of CQ or EQ error */
934
917 eqe = ehea_poll_eq(port->qp_eq); 935 eqe = ehea_poll_eq(port->qp_eq);
918 } 936 }
919 937
920 ehea_schedule_port_reset(port); 938 if (reset_port) {
939 ehea_error("Resetting port");
940 ehea_schedule_port_reset(port);
941 }
921 942
922 return IRQ_HANDLED; 943 return IRQ_HANDLED;
923} 944}
@@ -1621,7 +1642,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
1621{ 1642{
1622 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; 1643 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1623 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; 1644 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1624 int skb_data_size = skb->len - skb->data_len; 1645 int skb_data_size = skb_headlen(skb);
1625 int headersize; 1646 int headersize;
1626 1647
1627 /* Packet is TCP with TSO enabled */ 1648 /* Packet is TCP with TSO enabled */
@@ -1632,7 +1653,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
1632 */ 1653 */
1633 headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb); 1654 headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1634 1655
1635 skb_data_size = skb->len - skb->data_len; 1656 skb_data_size = skb_headlen(skb);
1636 1657
1637 if (skb_data_size >= headersize) { 1658 if (skb_data_size >= headersize) {
1638 /* copy immediate data */ 1659 /* copy immediate data */
@@ -1654,7 +1675,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
1654static void write_swqe2_nonTSO(struct sk_buff *skb, 1675static void write_swqe2_nonTSO(struct sk_buff *skb,
1655 struct ehea_swqe *swqe, u32 lkey) 1676 struct ehea_swqe *swqe, u32 lkey)
1656{ 1677{
1657 int skb_data_size = skb->len - skb->data_len; 1678 int skb_data_size = skb_headlen(skb);
1658 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0]; 1679 u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
1659 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry; 1680 struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
1660 1681
@@ -1863,7 +1884,6 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
1863 port->promisc = enable; 1884 port->promisc = enable;
1864out: 1885out:
1865 free_page((unsigned long)cb7); 1886 free_page((unsigned long)cb7);
1866 return;
1867} 1887}
1868 1888
1869static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr, 1889static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
@@ -1970,7 +1990,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
1970static void ehea_set_multicast_list(struct net_device *dev) 1990static void ehea_set_multicast_list(struct net_device *dev)
1971{ 1991{
1972 struct ehea_port *port = netdev_priv(dev); 1992 struct ehea_port *port = netdev_priv(dev);
1973 struct dev_mc_list *k_mcl_entry; 1993 struct netdev_hw_addr *ha;
1974 int ret; 1994 int ret;
1975 1995
1976 if (dev->flags & IFF_PROMISC) { 1996 if (dev->flags & IFF_PROMISC) {
@@ -2001,13 +2021,12 @@ static void ehea_set_multicast_list(struct net_device *dev)
2001 goto out; 2021 goto out;
2002 } 2022 }
2003 2023
2004 netdev_for_each_mc_addr(k_mcl_entry, dev) 2024 netdev_for_each_mc_addr(ha, dev)
2005 ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr); 2025 ehea_add_multicast_entry(port, ha->addr);
2006 2026
2007 } 2027 }
2008out: 2028out:
2009 ehea_update_bcmc_registrations(); 2029 ehea_update_bcmc_registrations();
2010 return;
2011} 2030}
2012 2031
2013static int ehea_change_mtu(struct net_device *dev, int new_mtu) 2032static int ehea_change_mtu(struct net_device *dev, int new_mtu)
@@ -2111,8 +2130,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
2111 } else { 2130 } else {
2112 /* first copy data from the skb->data buffer ... */ 2131 /* first copy data from the skb->data buffer ... */
2113 skb_copy_from_linear_data(skb, imm_data, 2132 skb_copy_from_linear_data(skb, imm_data,
2114 skb->len - skb->data_len); 2133 skb_headlen(skb));
2115 imm_data += skb->len - skb->data_len; 2134 imm_data += skb_headlen(skb);
2116 2135
2117 /* ... then copy data from the fragments */ 2136 /* ... then copy data from the fragments */
2118 for (i = 0; i < nfrags; i++) { 2137 for (i = 0; i < nfrags; i++) {
@@ -2223,7 +2242,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2223 } 2242 }
2224 spin_unlock_irqrestore(&pr->netif_queue, flags); 2243 spin_unlock_irqrestore(&pr->netif_queue, flags);
2225 } 2244 }
2226 dev->trans_start = jiffies; 2245 dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
2227 spin_unlock(&pr->xmit_lock); 2246 spin_unlock(&pr->xmit_lock);
2228 2247
2229 return NETDEV_TX_OK; 2248 return NETDEV_TX_OK;
@@ -2320,7 +2339,6 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2320 ehea_error("modify_ehea_port failed"); 2339 ehea_error("modify_ehea_port failed");
2321out: 2340out:
2322 free_page((unsigned long)cb1); 2341 free_page((unsigned long)cb1);
2323 return;
2324} 2342}
2325 2343
2326int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp) 2344int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
@@ -2863,7 +2881,6 @@ static void ehea_reset_port(struct work_struct *work)
2863 netif_wake_queue(dev); 2881 netif_wake_queue(dev);
2864out: 2882out:
2865 mutex_unlock(&port->port_lock); 2883 mutex_unlock(&port->port_lock);
2866 return;
2867} 2884}
2868 2885
2869static void ehea_rereg_mrs(struct work_struct *work) 2886static void ehea_rereg_mrs(struct work_struct *work)
@@ -2871,7 +2888,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
2871 int ret, i; 2888 int ret, i;
2872 struct ehea_adapter *adapter; 2889 struct ehea_adapter *adapter;
2873 2890
2874 mutex_lock(&dlpar_mem_lock);
2875 ehea_info("LPAR memory changed - re-initializing driver"); 2891 ehea_info("LPAR memory changed - re-initializing driver");
2876 2892
2877 list_for_each_entry(adapter, &adapter_list, list) 2893 list_for_each_entry(adapter, &adapter_list, list)
@@ -2941,7 +2957,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
2941 } 2957 }
2942 ehea_info("re-initializing driver complete"); 2958 ehea_info("re-initializing driver complete");
2943out: 2959out:
2944 mutex_unlock(&dlpar_mem_lock);
2945 return; 2960 return;
2946} 2961}
2947 2962
@@ -3241,7 +3256,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
3241 ehea_remove_adapter_mr(adapter); 3256 ehea_remove_adapter_mr(adapter);
3242 3257
3243 i++; 3258 i++;
3244 }; 3259 }
3245 return 0; 3260 return 0;
3246} 3261}
3247 3262
@@ -3260,7 +3275,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
3260 if (dn_log_port_id) 3275 if (dn_log_port_id)
3261 if (*dn_log_port_id == logical_port_id) 3276 if (*dn_log_port_id == logical_port_id)
3262 return eth_dn; 3277 return eth_dn;
3263 }; 3278 }
3264 3279
3265 return NULL; 3280 return NULL;
3266} 3281}
@@ -3524,7 +3539,14 @@ void ehea_crash_handler(void)
3524static int ehea_mem_notifier(struct notifier_block *nb, 3539static int ehea_mem_notifier(struct notifier_block *nb,
3525 unsigned long action, void *data) 3540 unsigned long action, void *data)
3526{ 3541{
3542 int ret = NOTIFY_BAD;
3527 struct memory_notify *arg = data; 3543 struct memory_notify *arg = data;
3544
3545 if (!mutex_trylock(&dlpar_mem_lock)) {
3546 ehea_info("ehea_mem_notifier must not be called parallelized");
3547 goto out;
3548 }
3549
3528 switch (action) { 3550 switch (action) {
3529 case MEM_CANCEL_OFFLINE: 3551 case MEM_CANCEL_OFFLINE:
3530 ehea_info("memory offlining canceled"); 3552 ehea_info("memory offlining canceled");
@@ -3533,14 +3555,14 @@ static int ehea_mem_notifier(struct notifier_block *nb,
3533 ehea_info("memory is going online"); 3555 ehea_info("memory is going online");
3534 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3556 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3535 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) 3557 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3536 return NOTIFY_BAD; 3558 goto out_unlock;
3537 ehea_rereg_mrs(NULL); 3559 ehea_rereg_mrs(NULL);
3538 break; 3560 break;
3539 case MEM_GOING_OFFLINE: 3561 case MEM_GOING_OFFLINE:
3540 ehea_info("memory is going offline"); 3562 ehea_info("memory is going offline");
3541 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3563 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3542 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) 3564 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3543 return NOTIFY_BAD; 3565 goto out_unlock;
3544 ehea_rereg_mrs(NULL); 3566 ehea_rereg_mrs(NULL);
3545 break; 3567 break;
3546 default: 3568 default:
@@ -3548,8 +3570,12 @@ static int ehea_mem_notifier(struct notifier_block *nb,
3548 } 3570 }
3549 3571
3550 ehea_update_firmware_handles(); 3572 ehea_update_firmware_handles();
3573 ret = NOTIFY_OK;
3551 3574
3552 return NOTIFY_OK; 3575out_unlock:
3576 mutex_unlock(&dlpar_mem_lock);
3577out:
3578 return ret;
3553} 3579}
3554 3580
3555static struct notifier_block ehea_mem_nb = { 3581static struct notifier_block ehea_mem_nb = {
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index a1b4c7e56367..89128b6373e3 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -229,14 +229,14 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
229 229
230int ehea_destroy_cq(struct ehea_cq *cq) 230int ehea_destroy_cq(struct ehea_cq *cq)
231{ 231{
232 u64 hret; 232 u64 hret, aer, aerr;
233 if (!cq) 233 if (!cq)
234 return 0; 234 return 0;
235 235
236 hcp_epas_dtor(&cq->epas); 236 hcp_epas_dtor(&cq->epas);
237 hret = ehea_destroy_cq_res(cq, NORMAL_FREE); 237 hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
238 if (hret == H_R_STATE) { 238 if (hret == H_R_STATE) {
239 ehea_error_data(cq->adapter, cq->fw_handle); 239 ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
240 hret = ehea_destroy_cq_res(cq, FORCE_FREE); 240 hret = ehea_destroy_cq_res(cq, FORCE_FREE);
241 } 241 }
242 242
@@ -357,7 +357,7 @@ u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
357 357
358int ehea_destroy_eq(struct ehea_eq *eq) 358int ehea_destroy_eq(struct ehea_eq *eq)
359{ 359{
360 u64 hret; 360 u64 hret, aer, aerr;
361 if (!eq) 361 if (!eq)
362 return 0; 362 return 0;
363 363
@@ -365,7 +365,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
365 365
366 hret = ehea_destroy_eq_res(eq, NORMAL_FREE); 366 hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
367 if (hret == H_R_STATE) { 367 if (hret == H_R_STATE) {
368 ehea_error_data(eq->adapter, eq->fw_handle); 368 ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
369 hret = ehea_destroy_eq_res(eq, FORCE_FREE); 369 hret = ehea_destroy_eq_res(eq, FORCE_FREE);
370 } 370 }
371 371
@@ -540,7 +540,7 @@ u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
540 540
541int ehea_destroy_qp(struct ehea_qp *qp) 541int ehea_destroy_qp(struct ehea_qp *qp)
542{ 542{
543 u64 hret; 543 u64 hret, aer, aerr;
544 if (!qp) 544 if (!qp)
545 return 0; 545 return 0;
546 546
@@ -548,7 +548,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
548 548
549 hret = ehea_destroy_qp_res(qp, NORMAL_FREE); 549 hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
550 if (hret == H_R_STATE) { 550 if (hret == H_R_STATE) {
551 ehea_error_data(qp->adapter, qp->fw_handle); 551 ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
552 hret = ehea_destroy_qp_res(qp, FORCE_FREE); 552 hret = ehea_destroy_qp_res(qp, FORCE_FREE);
553 } 553 }
554 554
@@ -986,42 +986,45 @@ void print_error_data(u64 *data)
986 if (length > EHEA_PAGESIZE) 986 if (length > EHEA_PAGESIZE)
987 length = EHEA_PAGESIZE; 987 length = EHEA_PAGESIZE;
988 988
989 if (type == 0x8) /* Queue Pair */ 989 if (type == EHEA_AER_RESTYPE_QP)
990 ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, " 990 ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
991 "port=%llX", resource, data[6], data[12], data[22]); 991 "port=%llX", resource, data[6], data[12], data[22]);
992 992 else if (type == EHEA_AER_RESTYPE_CQ)
993 if (type == 0x4) /* Completion Queue */
994 ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource, 993 ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
995 data[6]); 994 data[6]);
996 995 else if (type == EHEA_AER_RESTYPE_EQ)
997 if (type == 0x3) /* Event Queue */
998 ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource, 996 ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
999 data[6]); 997 data[6]);
1000 998
1001 ehea_dump(data, length, "error data"); 999 ehea_dump(data, length, "error data");
1002} 1000}
1003 1001
1004void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle) 1002u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
1003 u64 *aer, u64 *aerr)
1005{ 1004{
1006 unsigned long ret; 1005 unsigned long ret;
1007 u64 *rblock; 1006 u64 *rblock;
1007 u64 type = 0;
1008 1008
1009 rblock = (void *)get_zeroed_page(GFP_KERNEL); 1009 rblock = (void *)get_zeroed_page(GFP_KERNEL);
1010 if (!rblock) { 1010 if (!rblock) {
1011 ehea_error("Cannot allocate rblock memory."); 1011 ehea_error("Cannot allocate rblock memory.");
1012 return; 1012 goto out;
1013 } 1013 }
1014 1014
1015 ret = ehea_h_error_data(adapter->handle, 1015 ret = ehea_h_error_data(adapter->handle, res_handle, rblock);
1016 res_handle,
1017 rblock);
1018 1016
1019 if (ret == H_R_STATE) 1017 if (ret == H_SUCCESS) {
1020 ehea_error("No error data is available: %llX.", res_handle); 1018 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
1021 else if (ret == H_SUCCESS) 1019 *aer = rblock[6];
1020 *aerr = rblock[12];
1022 print_error_data(rblock); 1021 print_error_data(rblock);
1023 else 1022 } else if (ret == H_R_STATE) {
1023 ehea_error("No error data available: %llX.", res_handle);
1024 } else
1024 ehea_error("Error data could not be fetched: %llX", res_handle); 1025 ehea_error("Error data could not be fetched: %llX", res_handle);
1025 1026
1026 free_page((unsigned long)rblock); 1027 free_page((unsigned long)rblock);
1028out:
1029 return type;
1027} 1030}
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0817c1e74a19..882c50c9c34f 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -154,6 +154,9 @@ struct ehea_rwqe {
154#define EHEA_CQE_STAT_ERR_IP 0x2000 154#define EHEA_CQE_STAT_ERR_IP 0x2000
155#define EHEA_CQE_STAT_ERR_CRC 0x1000 155#define EHEA_CQE_STAT_ERR_CRC 0x1000
156 156
157/* Defines which bad send cqe stati lead to a port reset */
158#define EHEA_CQE_STAT_RESET_MASK 0x0002
159
157struct ehea_cqe { 160struct ehea_cqe {
158 u64 wr_id; /* work request ID from WQE */ 161 u64 wr_id; /* work request ID from WQE */
159 u8 type; 162 u8 type;
@@ -187,6 +190,14 @@ struct ehea_cqe {
187#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55) 190#define EHEA_EQE_SM_MECH_NUMBER EHEA_BMASK_IBM(48, 55)
188#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63) 191#define EHEA_EQE_SM_PORT_NUMBER EHEA_BMASK_IBM(56, 63)
189 192
193#define EHEA_AER_RESTYPE_QP 0x8
194#define EHEA_AER_RESTYPE_CQ 0x4
195#define EHEA_AER_RESTYPE_EQ 0x3
196
197/* Defines which affiliated errors lead to a port reset */
198#define EHEA_AER_RESET_MASK 0xFFFFFFFFFEFFFFFFULL
199#define EHEA_AERR_RESET_MASK 0xFFFFFFFFFFFFFFFFULL
200
190struct ehea_eqe { 201struct ehea_eqe {
191 u64 entry; 202 u64 entry;
192}; 203};
@@ -379,7 +390,8 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
379 390
380int ehea_rem_mr(struct ehea_mr *mr); 391int ehea_rem_mr(struct ehea_mr *mr);
381 392
382void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle); 393u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
394 u64 *aer, u64 *aerr);
383 395
384int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages); 396int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
385int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages); 397int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index ff27f728fd9d..112c5aa9af7f 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1293,8 +1293,6 @@ static netdev_tx_t enc28j60_send_packet(struct sk_buff *skb,
1293 */ 1293 */
1294 netif_stop_queue(dev); 1294 netif_stop_queue(dev);
1295 1295
1296 /* save the timestamp */
1297 priv->netdev->trans_start = jiffies;
1298 /* Remember the skb for deferred processing */ 1296 /* Remember the skb for deferred processing */
1299 priv->tx_skb = skb; 1297 priv->tx_skb = skb;
1300 schedule_work(&priv->tx_work); 1298 schedule_work(&priv->tx_work);
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index 391c3bce5b79..e7b6c31880ba 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -1,5 +1,5 @@
1obj-$(CONFIG_ENIC) := enic.o 1obj-$(CONFIG_ENIC) := enic.o
2 2
3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \ 3enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
4 enic_res.o vnic_dev.o vnic_rq.o 4 enic_res.o vnic_dev.o vnic_rq.o vnic_vic.o
5 5
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
index 03dce9ed612c..337d1943af46 100644
--- a/drivers/net/enic/cq_enet_desc.h
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -101,14 +101,18 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
101 u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok, 101 u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
102 u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok) 102 u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
103{ 103{
104 u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags); 104 u16 completed_index_flags;
105 u16 q_number_rss_type_flags = 105 u16 q_number_rss_type_flags;
106 le16_to_cpu(desc->q_number_rss_type_flags); 106 u16 bytes_written_flags;
107 u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
108 107
109 cq_desc_dec((struct cq_desc *)desc, type, 108 cq_desc_dec((struct cq_desc *)desc, type,
110 color, q_number, completed_index); 109 color, q_number, completed_index);
111 110
111 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
112 q_number_rss_type_flags =
113 le16_to_cpu(desc->q_number_rss_type_flags);
114 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
115
112 *ingress_port = (completed_index_flags & 116 *ingress_port = (completed_index_flags &
113 CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0; 117 CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
114 *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ? 118 *fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index ee01f5a6d0d4..85f2a2e7030a 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -33,8 +33,8 @@
33#include "vnic_rss.h" 33#include "vnic_rss.h"
34 34
35#define DRV_NAME "enic" 35#define DRV_NAME "enic"
36#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver" 36#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
37#define DRV_VERSION "1.1.0.241a" 37#define DRV_VERSION "1.3.1.1-pp"
38#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc" 38#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
39#define PFX DRV_NAME ": " 39#define PFX DRV_NAME ": "
40 40
@@ -74,6 +74,13 @@ struct enic_msix_entry {
74 void *devid; 74 void *devid;
75}; 75};
76 76
77struct enic_port_profile {
78 u8 request;
79 char name[PORT_PROFILE_MAX];
80 u8 instance_uuid[PORT_UUID_MAX];
81 u8 host_uuid[PORT_UUID_MAX];
82};
83
77/* Per-instance private data structure */ 84/* Per-instance private data structure */
78struct enic { 85struct enic {
79 struct net_device *netdev; 86 struct net_device *netdev;
@@ -95,6 +102,7 @@ struct enic {
95 u32 port_mtu; 102 u32 port_mtu;
96 u32 rx_coalesce_usecs; 103 u32 rx_coalesce_usecs;
97 u32 tx_coalesce_usecs; 104 u32 tx_coalesce_usecs;
105 struct enic_port_profile pp;
98 106
99 /* work queue cache line section */ 107 /* work queue cache line section */
100 ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX]; 108 ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index cf098bb636b8..e125113759a5 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -29,6 +29,7 @@
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/if_ether.h> 30#include <linux/if_ether.h>
31#include <linux/if_vlan.h> 31#include <linux/if_vlan.h>
32#include <linux/if_link.h>
32#include <linux/ethtool.h> 33#include <linux/ethtool.h>
33#include <linux/in.h> 34#include <linux/in.h>
34#include <linux/ip.h> 35#include <linux/ip.h>
@@ -40,6 +41,7 @@
40#include "vnic_dev.h" 41#include "vnic_dev.h"
41#include "vnic_intr.h" 42#include "vnic_intr.h"
42#include "vnic_stats.h" 43#include "vnic_stats.h"
44#include "vnic_vic.h"
43#include "enic_res.h" 45#include "enic_res.h"
44#include "enic.h" 46#include "enic.h"
45 47
@@ -49,10 +51,12 @@
49#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1) 51#define ENIC_DESC_MAX_SPLITS (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
50 52
51#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */ 53#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
54#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
52 55
53/* Supported devices */ 56/* Supported devices */
54static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = { 57static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
55 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) }, 58 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
59 { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
56 { 0, } /* end of table */ 60 { 0, } /* end of table */
57}; 61};
58 62
@@ -113,6 +117,11 @@ static const struct enic_stat enic_rx_stats[] = {
113static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats); 117static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
114static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats); 118static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
115 119
120static int enic_is_dynamic(struct enic *enic)
121{
122 return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
123}
124
116static int enic_get_settings(struct net_device *netdev, 125static int enic_get_settings(struct net_device *netdev,
117 struct ethtool_cmd *ecmd) 126 struct ethtool_cmd *ecmd)
118{ 127{
@@ -810,26 +819,90 @@ static void enic_reset_mcaddrs(struct enic *enic)
810 819
811static int enic_set_mac_addr(struct net_device *netdev, char *addr) 820static int enic_set_mac_addr(struct net_device *netdev, char *addr)
812{ 821{
813 if (!is_valid_ether_addr(addr)) 822 struct enic *enic = netdev_priv(netdev);
814 return -EADDRNOTAVAIL; 823
824 if (enic_is_dynamic(enic)) {
825 if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
826 return -EADDRNOTAVAIL;
827 } else {
828 if (!is_valid_ether_addr(addr))
829 return -EADDRNOTAVAIL;
830 }
815 831
816 memcpy(netdev->dev_addr, addr, netdev->addr_len); 832 memcpy(netdev->dev_addr, addr, netdev->addr_len);
817 833
818 return 0; 834 return 0;
819} 835}
820 836
837static int enic_dev_add_station_addr(struct enic *enic)
838{
839 int err = 0;
840
841 if (is_valid_ether_addr(enic->netdev->dev_addr)) {
842 spin_lock(&enic->devcmd_lock);
843 err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
844 spin_unlock(&enic->devcmd_lock);
845 }
846
847 return err;
848}
849
850static int enic_dev_del_station_addr(struct enic *enic)
851{
852 int err = 0;
853
854 if (is_valid_ether_addr(enic->netdev->dev_addr)) {
855 spin_lock(&enic->devcmd_lock);
856 err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
857 spin_unlock(&enic->devcmd_lock);
858 }
859
860 return err;
861}
862
863static int enic_set_mac_address_dynamic(struct net_device *netdev, void *p)
864{
865 struct enic *enic = netdev_priv(netdev);
866 struct sockaddr *saddr = p;
867 char *addr = saddr->sa_data;
868 int err;
869
870 if (netif_running(enic->netdev)) {
871 err = enic_dev_del_station_addr(enic);
872 if (err)
873 return err;
874 }
875
876 err = enic_set_mac_addr(netdev, addr);
877 if (err)
878 return err;
879
880 if (netif_running(enic->netdev)) {
881 err = enic_dev_add_station_addr(enic);
882 if (err)
883 return err;
884 }
885
886 return err;
887}
888
889static int enic_set_mac_address(struct net_device *netdev, void *p)
890{
891 return -EOPNOTSUPP;
892}
893
821/* netif_tx_lock held, BHs disabled */ 894/* netif_tx_lock held, BHs disabled */
822static void enic_set_multicast_list(struct net_device *netdev) 895static void enic_set_multicast_list(struct net_device *netdev)
823{ 896{
824 struct enic *enic = netdev_priv(netdev); 897 struct enic *enic = netdev_priv(netdev);
825 struct dev_mc_list *list; 898 struct netdev_hw_addr *ha;
826 int directed = 1; 899 int directed = 1;
827 int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0; 900 int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
828 int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0; 901 int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
829 int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0; 902 int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
830 unsigned int mc_count = netdev_mc_count(netdev); 903 unsigned int mc_count = netdev_mc_count(netdev);
831 int allmulti = (netdev->flags & IFF_ALLMULTI) || 904 int allmulti = (netdev->flags & IFF_ALLMULTI) ||
832 mc_count > ENIC_MULTICAST_PERFECT_FILTERS; 905 mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
833 unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0); 906 unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
834 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; 907 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
835 unsigned int i, j; 908 unsigned int i, j;
@@ -852,10 +925,10 @@ static void enic_set_multicast_list(struct net_device *netdev)
852 */ 925 */
853 926
854 i = 0; 927 i = 0;
855 netdev_for_each_mc_addr(list, netdev) { 928 netdev_for_each_mc_addr(ha, netdev) {
856 if (i == mc_count) 929 if (i == mc_count)
857 break; 930 break;
858 memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN); 931 memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
859 } 932 }
860 933
861 for (i = 0; i < enic->mc_count; i++) { 934 for (i = 0; i < enic->mc_count; i++) {
@@ -922,6 +995,213 @@ static void enic_tx_timeout(struct net_device *netdev)
922 schedule_work(&enic->reset); 995 schedule_work(&enic->reset);
923} 996}
924 997
998static int enic_vnic_dev_deinit(struct enic *enic)
999{
1000 int err;
1001
1002 spin_lock(&enic->devcmd_lock);
1003 err = vnic_dev_deinit(enic->vdev);
1004 spin_unlock(&enic->devcmd_lock);
1005
1006 return err;
1007}
1008
1009static int enic_dev_init_prov(struct enic *enic, struct vic_provinfo *vp)
1010{
1011 int err;
1012
1013 spin_lock(&enic->devcmd_lock);
1014 err = vnic_dev_init_prov(enic->vdev,
1015 (u8 *)vp, vic_provinfo_size(vp));
1016 spin_unlock(&enic->devcmd_lock);
1017
1018 return err;
1019}
1020
1021static int enic_dev_init_done(struct enic *enic, int *done, int *error)
1022{
1023 int err;
1024
1025 spin_lock(&enic->devcmd_lock);
1026 err = vnic_dev_init_done(enic->vdev, done, error);
1027 spin_unlock(&enic->devcmd_lock);
1028
1029 return err;
1030}
1031
1032static int enic_set_port_profile(struct enic *enic, u8 request, u8 *mac,
1033 char *name, u8 *instance_uuid, u8 *host_uuid)
1034{
1035 struct vic_provinfo *vp;
1036 u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
1037 unsigned short *uuid;
1038 char uuid_str[38];
1039 static char *uuid_fmt = "%04X%04X-%04X-%04X-%04X-%04X%04X%04X";
1040 int err;
1041
1042 if (!name)
1043 return -EINVAL;
1044
1045 if (!is_valid_ether_addr(mac))
1046 return -EADDRNOTAVAIL;
1047
1048 vp = vic_provinfo_alloc(GFP_KERNEL, oui, VIC_PROVINFO_LINUX_TYPE);
1049 if (!vp)
1050 return -ENOMEM;
1051
1052 vic_provinfo_add_tlv(vp,
1053 VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
1054 strlen(name) + 1, name);
1055
1056 vic_provinfo_add_tlv(vp,
1057 VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
1058 ETH_ALEN, mac);
1059
1060 if (instance_uuid) {
1061 uuid = (unsigned short *)instance_uuid;
1062 sprintf(uuid_str, uuid_fmt,
1063 uuid[0], uuid[1], uuid[2], uuid[3],
1064 uuid[4], uuid[5], uuid[6], uuid[7]);
1065 vic_provinfo_add_tlv(vp,
1066 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
1067 sizeof(uuid_str), uuid_str);
1068 }
1069
1070 if (host_uuid) {
1071 uuid = (unsigned short *)host_uuid;
1072 sprintf(uuid_str, uuid_fmt,
1073 uuid[0], uuid[1], uuid[2], uuid[3],
1074 uuid[4], uuid[5], uuid[6], uuid[7]);
1075 vic_provinfo_add_tlv(vp,
1076 VIC_LINUX_PROV_TLV_HOST_UUID_STR,
1077 sizeof(uuid_str), uuid_str);
1078 }
1079
1080 err = enic_vnic_dev_deinit(enic);
1081 if (err)
1082 goto err_out;
1083
1084 memset(&enic->pp, 0, sizeof(enic->pp));
1085
1086 err = enic_dev_init_prov(enic, vp);
1087 if (err)
1088 goto err_out;
1089
1090 enic->pp.request = request;
1091 memcpy(enic->pp.name, name, PORT_PROFILE_MAX);
1092 if (instance_uuid)
1093 memcpy(enic->pp.instance_uuid,
1094 instance_uuid, PORT_UUID_MAX);
1095 if (host_uuid)
1096 memcpy(enic->pp.host_uuid,
1097 host_uuid, PORT_UUID_MAX);
1098
1099err_out:
1100 vic_provinfo_free(vp);
1101
1102 return err;
1103}
1104
1105static int enic_unset_port_profile(struct enic *enic)
1106{
1107 memset(&enic->pp, 0, sizeof(enic->pp));
1108 return enic_vnic_dev_deinit(enic);
1109}
1110
1111static int enic_set_vf_port(struct net_device *netdev, int vf,
1112 struct nlattr *port[])
1113{
1114 struct enic *enic = netdev_priv(netdev);
1115 char *name = NULL;
1116 u8 *instance_uuid = NULL;
1117 u8 *host_uuid = NULL;
1118 u8 request = PORT_REQUEST_DISASSOCIATE;
1119
1120 /* don't support VFs, yet */
1121 if (vf != PORT_SELF_VF)
1122 return -EOPNOTSUPP;
1123
1124 if (port[IFLA_PORT_REQUEST])
1125 request = nla_get_u8(port[IFLA_PORT_REQUEST]);
1126
1127 switch (request) {
1128 case PORT_REQUEST_ASSOCIATE:
1129
1130 if (port[IFLA_PORT_PROFILE])
1131 name = nla_data(port[IFLA_PORT_PROFILE]);
1132
1133 if (port[IFLA_PORT_INSTANCE_UUID])
1134 instance_uuid =
1135 nla_data(port[IFLA_PORT_INSTANCE_UUID]);
1136
1137 if (port[IFLA_PORT_HOST_UUID])
1138 host_uuid = nla_data(port[IFLA_PORT_HOST_UUID]);
1139
1140 return enic_set_port_profile(enic, request,
1141 netdev->dev_addr, name,
1142 instance_uuid, host_uuid);
1143
1144 case PORT_REQUEST_DISASSOCIATE:
1145
1146 return enic_unset_port_profile(enic);
1147
1148 default:
1149 break;
1150 }
1151
1152 return -EOPNOTSUPP;
1153}
1154
1155static int enic_get_vf_port(struct net_device *netdev, int vf,
1156 struct sk_buff *skb)
1157{
1158 struct enic *enic = netdev_priv(netdev);
1159 int err, error, done;
1160 u16 response = PORT_PROFILE_RESPONSE_SUCCESS;
1161
1162 /* don't support VFs, yet */
1163 if (vf != PORT_SELF_VF)
1164 return -EOPNOTSUPP;
1165
1166 err = enic_dev_init_done(enic, &done, &error);
1167
1168 if (err)
1169 return err;
1170
1171 switch (error) {
1172 case ERR_SUCCESS:
1173 if (!done)
1174 response = PORT_PROFILE_RESPONSE_INPROGRESS;
1175 break;
1176 case ERR_EINVAL:
1177 response = PORT_PROFILE_RESPONSE_INVALID;
1178 break;
1179 case ERR_EBADSTATE:
1180 response = PORT_PROFILE_RESPONSE_BADSTATE;
1181 break;
1182 case ERR_ENOMEM:
1183 response = PORT_PROFILE_RESPONSE_INSUFFICIENT_RESOURCES;
1184 break;
1185 default:
1186 response = PORT_PROFILE_RESPONSE_ERROR;
1187 break;
1188 }
1189
1190 NLA_PUT_U16(skb, IFLA_PORT_REQUEST, enic->pp.request);
1191 NLA_PUT_U16(skb, IFLA_PORT_RESPONSE, response);
1192 NLA_PUT(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX,
1193 enic->pp.name);
1194 NLA_PUT(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
1195 enic->pp.instance_uuid);
1196 NLA_PUT(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX,
1197 enic->pp.host_uuid);
1198
1199 return 0;
1200
1201nla_put_failure:
1202 return -EMSGSIZE;
1203}
1204
925static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) 1205static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
926{ 1206{
927 struct enic *enic = vnic_dev_priv(rq->vdev); 1207 struct enic *enic = vnic_dev_priv(rq->vdev);
@@ -1440,9 +1720,7 @@ static int enic_open(struct net_device *netdev)
1440 for (i = 0; i < enic->rq_count; i++) 1720 for (i = 0; i < enic->rq_count; i++)
1441 vnic_rq_enable(&enic->rq[i]); 1721 vnic_rq_enable(&enic->rq[i]);
1442 1722
1443 spin_lock(&enic->devcmd_lock); 1723 enic_dev_add_station_addr(enic);
1444 enic_add_station_addr(enic);
1445 spin_unlock(&enic->devcmd_lock);
1446 enic_set_multicast_list(netdev); 1724 enic_set_multicast_list(netdev);
1447 1725
1448 netif_wake_queue(netdev); 1726 netif_wake_queue(netdev);
@@ -1489,6 +1767,8 @@ static int enic_stop(struct net_device *netdev)
1489 netif_carrier_off(netdev); 1767 netif_carrier_off(netdev);
1490 netif_tx_disable(netdev); 1768 netif_tx_disable(netdev);
1491 1769
1770 enic_dev_del_station_addr(enic);
1771
1492 for (i = 0; i < enic->wq_count; i++) { 1772 for (i = 0; i < enic->wq_count; i++) {
1493 err = vnic_wq_disable(&enic->wq[i]); 1773 err = vnic_wq_disable(&enic->wq[i]);
1494 if (err) 1774 if (err)
@@ -1774,14 +2054,34 @@ static void enic_clear_intr_mode(struct enic *enic)
1774 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); 2054 vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
1775} 2055}
1776 2056
2057static const struct net_device_ops enic_netdev_dynamic_ops = {
2058 .ndo_open = enic_open,
2059 .ndo_stop = enic_stop,
2060 .ndo_start_xmit = enic_hard_start_xmit,
2061 .ndo_get_stats = enic_get_stats,
2062 .ndo_validate_addr = eth_validate_addr,
2063 .ndo_set_multicast_list = enic_set_multicast_list,
2064 .ndo_set_mac_address = enic_set_mac_address_dynamic,
2065 .ndo_change_mtu = enic_change_mtu,
2066 .ndo_vlan_rx_register = enic_vlan_rx_register,
2067 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
2068 .ndo_vlan_rx_kill_vid = enic_vlan_rx_kill_vid,
2069 .ndo_tx_timeout = enic_tx_timeout,
2070 .ndo_set_vf_port = enic_set_vf_port,
2071 .ndo_get_vf_port = enic_get_vf_port,
2072#ifdef CONFIG_NET_POLL_CONTROLLER
2073 .ndo_poll_controller = enic_poll_controller,
2074#endif
2075};
2076
1777static const struct net_device_ops enic_netdev_ops = { 2077static const struct net_device_ops enic_netdev_ops = {
1778 .ndo_open = enic_open, 2078 .ndo_open = enic_open,
1779 .ndo_stop = enic_stop, 2079 .ndo_stop = enic_stop,
1780 .ndo_start_xmit = enic_hard_start_xmit, 2080 .ndo_start_xmit = enic_hard_start_xmit,
1781 .ndo_get_stats = enic_get_stats, 2081 .ndo_get_stats = enic_get_stats,
1782 .ndo_validate_addr = eth_validate_addr, 2082 .ndo_validate_addr = eth_validate_addr,
1783 .ndo_set_mac_address = eth_mac_addr,
1784 .ndo_set_multicast_list = enic_set_multicast_list, 2083 .ndo_set_multicast_list = enic_set_multicast_list,
2084 .ndo_set_mac_address = enic_set_mac_address,
1785 .ndo_change_mtu = enic_change_mtu, 2085 .ndo_change_mtu = enic_change_mtu,
1786 .ndo_vlan_rx_register = enic_vlan_rx_register, 2086 .ndo_vlan_rx_register = enic_vlan_rx_register,
1787 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, 2087 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
@@ -2010,11 +2310,13 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2010 2310
2011 netif_carrier_off(netdev); 2311 netif_carrier_off(netdev);
2012 2312
2013 err = vnic_dev_init(enic->vdev, 0); 2313 if (!enic_is_dynamic(enic)) {
2014 if (err) { 2314 err = vnic_dev_init(enic->vdev, 0);
2015 printk(KERN_ERR PFX 2315 if (err) {
2016 "vNIC dev init failed, aborting.\n"); 2316 printk(KERN_ERR PFX
2017 goto err_out_dev_close; 2317 "vNIC dev init failed, aborting.\n");
2318 goto err_out_dev_close;
2319 }
2018 } 2320 }
2019 2321
2020 err = enic_dev_init(enic); 2322 err = enic_dev_init(enic);
@@ -2054,12 +2356,15 @@ static int __devinit enic_probe(struct pci_dev *pdev,
2054 enic->tx_coalesce_usecs = enic->config.intr_timer_usec; 2356 enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
2055 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs; 2357 enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
2056 2358
2057 netdev->netdev_ops = &enic_netdev_ops; 2359 if (enic_is_dynamic(enic))
2360 netdev->netdev_ops = &enic_netdev_dynamic_ops;
2361 else
2362 netdev->netdev_ops = &enic_netdev_ops;
2363
2058 netdev->watchdog_timeo = 2 * HZ; 2364 netdev->watchdog_timeo = 2 * HZ;
2059 netdev->ethtool_ops = &enic_ethtool_ops; 2365 netdev->ethtool_ops = &enic_ethtool_ops;
2060 2366
2061 netdev->features |= NETIF_F_HW_VLAN_TX | 2367 netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2062 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
2063 if (ENIC_SETTING(enic, TXCSUM)) 2368 if (ENIC_SETTING(enic, TXCSUM))
2064 netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM; 2369 netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
2065 if (ENIC_SETTING(enic, TSO)) 2370 if (ENIC_SETTING(enic, TSO))
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 02839bf0fe8b..9b18840cba96 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -103,11 +103,6 @@ int enic_get_vnic_config(struct enic *enic)
103 return 0; 103 return 0;
104} 104}
105 105
106void enic_add_station_addr(struct enic *enic)
107{
108 vnic_dev_add_addr(enic->vdev, enic->mac_addr);
109}
110
111void enic_add_multicast_addr(struct enic *enic, u8 *addr) 106void enic_add_multicast_addr(struct enic *enic, u8 *addr)
112{ 107{
113 vnic_dev_add_addr(enic->vdev, addr); 108 vnic_dev_add_addr(enic->vdev, addr);
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index abc19741ab02..494664f7fccc 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -131,7 +131,6 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
131struct enic; 131struct enic;
132 132
133int enic_get_vnic_config(struct enic *); 133int enic_get_vnic_config(struct enic *);
134void enic_add_station_addr(struct enic *enic);
135void enic_add_multicast_addr(struct enic *enic, u8 *addr); 134void enic_add_multicast_addr(struct enic *enic, u8 *addr);
136void enic_del_multicast_addr(struct enic *enic, u8 *addr); 135void enic_del_multicast_addr(struct enic *enic, u8 *addr);
137void enic_add_vlan(struct enic *enic, u16 vlanid); 136void enic_add_vlan(struct enic *enic, u16 vlanid);
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index cf22de71014e..2b3e16db5c82 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -530,7 +530,7 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
530 printk(KERN_ERR "Can't set packet filter\n"); 530 printk(KERN_ERR "Can't set packet filter\n");
531} 531}
532 532
533void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) 533int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
534{ 534{
535 u64 a0 = 0, a1 = 0; 535 u64 a0 = 0, a1 = 0;
536 int wait = 1000; 536 int wait = 1000;
@@ -543,9 +543,11 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
543 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); 543 err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait);
544 if (err) 544 if (err)
545 printk(KERN_ERR "Can't add addr [%pM], %d\n", addr, err); 545 printk(KERN_ERR "Can't add addr [%pM], %d\n", addr, err);
546
547 return err;
546} 548}
547 549
548void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) 550int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
549{ 551{
550 u64 a0 = 0, a1 = 0; 552 u64 a0 = 0, a1 = 0;
551 int wait = 1000; 553 int wait = 1000;
@@ -558,6 +560,8 @@ void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
558 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); 560 err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait);
559 if (err) 561 if (err)
560 printk(KERN_ERR "Can't del addr [%pM], %d\n", addr, err); 562 printk(KERN_ERR "Can't del addr [%pM], %d\n", addr, err);
563
564 return err;
561} 565}
562 566
563int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr) 567int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
@@ -574,22 +578,18 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
574 return err; 578 return err;
575} 579}
576 580
577int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr) 581int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
582 void *notify_addr, dma_addr_t notify_pa, u16 intr)
578{ 583{
579 u64 a0, a1; 584 u64 a0, a1;
580 int wait = 1000; 585 int wait = 1000;
581 int r; 586 int r;
582 587
583 if (!vdev->notify) { 588 memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
584 vdev->notify = pci_alloc_consistent(vdev->pdev, 589 vdev->notify = notify_addr;
585 sizeof(struct vnic_devcmd_notify), 590 vdev->notify_pa = notify_pa;
586 &vdev->notify_pa);
587 if (!vdev->notify)
588 return -ENOMEM;
589 memset(vdev->notify, 0, sizeof(struct vnic_devcmd_notify));
590 }
591 591
592 a0 = vdev->notify_pa; 592 a0 = (u64)notify_pa;
593 a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL; 593 a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
594 a1 += sizeof(struct vnic_devcmd_notify); 594 a1 += sizeof(struct vnic_devcmd_notify);
595 595
@@ -598,7 +598,27 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
598 return r; 598 return r;
599} 599}
600 600
601void vnic_dev_notify_unset(struct vnic_dev *vdev) 601int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
602{
603 void *notify_addr;
604 dma_addr_t notify_pa;
605
606 if (vdev->notify || vdev->notify_pa) {
607 printk(KERN_ERR "notify block %p still allocated",
608 vdev->notify);
609 return -EINVAL;
610 }
611
612 notify_addr = pci_alloc_consistent(vdev->pdev,
613 sizeof(struct vnic_devcmd_notify),
614 &notify_pa);
615 if (!notify_addr)
616 return -ENOMEM;
617
618 return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
619}
620
621void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
602{ 622{
603 u64 a0, a1; 623 u64 a0, a1;
604 int wait = 1000; 624 int wait = 1000;
@@ -608,9 +628,23 @@ void vnic_dev_notify_unset(struct vnic_dev *vdev)
608 a1 += sizeof(struct vnic_devcmd_notify); 628 a1 += sizeof(struct vnic_devcmd_notify);
609 629
610 vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait); 630 vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
631 vdev->notify = NULL;
632 vdev->notify_pa = 0;
611 vdev->notify_sz = 0; 633 vdev->notify_sz = 0;
612} 634}
613 635
636void vnic_dev_notify_unset(struct vnic_dev *vdev)
637{
638 if (vdev->notify) {
639 pci_free_consistent(vdev->pdev,
640 sizeof(struct vnic_devcmd_notify),
641 vdev->notify,
642 vdev->notify_pa);
643 }
644
645 vnic_dev_notify_unsetcmd(vdev);
646}
647
614static int vnic_dev_notify_ready(struct vnic_dev *vdev) 648static int vnic_dev_notify_ready(struct vnic_dev *vdev)
615{ 649{
616 u32 *words; 650 u32 *words;
@@ -652,6 +686,56 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg)
652 return r; 686 return r;
653} 687}
654 688
689int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err)
690{
691 u64 a0 = 0, a1 = 0;
692 int wait = 1000;
693 int ret;
694
695 *done = 0;
696
697 ret = vnic_dev_cmd(vdev, CMD_INIT_STATUS, &a0, &a1, wait);
698 if (ret)
699 return ret;
700
701 *done = (a0 == 0);
702
703 *err = (a0 == 0) ? a1 : 0;
704
705 return 0;
706}
707
708int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len)
709{
710 u64 a0, a1 = len;
711 int wait = 1000;
712 u64 prov_pa;
713 void *prov_buf;
714 int ret;
715
716 prov_buf = pci_alloc_consistent(vdev->pdev, len, &prov_pa);
717 if (!prov_buf)
718 return -ENOMEM;
719
720 memcpy(prov_buf, buf, len);
721
722 a0 = prov_pa;
723
724 ret = vnic_dev_cmd(vdev, CMD_INIT_PROV_INFO, &a0, &a1, wait);
725
726 pci_free_consistent(vdev->pdev, len, prov_buf, prov_pa);
727
728 return ret;
729}
730
731int vnic_dev_deinit(struct vnic_dev *vdev)
732{
733 u64 a0 = 0, a1 = 0;
734 int wait = 1000;
735
736 return vnic_dev_cmd(vdev, CMD_DEINIT, &a0, &a1, wait);
737}
738
655int vnic_dev_link_status(struct vnic_dev *vdev) 739int vnic_dev_link_status(struct vnic_dev *vdev)
656{ 740{
657 if (vdev->linkstatus) 741 if (vdev->linkstatus)
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index fc5e3eb35a5e..caccce36957b 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -103,11 +103,14 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
103int vnic_dev_hang_notify(struct vnic_dev *vdev); 103int vnic_dev_hang_notify(struct vnic_dev *vdev);
104void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, 104void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
105 int broadcast, int promisc, int allmulti); 105 int broadcast, int promisc, int allmulti);
106void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr); 106int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
107void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr); 107int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
108int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr); 108int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
109int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr); 109int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
110int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
111 void *notify_addr, dma_addr_t notify_pa, u16 intr);
110int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr); 112int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
113void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
111void vnic_dev_notify_unset(struct vnic_dev *vdev); 114void vnic_dev_notify_unset(struct vnic_dev *vdev);
112int vnic_dev_link_status(struct vnic_dev *vdev); 115int vnic_dev_link_status(struct vnic_dev *vdev);
113u32 vnic_dev_port_speed(struct vnic_dev *vdev); 116u32 vnic_dev_port_speed(struct vnic_dev *vdev);
@@ -121,6 +124,9 @@ int vnic_dev_disable(struct vnic_dev *vdev);
121int vnic_dev_open(struct vnic_dev *vdev, int arg); 124int vnic_dev_open(struct vnic_dev *vdev, int arg);
122int vnic_dev_open_done(struct vnic_dev *vdev, int *done); 125int vnic_dev_open_done(struct vnic_dev *vdev, int *done);
123int vnic_dev_init(struct vnic_dev *vdev, int arg); 126int vnic_dev_init(struct vnic_dev *vdev, int arg);
127int vnic_dev_init_done(struct vnic_dev *vdev, int *done, int *err);
128int vnic_dev_init_prov(struct vnic_dev *vdev, u8 *buf, u32 len);
129int vnic_dev_deinit(struct vnic_dev *vdev);
124int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); 130int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg);
125int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); 131int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done);
126void vnic_dev_set_intr_mode(struct vnic_dev *vdev, 132void vnic_dev_set_intr_mode(struct vnic_dev *vdev,
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index e186efaf9da1..cc580cfec41d 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -168,10 +168,10 @@ int vnic_rq_disable(struct vnic_rq *rq)
168 iowrite32(0, &rq->ctrl->enable); 168 iowrite32(0, &rq->ctrl->enable);
169 169
170 /* Wait for HW to ACK disable request */ 170 /* Wait for HW to ACK disable request */
171 for (wait = 0; wait < 100; wait++) { 171 for (wait = 0; wait < 1000; wait++) {
172 if (!(ioread32(&rq->ctrl->running))) 172 if (!(ioread32(&rq->ctrl->running)))
173 return 0; 173 return 0;
174 udelay(1); 174 udelay(10);
175 } 175 }
176 176
177 printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index); 177 printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
diff --git a/drivers/net/enic/vnic_vic.c b/drivers/net/enic/vnic_vic.c
new file mode 100644
index 000000000000..d769772998c6
--- /dev/null
+++ b/drivers/net/enic/vnic_vic.c
@@ -0,0 +1,73 @@
1/*
2 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#include <linux/kernel.h>
20#include <linux/errno.h>
21#include <linux/types.h>
22#include <linux/slab.h>
23
24#include "vnic_vic.h"
25
26struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type)
27{
28 struct vic_provinfo *vp = kzalloc(VIC_PROVINFO_MAX_DATA, flags);
29
30 if (!vp || !oui)
31 return NULL;
32
33 memcpy(vp->oui, oui, sizeof(vp->oui));
34 vp->type = type;
35 vp->length = htonl(sizeof(vp->num_tlvs));
36
37 return vp;
38}
39
40void vic_provinfo_free(struct vic_provinfo *vp)
41{
42 kfree(vp);
43}
44
45int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
46 void *value)
47{
48 struct vic_provinfo_tlv *tlv;
49
50 if (!vp || !value)
51 return -EINVAL;
52
53 if (ntohl(vp->length) + sizeof(*tlv) + length >
54 VIC_PROVINFO_MAX_TLV_DATA)
55 return -ENOMEM;
56
57 tlv = (struct vic_provinfo_tlv *)((u8 *)vp->tlv +
58 ntohl(vp->length) - sizeof(vp->num_tlvs));
59
60 tlv->type = htons(type);
61 tlv->length = htons(length);
62 memcpy(tlv->value, value, length);
63
64 vp->num_tlvs = htonl(ntohl(vp->num_tlvs) + 1);
65 vp->length = htonl(ntohl(vp->length) + sizeof(*tlv) + length);
66
67 return 0;
68}
69
70size_t vic_provinfo_size(struct vic_provinfo *vp)
71{
72 return vp ? ntohl(vp->length) + sizeof(*vp) - sizeof(vp->num_tlvs) : 0;
73}
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
new file mode 100644
index 000000000000..085c2a274cb1
--- /dev/null
+++ b/drivers/net/enic/vnic_vic.h
@@ -0,0 +1,59 @@
1/*
2 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
3 *
4 * This program is free software; you may redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; version 2 of the License.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15 * SOFTWARE.
16 *
17 */
18
19#ifndef _VNIC_VIC_H_
20#define _VNIC_VIC_H_
21
22/* Note: All integer fields in NETWORK byte order */
23
24/* Note: String field lengths include null char */
25
26#define VIC_PROVINFO_CISCO_OUI { 0x00, 0x00, 0x0c }
27#define VIC_PROVINFO_LINUX_TYPE 0x2
28
29enum vic_linux_prov_tlv_type {
30 VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
31 VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR = 1, /* u8[6] */
32 VIC_LINUX_PROV_TLV_CLIENT_NAME_STR = 2,
33 VIC_LINUX_PROV_TLV_HOST_UUID_STR = 8,
34 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR = 9,
35};
36
37struct vic_provinfo {
38 u8 oui[3]; /* OUI of data provider */
39 u8 type; /* provider-specific type */
40 u32 length; /* length of data below */
41 u32 num_tlvs; /* number of tlvs */
42 struct vic_provinfo_tlv {
43 u16 type;
44 u16 length;
45 u8 value[0];
46 } tlv[0];
47} __attribute__ ((packed));
48
49#define VIC_PROVINFO_MAX_DATA 1385
50#define VIC_PROVINFO_MAX_TLV_DATA (VIC_PROVINFO_MAX_DATA - \
51 sizeof(struct vic_provinfo))
52
53struct vic_provinfo *vic_provinfo_alloc(gfp_t flags, u8 *oui, u8 type);
54void vic_provinfo_free(struct vic_provinfo *vp);
55int vic_provinfo_add_tlv(struct vic_provinfo *vp, u16 type, u16 length,
56 void *value);
57size_t vic_provinfo_size(struct vic_provinfo *vp);
58
59#endif /* _VNIC_VIC_H_ */
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index d5f984357f5c..1378afbdfe67 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -161,10 +161,10 @@ int vnic_wq_disable(struct vnic_wq *wq)
161 iowrite32(0, &wq->ctrl->enable); 161 iowrite32(0, &wq->ctrl->enable);
162 162
163 /* Wait for HW to ACK disable request */ 163 /* Wait for HW to ACK disable request */
164 for (wait = 0; wait < 100; wait++) { 164 for (wait = 0; wait < 1000; wait++) {
165 if (!(ioread32(&wq->ctrl->running))) 165 if (!(ioread32(&wq->ctrl->running)))
166 return 0; 166 return 0;
167 udelay(1); 167 udelay(10);
168 } 168 }
169 169
170 printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index); 170 printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 7a567201e829..6838dfc9ef23 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -652,7 +652,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
652 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0) 652 if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
653 break; 653 break;
654 } 654 }
655 return;
656} 655}
657 656
658 657
@@ -840,7 +839,6 @@ static void epic_restart(struct net_device *dev)
840 " interrupt %4.4x.\n", 839 " interrupt %4.4x.\n",
841 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL), 840 dev->name, (int)inl(ioaddr + COMMAND), (int)inl(ioaddr + GENCTL),
842 (int)inl(ioaddr + INTSTAT)); 841 (int)inl(ioaddr + INTSTAT));
843 return;
844} 842}
845 843
846static void check_media(struct net_device *dev) 844static void check_media(struct net_device *dev)
@@ -908,7 +906,7 @@ static void epic_tx_timeout(struct net_device *dev)
908 outl(TxQueued, dev->base_addr + COMMAND); 906 outl(TxQueued, dev->base_addr + COMMAND);
909 } 907 }
910 908
911 dev->trans_start = jiffies; 909 dev->trans_start = jiffies; /* prevent tx timeout */
912 ep->stats.tx_errors++; 910 ep->stats.tx_errors++;
913 if (!ep->tx_full) 911 if (!ep->tx_full)
914 netif_wake_queue(dev); 912 netif_wake_queue(dev);
@@ -958,7 +956,6 @@ static void epic_init_ring(struct net_device *dev)
958 (i+1)*sizeof(struct epic_tx_desc); 956 (i+1)*sizeof(struct epic_tx_desc);
959 } 957 }
960 ep->tx_ring[i-1].next = ep->tx_ring_dma; 958 ep->tx_ring[i-1].next = ep->tx_ring_dma;
961 return;
962} 959}
963 960
964static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev) 961static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -1006,7 +1003,6 @@ static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
1006 /* Trigger an immediate transmit demand. */ 1003 /* Trigger an immediate transmit demand. */
1007 outl(TxQueued, dev->base_addr + COMMAND); 1004 outl(TxQueued, dev->base_addr + COMMAND);
1008 1005
1009 dev->trans_start = jiffies;
1010 if (debug > 4) 1006 if (debug > 4)
1011 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, " 1007 printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
1012 "flag %2.2x Tx status %8.8x.\n", 1008 "flag %2.2x Tx status %8.8x.\n",
@@ -1399,12 +1395,12 @@ static void set_rx_mode(struct net_device *dev)
1399 outl(0x0004, ioaddr + RxCtrl); 1395 outl(0x0004, ioaddr + RxCtrl);
1400 return; 1396 return;
1401 } else { /* Never executed, for now. */ 1397 } else { /* Never executed, for now. */
1402 struct dev_mc_list *mclist; 1398 struct netdev_hw_addr *ha;
1403 1399
1404 memset(mc_filter, 0, sizeof(mc_filter)); 1400 memset(mc_filter, 0, sizeof(mc_filter));
1405 netdev_for_each_mc_addr(mclist, dev) { 1401 netdev_for_each_mc_addr(ha, dev) {
1406 unsigned int bit_nr = 1402 unsigned int bit_nr =
1407 ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f; 1403 ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
1408 mc_filter[bit_nr >> 3] |= (1 << bit_nr); 1404 mc_filter[bit_nr >> 3] |= (1 << bit_nr);
1409 } 1405 }
1410 } 1406 }
@@ -1414,7 +1410,6 @@ static void set_rx_mode(struct net_device *dev)
1414 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4); 1410 outw(((u16 *)mc_filter)[i], ioaddr + MC0 + i*4);
1415 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter)); 1411 memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
1416 } 1412 }
1417 return;
1418} 1413}
1419 1414
1420static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) 1415static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
diff --git a/drivers/net/eql.c b/drivers/net/eql.c
index b34a2ddeef4c..dda2c7944da9 100644
--- a/drivers/net/eql.c
+++ b/drivers/net/eql.c
@@ -288,7 +288,7 @@ static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
288 return eql_s_master_cfg(dev, ifr->ifr_data); 288 return eql_s_master_cfg(dev, ifr->ifr_data);
289 default: 289 default:
290 return -EOPNOTSUPP; 290 return -EOPNOTSUPP;
291 }; 291 }
292} 292}
293 293
294/* queue->lock must be held */ 294/* queue->lock must be held */
diff --git a/drivers/net/es3210.c b/drivers/net/es3210.c
index 5569f2ffb62c..0ba5e7b90584 100644
--- a/drivers/net/es3210.c
+++ b/drivers/net/es3210.c
@@ -319,8 +319,6 @@ static void es_reset_8390(struct net_device *dev)
319 ei_status.txing = 0; 319 ei_status.txing = 0;
320 outb(0x01, ioaddr + ES_RESET_PORT); 320 outb(0x01, ioaddr + ES_RESET_PORT);
321 if (ei_debug > 1) printk("reset done\n"); 321 if (ei_debug > 1) printk("reset done\n");
322
323 return;
324} 322}
325 323
326/* 324/*
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index d4e24f08b3ba..874973f558e9 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1027,7 +1027,7 @@ static void eth16i_timeout(struct net_device *dev)
1027 inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ? 1027 inw(ioaddr + TX_STATUS_REG), (inb(ioaddr + TX_STATUS_REG) & TX_DONE) ?
1028 "IRQ conflict" : "network cable problem"); 1028 "IRQ conflict" : "network cable problem");
1029 1029
1030 dev->trans_start = jiffies; 1030 dev->trans_start = jiffies; /* prevent tx timeout */
1031 1031
1032 /* Let's dump all registers */ 1032 /* Let's dump all registers */
1033 if(eth16i_debug > 0) { 1033 if(eth16i_debug > 0) {
@@ -1047,7 +1047,7 @@ static void eth16i_timeout(struct net_device *dev)
1047 } 1047 }
1048 dev->stats.tx_errors++; 1048 dev->stats.tx_errors++;
1049 eth16i_reset(dev); 1049 eth16i_reset(dev);
1050 dev->trans_start = jiffies; 1050 dev->trans_start = jiffies; /* prevent tx timeout */
1051 outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG); 1051 outw(ETH16I_INTR_ON, ioaddr + TX_INTR_REG);
1052 netif_wake_queue(dev); 1052 netif_wake_queue(dev);
1053} 1053}
@@ -1109,7 +1109,6 @@ static netdev_tx_t eth16i_tx(struct sk_buff *skb, struct net_device *dev)
1109 outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG); 1109 outb(TX_START | lp->tx_queue, ioaddr + TRANSMIT_START_REG);
1110 lp->tx_queue = 0; 1110 lp->tx_queue = 0;
1111 lp->tx_queue_len = 0; 1111 lp->tx_queue_len = 0;
1112 dev->trans_start = jiffies;
1113 lp->tx_started = 1; 1112 lp->tx_started = 1;
1114 netif_wake_queue(dev); 1113 netif_wake_queue(dev);
1115 } 1114 }
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a8d92503226e..14cbde5cf68e 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -756,7 +756,7 @@ static void ethoc_set_multicast_list(struct net_device *dev)
756{ 756{
757 struct ethoc *priv = netdev_priv(dev); 757 struct ethoc *priv = netdev_priv(dev);
758 u32 mode = ethoc_read(priv, MODER); 758 u32 mode = ethoc_read(priv, MODER);
759 struct dev_mc_list *mc; 759 struct netdev_hw_addr *ha;
760 u32 hash[2] = { 0, 0 }; 760 u32 hash[2] = { 0, 0 };
761 761
762 /* set loopback mode if requested */ 762 /* set loopback mode if requested */
@@ -784,8 +784,8 @@ static void ethoc_set_multicast_list(struct net_device *dev)
784 hash[0] = 0xffffffff; 784 hash[0] = 0xffffffff;
785 hash[1] = 0xffffffff; 785 hash[1] = 0xffffffff;
786 } else { 786 } else {
787 netdev_for_each_mc_addr(mc, dev) { 787 netdev_for_each_mc_addr(ha, dev) {
788 u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr); 788 u32 crc = ether_crc(ETH_ALEN, ha->addr);
789 int bit = (crc >> 26) & 0x3f; 789 int bit = (crc >> 26) & 0x3f;
790 hash[bit >> 5] |= 1 << (bit & 0x1f); 790 hash[bit >> 5] |= 1 << (bit & 0x1f);
791 } 791 }
@@ -851,7 +851,6 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
851 netif_stop_queue(dev); 851 netif_stop_queue(dev);
852 } 852 }
853 853
854 dev->trans_start = jiffies;
855 spin_unlock_irq(&priv->lock); 854 spin_unlock_irq(&priv->lock);
856out: 855out:
857 dev_kfree_skb(skb); 856 dev_kfree_skb(skb);
@@ -1040,7 +1039,6 @@ static int ethoc_probe(struct platform_device *pdev)
1040 netdev->features |= 0; 1039 netdev->features |= 0;
1041 1040
1042 /* setup NAPI */ 1041 /* setup NAPI */
1043 memset(&priv->napi, 0, sizeof(priv->napi));
1044 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); 1042 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
1045 1043
1046 spin_lock_init(&priv->rx_lock); 1044 spin_lock_init(&priv->rx_lock);
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 91e59f3a9d6d..380d0614a89a 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -757,7 +757,7 @@ static void ewrk3_timeout(struct net_device *dev)
757 */ 757 */
758 ENABLE_IRQs; 758 ENABLE_IRQs;
759 759
760 dev->trans_start = jiffies; 760 dev->trans_start = jiffies; /* prevent tx timeout */
761 netif_wake_queue(dev); 761 netif_wake_queue(dev);
762 } 762 }
763} 763}
@@ -862,7 +862,6 @@ static netdev_tx_t ewrk3_queue_pkt(struct sk_buff *skb, struct net_device *dev)
862 spin_unlock_irq (&lp->hw_lock); 862 spin_unlock_irq (&lp->hw_lock);
863 863
864 dev->stats.tx_bytes += skb->len; 864 dev->stats.tx_bytes += skb->len;
865 dev->trans_start = jiffies;
866 dev_kfree_skb (skb); 865 dev_kfree_skb (skb);
867 866
868 /* Check for free resources: stop Tx queue if there are none */ 867 /* Check for free resources: stop Tx queue if there are none */
@@ -1169,7 +1168,7 @@ static void set_multicast_list(struct net_device *dev)
1169static void SetMulticastFilter(struct net_device *dev) 1168static void SetMulticastFilter(struct net_device *dev)
1170{ 1169{
1171 struct ewrk3_private *lp = netdev_priv(dev); 1170 struct ewrk3_private *lp = netdev_priv(dev);
1172 struct dev_mc_list *dmi; 1171 struct netdev_hw_addr *ha;
1173 u_long iobase = dev->base_addr; 1172 u_long iobase = dev->base_addr;
1174 int i; 1173 int i;
1175 char *addrs, bit, byte; 1174 char *addrs, bit, byte;
@@ -1213,8 +1212,8 @@ static void SetMulticastFilter(struct net_device *dev)
1213 } 1212 }
1214 1213
1215 /* Update table */ 1214 /* Update table */
1216 netdev_for_each_mc_addr(dmi, dev) { 1215 netdev_for_each_mc_addr(ha, dev) {
1217 addrs = dmi->dmi_addr; 1216 addrs = ha->addr;
1218 if ((*addrs & 0x01) == 1) { /* multicast address? */ 1217 if ((*addrs & 0x01) == 1) { /* multicast address? */
1219 crc = ether_crc_le(ETH_ALEN, addrs); 1218 crc = ether_crc_le(ETH_ALEN, addrs);
1220 hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */ 1219 hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
@@ -1370,8 +1369,6 @@ static void __init EthwrkSignature(char *name, char *eeprom_image)
1370 name[EWRK3_STRLEN] = '\0'; 1369 name[EWRK3_STRLEN] = '\0';
1371 } else 1370 } else
1372 name[0] = '\0'; 1371 name[0] = '\0';
1373
1374 return;
1375} 1372}
1376 1373
1377/* 1374/*
@@ -1776,8 +1773,7 @@ static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1776 break; 1773 break;
1777 case EWRK3_SET_MCA: /* Set a multicast address */ 1774 case EWRK3_SET_MCA: /* Set a multicast address */
1778 if (capable(CAP_NET_ADMIN)) { 1775 if (capable(CAP_NET_ADMIN)) {
1779 if (ioc->len > 1024) 1776 if (ioc->len > HASH_TABLE_LEN) {
1780 {
1781 status = -EINVAL; 1777 status = -EINVAL;
1782 break; 1778 break;
1783 } 1779 }
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index d11ae5197f01..15f4f8d3d46d 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1233,7 +1233,7 @@ static void fealnx_tx_timeout(struct net_device *dev)
1233 1233
1234 spin_unlock_irqrestore(&np->lock, flags); 1234 spin_unlock_irqrestore(&np->lock, flags);
1235 1235
1236 dev->trans_start = jiffies; 1236 dev->trans_start = jiffies; /* prevent tx timeout */
1237 np->stats.tx_errors++; 1237 np->stats.tx_errors++;
1238 netif_wake_queue(dev); /* or .._start_.. ?? */ 1238 netif_wake_queue(dev); /* or .._start_.. ?? */
1239} 1239}
@@ -1374,7 +1374,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1374 netif_stop_queue(dev); 1374 netif_stop_queue(dev);
1375 ++np->really_tx_count; 1375 ++np->really_tx_count;
1376 iowrite32(0, np->mem + TXPDR); 1376 iowrite32(0, np->mem + TXPDR);
1377 dev->trans_start = jiffies;
1378 1377
1379 spin_unlock_irqrestore(&np->lock, flags); 1378 spin_unlock_irqrestore(&np->lock, flags);
1380 return NETDEV_TX_OK; 1379 return NETDEV_TX_OK;
@@ -1791,12 +1790,12 @@ static void __set_rx_mode(struct net_device *dev)
1791 memset(mc_filter, 0xff, sizeof(mc_filter)); 1790 memset(mc_filter, 0xff, sizeof(mc_filter));
1792 rx_mode = CR_W_AB | CR_W_AM; 1791 rx_mode = CR_W_AB | CR_W_AM;
1793 } else { 1792 } else {
1794 struct dev_mc_list *mclist; 1793 struct netdev_hw_addr *ha;
1795 1794
1796 memset(mc_filter, 0, sizeof(mc_filter)); 1795 memset(mc_filter, 0, sizeof(mc_filter));
1797 netdev_for_each_mc_addr(mclist, dev) { 1796 netdev_for_each_mc_addr(ha, dev) {
1798 unsigned int bit; 1797 unsigned int bit;
1799 bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F; 1798 bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1800 mc_filter[bit >> 5] |= (1 << bit); 1799 mc_filter[bit >> 5] |= (1 << bit);
1801 } 1800 }
1802 rx_mode = CR_W_AB | CR_W_AM; 1801 rx_mode = CR_W_AB | CR_W_AM;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 9f98c1c4a344..42d9ac9ba395 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -40,6 +40,7 @@
40#include <linux/irq.h> 40#include <linux/irq.h>
41#include <linux/clk.h> 41#include <linux/clk.h>
42#include <linux/platform_device.h> 42#include <linux/platform_device.h>
43#include <linux/phy.h>
43 44
44#include <asm/cacheflush.h> 45#include <asm/cacheflush.h>
45 46
@@ -61,7 +62,6 @@
61 * Define the fixed address of the FEC hardware. 62 * Define the fixed address of the FEC hardware.
62 */ 63 */
63#if defined(CONFIG_M5272) 64#if defined(CONFIG_M5272)
64#define HAVE_mii_link_interrupt
65 65
66static unsigned char fec_mac_default[] = { 66static unsigned char fec_mac_default[] = {
67 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 67 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -86,23 +86,6 @@ static unsigned char fec_mac_default[] = {
86#endif 86#endif
87#endif /* CONFIG_M5272 */ 87#endif /* CONFIG_M5272 */
88 88
89/* Forward declarations of some structures to support different PHYs */
90
91typedef struct {
92 uint mii_data;
93 void (*funct)(uint mii_reg, struct net_device *dev);
94} phy_cmd_t;
95
96typedef struct {
97 uint id;
98 char *name;
99
100 const phy_cmd_t *config;
101 const phy_cmd_t *startup;
102 const phy_cmd_t *ack_int;
103 const phy_cmd_t *shutdown;
104} phy_info_t;
105
106/* The number of Tx and Rx buffers. These are allocated from the page 89/* The number of Tx and Rx buffers. These are allocated from the page
107 * pool. The code may assume these are power of two, so it it best 90 * pool. The code may assume these are power of two, so it it best
108 * to keep them that size. 91 * to keep them that size.
@@ -189,29 +172,21 @@ struct fec_enet_private {
189 uint tx_full; 172 uint tx_full;
190 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ 173 /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
191 spinlock_t hw_lock; 174 spinlock_t hw_lock;
192 /* hold while accessing the mii_list_t() elements */
193 spinlock_t mii_lock;
194
195 uint phy_id;
196 uint phy_id_done;
197 uint phy_status;
198 uint phy_speed;
199 phy_info_t const *phy;
200 struct work_struct phy_task;
201 175
202 uint sequence_done; 176 struct platform_device *pdev;
203 uint mii_phy_task_queued;
204 177
205 uint phy_addr; 178 int opened;
206 179
180 /* Phylib and MDIO interface */
181 struct mii_bus *mii_bus;
182 struct phy_device *phy_dev;
183 int mii_timeout;
184 uint phy_speed;
207 int index; 185 int index;
208 int opened;
209 int link; 186 int link;
210 int old_link;
211 int full_duplex; 187 int full_duplex;
212}; 188};
213 189
214static void fec_enet_mii(struct net_device *dev);
215static irqreturn_t fec_enet_interrupt(int irq, void * dev_id); 190static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
216static void fec_enet_tx(struct net_device *dev); 191static void fec_enet_tx(struct net_device *dev);
217static void fec_enet_rx(struct net_device *dev); 192static void fec_enet_rx(struct net_device *dev);
@@ -219,67 +194,20 @@ static int fec_enet_close(struct net_device *dev);
219static void fec_restart(struct net_device *dev, int duplex); 194static void fec_restart(struct net_device *dev, int duplex);
220static void fec_stop(struct net_device *dev); 195static void fec_stop(struct net_device *dev);
221 196
197/* FEC MII MMFR bits definition */
198#define FEC_MMFR_ST (1 << 30)
199#define FEC_MMFR_OP_READ (2 << 28)
200#define FEC_MMFR_OP_WRITE (1 << 28)
201#define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
202#define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
203#define FEC_MMFR_TA (2 << 16)
204#define FEC_MMFR_DATA(v) (v & 0xffff)
222 205
223/* MII processing. We keep this as simple as possible. Requests are 206#define FEC_MII_TIMEOUT 10000
224 * placed on the list (if there is room). When the request is finished
225 * by the MII, an optional function may be called.
226 */
227typedef struct mii_list {
228 uint mii_regval;
229 void (*mii_func)(uint val, struct net_device *dev);
230 struct mii_list *mii_next;
231} mii_list_t;
232
233#define NMII 20
234static mii_list_t mii_cmds[NMII];
235static mii_list_t *mii_free;
236static mii_list_t *mii_head;
237static mii_list_t *mii_tail;
238
239static int mii_queue(struct net_device *dev, int request,
240 void (*func)(uint, struct net_device *));
241
242/* Make MII read/write commands for the FEC */
243#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
244#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \
245 (VAL & 0xffff))
246#define mk_mii_end 0
247 207
248/* Transmitter timeout */ 208/* Transmitter timeout */
249#define TX_TIMEOUT (2 * HZ) 209#define TX_TIMEOUT (2 * HZ)
250 210
251/* Register definitions for the PHY */
252
253#define MII_REG_CR 0 /* Control Register */
254#define MII_REG_SR 1 /* Status Register */
255#define MII_REG_PHYIR1 2 /* PHY Identification Register 1 */
256#define MII_REG_PHYIR2 3 /* PHY Identification Register 2 */
257#define MII_REG_ANAR 4 /* A-N Advertisement Register */
258#define MII_REG_ANLPAR 5 /* A-N Link Partner Ability Register */
259#define MII_REG_ANER 6 /* A-N Expansion Register */
260#define MII_REG_ANNPTR 7 /* A-N Next Page Transmit Register */
261#define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. */
262
263/* values for phy_status */
264
265#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */
266#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */
267#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */
268#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */
269#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */
270#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */
271#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */
272
273#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */
274#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */
275#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */
276#define PHY_STAT_SPMASK 0xf000 /* mask for speed */
277#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */
278#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */
279#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */
280#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */
281
282
283static int 211static int
284fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 212fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
285{ 213{
@@ -347,8 +275,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
347 | BD_ENET_TX_LAST | BD_ENET_TX_TC); 275 | BD_ENET_TX_LAST | BD_ENET_TX_TC);
348 bdp->cbd_sc = status; 276 bdp->cbd_sc = status;
349 277
350 dev->trans_start = jiffies;
351
352 /* Trigger transmission start */ 278 /* Trigger transmission start */
353 writel(0, fep->hwp + FEC_X_DES_ACTIVE); 279 writel(0, fep->hwp + FEC_X_DES_ACTIVE);
354 280
@@ -406,12 +332,6 @@ fec_enet_interrupt(int irq, void * dev_id)
406 ret = IRQ_HANDLED; 332 ret = IRQ_HANDLED;
407 fec_enet_tx(dev); 333 fec_enet_tx(dev);
408 } 334 }
409
410 if (int_events & FEC_ENET_MII) {
411 ret = IRQ_HANDLED;
412 fec_enet_mii(dev);
413 }
414
415 } while (int_events); 335 } while (int_events);
416 336
417 return ret; 337 return ret;
@@ -607,827 +527,311 @@ rx_processing_done:
607 spin_unlock(&fep->hw_lock); 527 spin_unlock(&fep->hw_lock);
608} 528}
609 529
610/* called from interrupt context */ 530/* ------------------------------------------------------------------------- */
611static void 531#ifdef CONFIG_M5272
612fec_enet_mii(struct net_device *dev) 532static void __inline__ fec_get_mac(struct net_device *dev)
613{
614 struct fec_enet_private *fep;
615 mii_list_t *mip;
616
617 fep = netdev_priv(dev);
618 spin_lock(&fep->mii_lock);
619
620 if ((mip = mii_head) == NULL) {
621 printk("MII and no head!\n");
622 goto unlock;
623 }
624
625 if (mip->mii_func != NULL)
626 (*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);
627
628 mii_head = mip->mii_next;
629 mip->mii_next = mii_free;
630 mii_free = mip;
631
632 if ((mip = mii_head) != NULL)
633 writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);
634
635unlock:
636 spin_unlock(&fep->mii_lock);
637}
638
639static int
640mii_queue_unlocked(struct net_device *dev, int regval,
641 void (*func)(uint, struct net_device *))
642{ 533{
643 struct fec_enet_private *fep; 534 struct fec_enet_private *fep = netdev_priv(dev);
644 mii_list_t *mip; 535 unsigned char *iap, tmpaddr[ETH_ALEN];
645 int retval;
646
647 /* Add PHY address to register command */
648 fep = netdev_priv(dev);
649 536
650 regval |= fep->phy_addr << 23; 537 if (FEC_FLASHMAC) {
651 retval = 0; 538 /*
652 539 * Get MAC address from FLASH.
653 if ((mip = mii_free) != NULL) { 540 * If it is all 1's or 0's, use the default.
654 mii_free = mip->mii_next; 541 */
655 mip->mii_regval = regval; 542 iap = (unsigned char *)FEC_FLASHMAC;
656 mip->mii_func = func; 543 if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
657 mip->mii_next = NULL; 544 (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
658 if (mii_head) { 545 iap = fec_mac_default;
659 mii_tail->mii_next = mip; 546 if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
660 mii_tail = mip; 547 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
661 } else { 548 iap = fec_mac_default;
662 mii_head = mii_tail = mip;
663 writel(regval, fep->hwp + FEC_MII_DATA);
664 }
665 } else { 549 } else {
666 retval = 1; 550 *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
551 *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
552 iap = &tmpaddr[0];
667 } 553 }
668 554
669 return retval; 555 memcpy(dev->dev_addr, iap, ETH_ALEN);
670}
671
672static int
673mii_queue(struct net_device *dev, int regval,
674 void (*func)(uint, struct net_device *))
675{
676 struct fec_enet_private *fep;
677 unsigned long flags;
678 int retval;
679 fep = netdev_priv(dev);
680 spin_lock_irqsave(&fep->mii_lock, flags);
681 retval = mii_queue_unlocked(dev, regval, func);
682 spin_unlock_irqrestore(&fep->mii_lock, flags);
683 return retval;
684}
685
686static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
687{
688 if(!c)
689 return;
690 556
691 for (; c->mii_data != mk_mii_end; c++) 557 /* Adjust MAC if using default MAC address */
692 mii_queue(dev, c->mii_data, c->funct); 558 if (iap == fec_mac_default)
559 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
693} 560}
561#endif
694 562
695static void mii_parse_sr(uint mii_reg, struct net_device *dev) 563/* ------------------------------------------------------------------------- */
696{
697 struct fec_enet_private *fep = netdev_priv(dev);
698 volatile uint *s = &(fep->phy_status);
699 uint status;
700
701 status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
702
703 if (mii_reg & 0x0004)
704 status |= PHY_STAT_LINK;
705 if (mii_reg & 0x0010)
706 status |= PHY_STAT_FAULT;
707 if (mii_reg & 0x0020)
708 status |= PHY_STAT_ANC;
709 *s = status;
710}
711 564
712static void mii_parse_cr(uint mii_reg, struct net_device *dev) 565/*
566 * Phy section
567 */
568static void fec_enet_adjust_link(struct net_device *dev)
713{ 569{
714 struct fec_enet_private *fep = netdev_priv(dev); 570 struct fec_enet_private *fep = netdev_priv(dev);
715 volatile uint *s = &(fep->phy_status); 571 struct phy_device *phy_dev = fep->phy_dev;
716 uint status; 572 unsigned long flags;
717
718 status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);
719
720 if (mii_reg & 0x1000)
721 status |= PHY_CONF_ANE;
722 if (mii_reg & 0x4000)
723 status |= PHY_CONF_LOOP;
724 *s = status;
725}
726 573
727static void mii_parse_anar(uint mii_reg, struct net_device *dev) 574 int status_change = 0;
728{
729 struct fec_enet_private *fep = netdev_priv(dev);
730 volatile uint *s = &(fep->phy_status);
731 uint status;
732
733 status = *s & ~(PHY_CONF_SPMASK);
734
735 if (mii_reg & 0x0020)
736 status |= PHY_CONF_10HDX;
737 if (mii_reg & 0x0040)
738 status |= PHY_CONF_10FDX;
739 if (mii_reg & 0x0080)
740 status |= PHY_CONF_100HDX;
741 if (mii_reg & 0x00100)
742 status |= PHY_CONF_100FDX;
743 *s = status;
744}
745 575
746/* ------------------------------------------------------------------------- */ 576 spin_lock_irqsave(&fep->hw_lock, flags);
747/* The Level one LXT970 is used by many boards */
748 577
749#define MII_LXT970_MIRROR 16 /* Mirror register */ 578 /* Prevent a state halted on mii error */
750#define MII_LXT970_IER 17 /* Interrupt Enable Register */ 579 if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
751#define MII_LXT970_ISR 18 /* Interrupt Status Register */ 580 phy_dev->state = PHY_RESUMING;
752#define MII_LXT970_CONFIG 19 /* Configuration Register */ 581 goto spin_unlock;
753#define MII_LXT970_CSR 20 /* Chip Status Register */ 582 }
754 583
755static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev) 584 /* Duplex link change */
756{ 585 if (phy_dev->link) {
757 struct fec_enet_private *fep = netdev_priv(dev); 586 if (fep->full_duplex != phy_dev->duplex) {
758 volatile uint *s = &(fep->phy_status); 587 fec_restart(dev, phy_dev->duplex);
759 uint status; 588 status_change = 1;
589 }
590 }
760 591
761 status = *s & ~(PHY_STAT_SPMASK); 592 /* Link on or off change */
762 if (mii_reg & 0x0800) { 593 if (phy_dev->link != fep->link) {
763 if (mii_reg & 0x1000) 594 fep->link = phy_dev->link;
764 status |= PHY_STAT_100FDX; 595 if (phy_dev->link)
765 else 596 fec_restart(dev, phy_dev->duplex);
766 status |= PHY_STAT_100HDX;
767 } else {
768 if (mii_reg & 0x1000)
769 status |= PHY_STAT_10FDX;
770 else 597 else
771 status |= PHY_STAT_10HDX; 598 fec_stop(dev);
599 status_change = 1;
772 } 600 }
773 *s = status;
774}
775 601
776static phy_cmd_t const phy_cmd_lxt970_config[] = { 602spin_unlock:
777 { mk_mii_read(MII_REG_CR), mii_parse_cr }, 603 spin_unlock_irqrestore(&fep->hw_lock, flags);
778 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
779 { mk_mii_end, }
780 };
781static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
782 { mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
783 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
784 { mk_mii_end, }
785 };
786static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
787 /* read SR and ISR to acknowledge */
788 { mk_mii_read(MII_REG_SR), mii_parse_sr },
789 { mk_mii_read(MII_LXT970_ISR), NULL },
790
791 /* find out the current status */
792 { mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
793 { mk_mii_end, }
794 };
795static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
796 { mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
797 { mk_mii_end, }
798 };
799static phy_info_t const phy_info_lxt970 = {
800 .id = 0x07810000,
801 .name = "LXT970",
802 .config = phy_cmd_lxt970_config,
803 .startup = phy_cmd_lxt970_startup,
804 .ack_int = phy_cmd_lxt970_ack_int,
805 .shutdown = phy_cmd_lxt970_shutdown
806};
807
808/* ------------------------------------------------------------------------- */
809/* The Level one LXT971 is used on some of my custom boards */
810
811/* register definitions for the 971 */
812 604
813#define MII_LXT971_PCR 16 /* Port Control Register */ 605 if (status_change)
814#define MII_LXT971_SR2 17 /* Status Register 2 */ 606 phy_print_status(phy_dev);
815#define MII_LXT971_IER 18 /* Interrupt Enable Register */ 607}
816#define MII_LXT971_ISR 19 /* Interrupt Status Register */
817#define MII_LXT971_LCR 20 /* LED Control Register */
818#define MII_LXT971_TCR 30 /* Transmit Control Register */
819 608
820/* 609/*
821 * I had some nice ideas of running the MDIO faster... 610 * NOTE: a MII transaction is during around 25 us, so polling it...
822 * The 971 should support 8MHz and I tried it, but things acted really
823 * weird, so 2.5 MHz ought to be enough for anyone...
824 */ 611 */
825 612static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
826static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
827{ 613{
828 struct fec_enet_private *fep = netdev_priv(dev); 614 struct fec_enet_private *fep = bus->priv;
829 volatile uint *s = &(fep->phy_status); 615 int timeout = FEC_MII_TIMEOUT;
830 uint status;
831 616
832 status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC); 617 fep->mii_timeout = 0;
833 618
834 if (mii_reg & 0x0400) { 619 /* clear MII end of transfer bit*/
835 fep->link = 1; 620 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
836 status |= PHY_STAT_LINK; 621
837 } else { 622 /* start a read op */
838 fep->link = 0; 623 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
839 } 624 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
840 if (mii_reg & 0x0080) 625 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
841 status |= PHY_STAT_ANC; 626
842 if (mii_reg & 0x4000) { 627 /* wait for end of transfer */
843 if (mii_reg & 0x0200) 628 while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
844 status |= PHY_STAT_100FDX; 629 cpu_relax();
845 else 630 if (timeout-- < 0) {
846 status |= PHY_STAT_100HDX; 631 fep->mii_timeout = 1;
847 } else { 632 printk(KERN_ERR "FEC: MDIO read timeout\n");
848 if (mii_reg & 0x0200) 633 return -ETIMEDOUT;
849 status |= PHY_STAT_10FDX; 634 }
850 else
851 status |= PHY_STAT_10HDX;
852 } 635 }
853 if (mii_reg & 0x0008)
854 status |= PHY_STAT_FAULT;
855 636
856 *s = status; 637 /* return value */
638 return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
857} 639}
858 640
859static phy_cmd_t const phy_cmd_lxt971_config[] = { 641static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
860 /* limit to 10MBit because my prototype board 642 u16 value)
861 * doesn't work with 100. */
862 { mk_mii_read(MII_REG_CR), mii_parse_cr },
863 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
864 { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
865 { mk_mii_end, }
866 };
867static phy_cmd_t const phy_cmd_lxt971_startup[] = { /* enable interrupts */
868 { mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
869 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
870 { mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
871 /* Somehow does the 971 tell me that the link is down
872 * the first read after power-up.
873 * read here to get a valid value in ack_int */
874 { mk_mii_read(MII_REG_SR), mii_parse_sr },
875 { mk_mii_end, }
876 };
877static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
878 /* acknowledge the int before reading status ! */
879 { mk_mii_read(MII_LXT971_ISR), NULL },
880 /* find out the current status */
881 { mk_mii_read(MII_REG_SR), mii_parse_sr },
882 { mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
883 { mk_mii_end, }
884 };
885static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
886 { mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
887 { mk_mii_end, }
888 };
889static phy_info_t const phy_info_lxt971 = {
890 .id = 0x0001378e,
891 .name = "LXT971",
892 .config = phy_cmd_lxt971_config,
893 .startup = phy_cmd_lxt971_startup,
894 .ack_int = phy_cmd_lxt971_ack_int,
895 .shutdown = phy_cmd_lxt971_shutdown
896};
897
898/* ------------------------------------------------------------------------- */
899/* The Quality Semiconductor QS6612 is used on the RPX CLLF */
900
901/* register definitions */
902
903#define MII_QS6612_MCR 17 /* Mode Control Register */
904#define MII_QS6612_FTR 27 /* Factory Test Register */
905#define MII_QS6612_MCO 28 /* Misc. Control Register */
906#define MII_QS6612_ISR 29 /* Interrupt Source Register */
907#define MII_QS6612_IMR 30 /* Interrupt Mask Register */
908#define MII_QS6612_PCR 31 /* 100BaseTx PHY Control Reg. */
909
910static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
911{ 643{
912 struct fec_enet_private *fep = netdev_priv(dev); 644 struct fec_enet_private *fep = bus->priv;
913 volatile uint *s = &(fep->phy_status); 645 int timeout = FEC_MII_TIMEOUT;
914 uint status;
915
916 status = *s & ~(PHY_STAT_SPMASK);
917 646
918 switch((mii_reg >> 2) & 7) { 647 fep->mii_timeout = 0;
919 case 1: status |= PHY_STAT_10HDX; break;
920 case 2: status |= PHY_STAT_100HDX; break;
921 case 5: status |= PHY_STAT_10FDX; break;
922 case 6: status |= PHY_STAT_100FDX; break;
923}
924
925 *s = status;
926}
927 648
928static phy_cmd_t const phy_cmd_qs6612_config[] = { 649 /* clear MII end of transfer bit*/
929 /* The PHY powers up isolated on the RPX, 650 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
930 * so send a command to allow operation.
931 */
932 { mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
933
934 /* parse cr and anar to get some info */
935 { mk_mii_read(MII_REG_CR), mii_parse_cr },
936 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
937 { mk_mii_end, }
938 };
939static phy_cmd_t const phy_cmd_qs6612_startup[] = { /* enable interrupts */
940 { mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
941 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
942 { mk_mii_end, }
943 };
944static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
945 /* we need to read ISR, SR and ANER to acknowledge */
946 { mk_mii_read(MII_QS6612_ISR), NULL },
947 { mk_mii_read(MII_REG_SR), mii_parse_sr },
948 { mk_mii_read(MII_REG_ANER), NULL },
949
950 /* read pcr to get info */
951 { mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
952 { mk_mii_end, }
953 };
954static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
955 { mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
956 { mk_mii_end, }
957 };
958static phy_info_t const phy_info_qs6612 = {
959 .id = 0x00181440,
960 .name = "QS6612",
961 .config = phy_cmd_qs6612_config,
962 .startup = phy_cmd_qs6612_startup,
963 .ack_int = phy_cmd_qs6612_ack_int,
964 .shutdown = phy_cmd_qs6612_shutdown
965};
966
967/* ------------------------------------------------------------------------- */
968/* AMD AM79C874 phy */
969 651
970/* register definitions for the 874 */ 652 /* start a read op */
653 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
654 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
655 FEC_MMFR_TA | FEC_MMFR_DATA(value),
656 fep->hwp + FEC_MII_DATA);
657
658 /* wait for end of transfer */
659 while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
660 cpu_relax();
661 if (timeout-- < 0) {
662 fep->mii_timeout = 1;
663 printk(KERN_ERR "FEC: MDIO write timeout\n");
664 return -ETIMEDOUT;
665 }
666 }
971 667
972#define MII_AM79C874_MFR 16 /* Miscellaneous Feature Register */ 668 return 0;
973#define MII_AM79C874_ICSR 17 /* Interrupt/Status Register */ 669}
974#define MII_AM79C874_DR 18 /* Diagnostic Register */
975#define MII_AM79C874_PMLR 19 /* Power and Loopback Register */
976#define MII_AM79C874_MCR 21 /* ModeControl Register */
977#define MII_AM79C874_DC 23 /* Disconnect Counter */
978#define MII_AM79C874_REC 24 /* Recieve Error Counter */
979 670
980static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev) 671static int fec_enet_mdio_reset(struct mii_bus *bus)
981{ 672{
982 struct fec_enet_private *fep = netdev_priv(dev); 673 return 0;
983 volatile uint *s = &(fep->phy_status);
984 uint status;
985
986 status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);
987
988 if (mii_reg & 0x0080)
989 status |= PHY_STAT_ANC;
990 if (mii_reg & 0x0400)
991 status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
992 else
993 status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);
994
995 *s = status;
996} 674}
997 675
998static phy_cmd_t const phy_cmd_am79c874_config[] = { 676static int fec_enet_mii_probe(struct net_device *dev)
999 { mk_mii_read(MII_REG_CR), mii_parse_cr },
1000 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
1001 { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
1002 { mk_mii_end, }
1003 };
1004static phy_cmd_t const phy_cmd_am79c874_startup[] = { /* enable interrupts */
1005 { mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
1006 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
1007 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1008 { mk_mii_end, }
1009 };
1010static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
1011 /* find out the current status */
1012 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1013 { mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
1014 /* we only need to read ISR to acknowledge */
1015 { mk_mii_read(MII_AM79C874_ICSR), NULL },
1016 { mk_mii_end, }
1017 };
1018static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
1019 { mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
1020 { mk_mii_end, }
1021 };
1022static phy_info_t const phy_info_am79c874 = {
1023 .id = 0x00022561,
1024 .name = "AM79C874",
1025 .config = phy_cmd_am79c874_config,
1026 .startup = phy_cmd_am79c874_startup,
1027 .ack_int = phy_cmd_am79c874_ack_int,
1028 .shutdown = phy_cmd_am79c874_shutdown
1029};
1030
1031
1032/* ------------------------------------------------------------------------- */
1033/* Kendin KS8721BL phy */
1034
1035/* register definitions for the 8721 */
1036
1037#define MII_KS8721BL_RXERCR 21
1038#define MII_KS8721BL_ICSR 27
1039#define MII_KS8721BL_PHYCR 31
1040
1041static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
1042 { mk_mii_read(MII_REG_CR), mii_parse_cr },
1043 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
1044 { mk_mii_end, }
1045 };
1046static phy_cmd_t const phy_cmd_ks8721bl_startup[] = { /* enable interrupts */
1047 { mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
1048 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
1049 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1050 { mk_mii_end, }
1051 };
1052static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
1053 /* find out the current status */
1054 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1055 /* we only need to read ISR to acknowledge */
1056 { mk_mii_read(MII_KS8721BL_ICSR), NULL },
1057 { mk_mii_end, }
1058 };
1059static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
1060 { mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
1061 { mk_mii_end, }
1062 };
1063static phy_info_t const phy_info_ks8721bl = {
1064 .id = 0x00022161,
1065 .name = "KS8721BL",
1066 .config = phy_cmd_ks8721bl_config,
1067 .startup = phy_cmd_ks8721bl_startup,
1068 .ack_int = phy_cmd_ks8721bl_ack_int,
1069 .shutdown = phy_cmd_ks8721bl_shutdown
1070};
1071
1072/* ------------------------------------------------------------------------- */
1073/* register definitions for the DP83848 */
1074
1075#define MII_DP8384X_PHYSTST 16 /* PHY Status Register */
1076
1077static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
1078{ 677{
1079 struct fec_enet_private *fep = netdev_priv(dev); 678 struct fec_enet_private *fep = netdev_priv(dev);
1080 volatile uint *s = &(fep->phy_status); 679 struct phy_device *phy_dev = NULL;
1081 680 int phy_addr;
1082 *s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
1083
1084 /* Link up */
1085 if (mii_reg & 0x0001) {
1086 fep->link = 1;
1087 *s |= PHY_STAT_LINK;
1088 } else
1089 fep->link = 0;
1090 /* Status of link */
1091 if (mii_reg & 0x0010) /* Autonegotioation complete */
1092 *s |= PHY_STAT_ANC;
1093 if (mii_reg & 0x0002) { /* 10MBps? */
1094 if (mii_reg & 0x0004) /* Full Duplex? */
1095 *s |= PHY_STAT_10FDX;
1096 else
1097 *s |= PHY_STAT_10HDX;
1098 } else { /* 100 Mbps? */
1099 if (mii_reg & 0x0004) /* Full Duplex? */
1100 *s |= PHY_STAT_100FDX;
1101 else
1102 *s |= PHY_STAT_100HDX;
1103 }
1104 if (mii_reg & 0x0008)
1105 *s |= PHY_STAT_FAULT;
1106}
1107 681
1108static phy_info_t phy_info_dp83848= { 682 /* find the first phy */
1109 0x020005c9, 683 for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
1110 "DP83848", 684 if (fep->mii_bus->phy_map[phy_addr]) {
1111 685 phy_dev = fep->mii_bus->phy_map[phy_addr];
1112 (const phy_cmd_t []) { /* config */ 686 break;
1113 { mk_mii_read(MII_REG_CR), mii_parse_cr }, 687 }
1114 { mk_mii_read(MII_REG_ANAR), mii_parse_anar }, 688 }
1115 { mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
1116 { mk_mii_end, }
1117 },
1118 (const phy_cmd_t []) { /* startup - enable interrupts */
1119 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
1120 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1121 { mk_mii_end, }
1122 },
1123 (const phy_cmd_t []) { /* ack_int - never happens, no interrupt */
1124 { mk_mii_end, }
1125 },
1126 (const phy_cmd_t []) { /* shutdown */
1127 { mk_mii_end, }
1128 },
1129};
1130 689
1131static phy_info_t phy_info_lan8700 = { 690 if (!phy_dev) {
1132 0x0007C0C, 691 printk(KERN_ERR "%s: no PHY found\n", dev->name);
1133 "LAN8700", 692 return -ENODEV;
1134 (const phy_cmd_t []) { /* config */ 693 }
1135 { mk_mii_read(MII_REG_CR), mii_parse_cr },
1136 { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
1137 { mk_mii_end, }
1138 },
1139 (const phy_cmd_t []) { /* startup */
1140 { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
1141 { mk_mii_read(MII_REG_SR), mii_parse_sr },
1142 { mk_mii_end, }
1143 },
1144 (const phy_cmd_t []) { /* act_int */
1145 { mk_mii_end, }
1146 },
1147 (const phy_cmd_t []) { /* shutdown */
1148 { mk_mii_end, }
1149 },
1150};
1151/* ------------------------------------------------------------------------- */
1152 694
1153static phy_info_t const * const phy_info[] = { 695 /* attach the mac to the phy */
1154 &phy_info_lxt970, 696 phy_dev = phy_connect(dev, dev_name(&phy_dev->dev),
1155 &phy_info_lxt971, 697 &fec_enet_adjust_link, 0,
1156 &phy_info_qs6612, 698 PHY_INTERFACE_MODE_MII);
1157 &phy_info_am79c874, 699 if (IS_ERR(phy_dev)) {
1158 &phy_info_ks8721bl, 700 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
1159 &phy_info_dp83848, 701 return PTR_ERR(phy_dev);
1160 &phy_info_lan8700, 702 }
1161 NULL
1162};
1163 703
1164/* ------------------------------------------------------------------------- */ 704 /* mask with MAC supported features */
1165#ifdef HAVE_mii_link_interrupt 705 phy_dev->supported &= PHY_BASIC_FEATURES;
1166static irqreturn_t 706 phy_dev->advertising = phy_dev->supported;
1167mii_link_interrupt(int irq, void * dev_id);
1168 707
1169/* 708 fep->phy_dev = phy_dev;
1170 * This is specific to the MII interrupt setup of the M5272EVB. 709 fep->link = 0;
1171 */ 710 fep->full_duplex = 0;
1172static void __inline__ fec_request_mii_intr(struct net_device *dev)
1173{
1174 if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
1175 printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
1176}
1177 711
1178static void __inline__ fec_disable_phy_intr(struct net_device *dev) 712 return 0;
1179{
1180 free_irq(66, dev);
1181} 713}
1182#endif
1183 714
1184#ifdef CONFIG_M5272 715static int fec_enet_mii_init(struct platform_device *pdev)
1185static void __inline__ fec_get_mac(struct net_device *dev)
1186{ 716{
717 struct net_device *dev = platform_get_drvdata(pdev);
1187 struct fec_enet_private *fep = netdev_priv(dev); 718 struct fec_enet_private *fep = netdev_priv(dev);
1188 unsigned char *iap, tmpaddr[ETH_ALEN]; 719 int err = -ENXIO, i;
1189 720
1190 if (FEC_FLASHMAC) { 721 fep->mii_timeout = 0;
1191 /*
1192 * Get MAC address from FLASH.
1193 * If it is all 1's or 0's, use the default.
1194 */
1195 iap = (unsigned char *)FEC_FLASHMAC;
1196 if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
1197 (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
1198 iap = fec_mac_default;
1199 if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
1200 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
1201 iap = fec_mac_default;
1202 } else {
1203 *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
1204 *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1205 iap = &tmpaddr[0];
1206 }
1207 722
1208 memcpy(dev->dev_addr, iap, ETH_ALEN); 723 /*
1209 724 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
1210 /* Adjust MAC if using default MAC address */ 725 */
1211 if (iap == fec_mac_default) 726 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
1212 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 727 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1213}
1214#endif
1215
1216/* ------------------------------------------------------------------------- */
1217
1218static void mii_display_status(struct net_device *dev)
1219{
1220 struct fec_enet_private *fep = netdev_priv(dev);
1221 volatile uint *s = &(fep->phy_status);
1222 728
1223 if (!fep->link && !fep->old_link) { 729 fep->mii_bus = mdiobus_alloc();
1224 /* Link is still down - don't print anything */ 730 if (fep->mii_bus == NULL) {
1225 return; 731 err = -ENOMEM;
732 goto err_out;
1226 } 733 }
1227 734
1228 printk("%s: status: ", dev->name); 735 fep->mii_bus->name = "fec_enet_mii_bus";
1229 736 fep->mii_bus->read = fec_enet_mdio_read;
1230 if (!fep->link) { 737 fep->mii_bus->write = fec_enet_mdio_write;
1231 printk("link down"); 738 fep->mii_bus->reset = fec_enet_mdio_reset;
1232 } else { 739 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
1233 printk("link up"); 740 fep->mii_bus->priv = fep;
1234 741 fep->mii_bus->parent = &pdev->dev;
1235 switch(*s & PHY_STAT_SPMASK) { 742
1236 case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break; 743 fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
1237 case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break; 744 if (!fep->mii_bus->irq) {
1238 case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break; 745 err = -ENOMEM;
1239 case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break; 746 goto err_out_free_mdiobus;
1240 default:
1241 printk(", Unknown speed/duplex");
1242 }
1243
1244 if (*s & PHY_STAT_ANC)
1245 printk(", auto-negotiation complete");
1246 } 747 }
1247 748
1248 if (*s & PHY_STAT_FAULT) 749 for (i = 0; i < PHY_MAX_ADDR; i++)
1249 printk(", remote fault"); 750 fep->mii_bus->irq[i] = PHY_POLL;
1250
1251 printk(".\n");
1252}
1253
1254static void mii_display_config(struct work_struct *work)
1255{
1256 struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
1257 struct net_device *dev = fep->netdev;
1258 uint status = fep->phy_status;
1259 751
1260 /* 752 platform_set_drvdata(dev, fep->mii_bus);
1261 ** When we get here, phy_task is already removed from
1262 ** the workqueue. It is thus safe to allow to reuse it.
1263 */
1264 fep->mii_phy_task_queued = 0;
1265 printk("%s: config: auto-negotiation ", dev->name);
1266
1267 if (status & PHY_CONF_ANE)
1268 printk("on");
1269 else
1270 printk("off");
1271 753
1272 if (status & PHY_CONF_100FDX) 754 if (mdiobus_register(fep->mii_bus))
1273 printk(", 100FDX"); 755 goto err_out_free_mdio_irq;
1274 if (status & PHY_CONF_100HDX)
1275 printk(", 100HDX");
1276 if (status & PHY_CONF_10FDX)
1277 printk(", 10FDX");
1278 if (status & PHY_CONF_10HDX)
1279 printk(", 10HDX");
1280 if (!(status & PHY_CONF_SPMASK))
1281 printk(", No speed/duplex selected?");
1282 756
1283 if (status & PHY_CONF_LOOP) 757 if (fec_enet_mii_probe(dev) != 0)
1284 printk(", loopback enabled"); 758 goto err_out_unregister_bus;
1285 759
1286 printk(".\n"); 760 return 0;
1287 761
1288 fep->sequence_done = 1; 762err_out_unregister_bus:
763 mdiobus_unregister(fep->mii_bus);
764err_out_free_mdio_irq:
765 kfree(fep->mii_bus->irq);
766err_out_free_mdiobus:
767 mdiobus_free(fep->mii_bus);
768err_out:
769 return err;
1289} 770}
1290 771
1291static void mii_relink(struct work_struct *work) 772static void fec_enet_mii_remove(struct fec_enet_private *fep)
1292{ 773{
1293 struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task); 774 if (fep->phy_dev)
1294 struct net_device *dev = fep->netdev; 775 phy_disconnect(fep->phy_dev);
1295 int duplex; 776 mdiobus_unregister(fep->mii_bus);
1296 777 kfree(fep->mii_bus->irq);
1297 /* 778 mdiobus_free(fep->mii_bus);
1298 ** When we get here, phy_task is already removed from
1299 ** the workqueue. It is thus safe to allow to reuse it.
1300 */
1301 fep->mii_phy_task_queued = 0;
1302 fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
1303 mii_display_status(dev);
1304 fep->old_link = fep->link;
1305
1306 if (fep->link) {
1307 duplex = 0;
1308 if (fep->phy_status
1309 & (PHY_STAT_100FDX | PHY_STAT_10FDX))
1310 duplex = 1;
1311 fec_restart(dev, duplex);
1312 } else
1313 fec_stop(dev);
1314} 779}
1315 780
1316/* mii_queue_relink is called in interrupt context from mii_link_interrupt */ 781static int fec_enet_get_settings(struct net_device *dev,
1317static void mii_queue_relink(uint mii_reg, struct net_device *dev) 782 struct ethtool_cmd *cmd)
1318{ 783{
1319 struct fec_enet_private *fep = netdev_priv(dev); 784 struct fec_enet_private *fep = netdev_priv(dev);
785 struct phy_device *phydev = fep->phy_dev;
1320 786
1321 /* 787 if (!phydev)
1322 * We cannot queue phy_task twice in the workqueue. It 788 return -ENODEV;
1323 * would cause an endless loop in the workqueue.
1324 * Fortunately, if the last mii_relink entry has not yet been
1325 * executed now, it will do the job for the current interrupt,
1326 * which is just what we want.
1327 */
1328 if (fep->mii_phy_task_queued)
1329 return;
1330 789
1331 fep->mii_phy_task_queued = 1; 790 return phy_ethtool_gset(phydev, cmd);
1332 INIT_WORK(&fep->phy_task, mii_relink);
1333 schedule_work(&fep->phy_task);
1334} 791}
1335 792
1336/* mii_queue_config is called in interrupt context from fec_enet_mii */ 793static int fec_enet_set_settings(struct net_device *dev,
1337static void mii_queue_config(uint mii_reg, struct net_device *dev) 794 struct ethtool_cmd *cmd)
1338{ 795{
1339 struct fec_enet_private *fep = netdev_priv(dev); 796 struct fec_enet_private *fep = netdev_priv(dev);
797 struct phy_device *phydev = fep->phy_dev;
1340 798
1341 if (fep->mii_phy_task_queued) 799 if (!phydev)
1342 return; 800 return -ENODEV;
1343 801
1344 fep->mii_phy_task_queued = 1; 802 return phy_ethtool_sset(phydev, cmd);
1345 INIT_WORK(&fep->phy_task, mii_display_config);
1346 schedule_work(&fep->phy_task);
1347} 803}
1348 804
1349phy_cmd_t const phy_cmd_relink[] = { 805static void fec_enet_get_drvinfo(struct net_device *dev,
1350 { mk_mii_read(MII_REG_CR), mii_queue_relink }, 806 struct ethtool_drvinfo *info)
1351 { mk_mii_end, }
1352 };
1353phy_cmd_t const phy_cmd_config[] = {
1354 { mk_mii_read(MII_REG_CR), mii_queue_config },
1355 { mk_mii_end, }
1356 };
1357
1358/* Read remainder of PHY ID. */
1359static void
1360mii_discover_phy3(uint mii_reg, struct net_device *dev)
1361{ 807{
1362 struct fec_enet_private *fep; 808 struct fec_enet_private *fep = netdev_priv(dev);
1363 int i;
1364
1365 fep = netdev_priv(dev);
1366 fep->phy_id |= (mii_reg & 0xffff);
1367 printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);
1368
1369 for(i = 0; phy_info[i]; i++) {
1370 if(phy_info[i]->id == (fep->phy_id >> 4))
1371 break;
1372 }
1373
1374 if (phy_info[i])
1375 printk(" -- %s\n", phy_info[i]->name);
1376 else
1377 printk(" -- unknown PHY!\n");
1378 809
1379 fep->phy = phy_info[i]; 810 strcpy(info->driver, fep->pdev->dev.driver->name);
1380 fep->phy_id_done = 1; 811 strcpy(info->version, "Revision: 1.0");
812 strcpy(info->bus_info, dev_name(&dev->dev));
1381} 813}
1382 814
1383/* Scan all of the MII PHY addresses looking for someone to respond 815static struct ethtool_ops fec_enet_ethtool_ops = {
1384 * with a valid ID. This usually happens quickly. 816 .get_settings = fec_enet_get_settings,
1385 */ 817 .set_settings = fec_enet_set_settings,
1386static void 818 .get_drvinfo = fec_enet_get_drvinfo,
1387mii_discover_phy(uint mii_reg, struct net_device *dev) 819 .get_link = ethtool_op_get_link,
1388{ 820};
1389 struct fec_enet_private *fep;
1390 uint phytype;
1391
1392 fep = netdev_priv(dev);
1393
1394 if (fep->phy_addr < 32) {
1395 if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {
1396
1397 /* Got first part of ID, now get remainder */
1398 fep->phy_id = phytype << 16;
1399 mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR2),
1400 mii_discover_phy3);
1401 } else {
1402 fep->phy_addr++;
1403 mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR1),
1404 mii_discover_phy);
1405 }
1406 } else {
1407 printk("FEC: No PHY device found.\n");
1408 /* Disable external MII interface */
1409 writel(0, fep->hwp + FEC_MII_SPEED);
1410 fep->phy_speed = 0;
1411#ifdef HAVE_mii_link_interrupt
1412 fec_disable_phy_intr(dev);
1413#endif
1414 }
1415}
1416 821
1417/* This interrupt occurs when the PHY detects a link change */ 822static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1418#ifdef HAVE_mii_link_interrupt
1419static irqreturn_t
1420mii_link_interrupt(int irq, void * dev_id)
1421{ 823{
1422 struct net_device *dev = dev_id;
1423 struct fec_enet_private *fep = netdev_priv(dev); 824 struct fec_enet_private *fep = netdev_priv(dev);
825 struct phy_device *phydev = fep->phy_dev;
826
827 if (!netif_running(dev))
828 return -EINVAL;
1424 829
1425 mii_do_cmd(dev, fep->phy->ack_int); 830 if (!phydev)
1426 mii_do_cmd(dev, phy_cmd_relink); /* restart and display status */ 831 return -ENODEV;
1427 832
1428 return IRQ_HANDLED; 833 return phy_mii_ioctl(phydev, if_mii(rq), cmd);
1429} 834}
1430#endif
1431 835
1432static void fec_enet_free_buffers(struct net_device *dev) 836static void fec_enet_free_buffers(struct net_device *dev)
1433{ 837{
@@ -1509,35 +913,8 @@ fec_enet_open(struct net_device *dev)
1509 if (ret) 913 if (ret)
1510 return ret; 914 return ret;
1511 915
1512 fep->sequence_done = 0; 916 /* schedule a link state check */
1513 fep->link = 0; 917 phy_start(fep->phy_dev);
1514
1515 fec_restart(dev, 1);
1516
1517 if (fep->phy) {
1518 mii_do_cmd(dev, fep->phy->ack_int);
1519 mii_do_cmd(dev, fep->phy->config);
1520 mii_do_cmd(dev, phy_cmd_config); /* display configuration */
1521
1522 /* Poll until the PHY tells us its configuration
1523 * (not link state).
1524 * Request is initiated by mii_do_cmd above, but answer
1525 * comes by interrupt.
1526 * This should take about 25 usec per register at 2.5 MHz,
1527 * and we read approximately 5 registers.
1528 */
1529 while(!fep->sequence_done)
1530 schedule();
1531
1532 mii_do_cmd(dev, fep->phy->startup);
1533 }
1534
1535 /* Set the initial link state to true. A lot of hardware
1536 * based on this device does not implement a PHY interrupt,
1537 * so we are never notified of link change.
1538 */
1539 fep->link = 1;
1540
1541 netif_start_queue(dev); 918 netif_start_queue(dev);
1542 fep->opened = 1; 919 fep->opened = 1;
1543 return 0; 920 return 0;
@@ -1550,6 +927,7 @@ fec_enet_close(struct net_device *dev)
1550 927
1551 /* Don't know what to do yet. */ 928 /* Don't know what to do yet. */
1552 fep->opened = 0; 929 fep->opened = 0;
930 phy_stop(fep->phy_dev);
1553 netif_stop_queue(dev); 931 netif_stop_queue(dev);
1554 fec_stop(dev); 932 fec_stop(dev);
1555 933
@@ -1574,7 +952,7 @@ fec_enet_close(struct net_device *dev)
1574static void set_multicast_list(struct net_device *dev) 952static void set_multicast_list(struct net_device *dev)
1575{ 953{
1576 struct fec_enet_private *fep = netdev_priv(dev); 954 struct fec_enet_private *fep = netdev_priv(dev);
1577 struct dev_mc_list *dmi; 955 struct netdev_hw_addr *ha;
1578 unsigned int i, bit, data, crc, tmp; 956 unsigned int i, bit, data, crc, tmp;
1579 unsigned char hash; 957 unsigned char hash;
1580 958
@@ -1604,16 +982,16 @@ static void set_multicast_list(struct net_device *dev)
1604 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 982 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1605 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 983 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1606 984
1607 netdev_for_each_mc_addr(dmi, dev) { 985 netdev_for_each_mc_addr(ha, dev) {
1608 /* Only support group multicast for now */ 986 /* Only support group multicast for now */
1609 if (!(dmi->dmi_addr[0] & 1)) 987 if (!(ha->addr[0] & 1))
1610 continue; 988 continue;
1611 989
1612 /* calculate crc32 value of mac address */ 990 /* calculate crc32 value of mac address */
1613 crc = 0xffffffff; 991 crc = 0xffffffff;
1614 992
1615 for (i = 0; i < dmi->dmi_addrlen; i++) { 993 for (i = 0; i < dev->addr_len; i++) {
1616 data = dmi->dmi_addr[i]; 994 data = ha->addr[i];
1617 for (bit = 0; bit < 8; bit++, data >>= 1) { 995 for (bit = 0; bit < 8; bit++, data >>= 1) {
1618 crc = (crc >> 1) ^ 996 crc = (crc >> 1) ^
1619 (((crc ^ data) & 1) ? CRC32_POLY : 0); 997 (((crc ^ data) & 1) ? CRC32_POLY : 0);
@@ -1653,7 +1031,7 @@ fec_set_mac_address(struct net_device *dev, void *p)
1653 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24), 1031 (dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
1654 fep->hwp + FEC_ADDR_LOW); 1032 fep->hwp + FEC_ADDR_LOW);
1655 writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24), 1033 writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
1656 fep + FEC_ADDR_HIGH); 1034 fep->hwp + FEC_ADDR_HIGH);
1657 return 0; 1035 return 0;
1658} 1036}
1659 1037
@@ -1666,6 +1044,7 @@ static const struct net_device_ops fec_netdev_ops = {
1666 .ndo_validate_addr = eth_validate_addr, 1044 .ndo_validate_addr = eth_validate_addr,
1667 .ndo_tx_timeout = fec_timeout, 1045 .ndo_tx_timeout = fec_timeout,
1668 .ndo_set_mac_address = fec_set_mac_address, 1046 .ndo_set_mac_address = fec_set_mac_address,
1047 .ndo_do_ioctl = fec_enet_ioctl,
1669}; 1048};
1670 1049
1671 /* 1050 /*
@@ -1689,7 +1068,6 @@ static int fec_enet_init(struct net_device *dev, int index)
1689 } 1068 }
1690 1069
1691 spin_lock_init(&fep->hw_lock); 1070 spin_lock_init(&fep->hw_lock);
1692 spin_lock_init(&fep->mii_lock);
1693 1071
1694 fep->index = index; 1072 fep->index = index;
1695 fep->hwp = (void __iomem *)dev->base_addr; 1073 fep->hwp = (void __iomem *)dev->base_addr;
@@ -1716,20 +1094,10 @@ static int fec_enet_init(struct net_device *dev, int index)
1716 fep->rx_bd_base = cbd_base; 1094 fep->rx_bd_base = cbd_base;
1717 fep->tx_bd_base = cbd_base + RX_RING_SIZE; 1095 fep->tx_bd_base = cbd_base + RX_RING_SIZE;
1718 1096
1719#ifdef HAVE_mii_link_interrupt
1720 fec_request_mii_intr(dev);
1721#endif
1722 /* The FEC Ethernet specific entries in the device structure */ 1097 /* The FEC Ethernet specific entries in the device structure */
1723 dev->watchdog_timeo = TX_TIMEOUT; 1098 dev->watchdog_timeo = TX_TIMEOUT;
1724 dev->netdev_ops = &fec_netdev_ops; 1099 dev->netdev_ops = &fec_netdev_ops;
1725 1100 dev->ethtool_ops = &fec_enet_ethtool_ops;
1726 for (i=0; i<NMII-1; i++)
1727 mii_cmds[i].mii_next = &mii_cmds[i+1];
1728 mii_free = mii_cmds;
1729
1730 /* Set MII speed to 2.5 MHz */
1731 fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
1732 / 2500000) / 2) & 0x3F) << 1;
1733 1101
1734 /* Initialize the receive buffer descriptors. */ 1102 /* Initialize the receive buffer descriptors. */
1735 bdp = fep->rx_bd_base; 1103 bdp = fep->rx_bd_base;
@@ -1760,13 +1128,6 @@ static int fec_enet_init(struct net_device *dev, int index)
1760 1128
1761 fec_restart(dev, 0); 1129 fec_restart(dev, 0);
1762 1130
1763 /* Queue up command to detect the PHY and initialize the
1764 * remainder of the interface.
1765 */
1766 fep->phy_id_done = 0;
1767 fep->phy_addr = 0;
1768 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
1769
1770 return 0; 1131 return 0;
1771} 1132}
1772 1133
@@ -1835,8 +1196,7 @@ fec_restart(struct net_device *dev, int duplex)
1835 writel(0, fep->hwp + FEC_R_DES_ACTIVE); 1196 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1836 1197
1837 /* Enable interrupts we wish to service */ 1198 /* Enable interrupts we wish to service */
1838 writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII, 1199 writel(FEC_ENET_TXF | FEC_ENET_RXF, fep->hwp + FEC_IMASK);
1839 fep->hwp + FEC_IMASK);
1840} 1200}
1841 1201
1842static void 1202static void
@@ -1859,7 +1219,6 @@ fec_stop(struct net_device *dev)
1859 /* Clear outstanding MII command interrupts. */ 1219 /* Clear outstanding MII command interrupts. */
1860 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); 1220 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
1861 1221
1862 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1863 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1222 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1864} 1223}
1865 1224
@@ -1891,6 +1250,7 @@ fec_probe(struct platform_device *pdev)
1891 memset(fep, 0, sizeof(*fep)); 1250 memset(fep, 0, sizeof(*fep));
1892 1251
1893 ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r)); 1252 ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
1253 fep->pdev = pdev;
1894 1254
1895 if (!ndev->base_addr) { 1255 if (!ndev->base_addr) {
1896 ret = -ENOMEM; 1256 ret = -ENOMEM;
@@ -1926,13 +1286,24 @@ fec_probe(struct platform_device *pdev)
1926 if (ret) 1286 if (ret)
1927 goto failed_init; 1287 goto failed_init;
1928 1288
1289 ret = fec_enet_mii_init(pdev);
1290 if (ret)
1291 goto failed_mii_init;
1292
1929 ret = register_netdev(ndev); 1293 ret = register_netdev(ndev);
1930 if (ret) 1294 if (ret)
1931 goto failed_register; 1295 goto failed_register;
1932 1296
1297 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
1298 "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
1299 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
1300 fep->phy_dev->irq);
1301
1933 return 0; 1302 return 0;
1934 1303
1935failed_register: 1304failed_register:
1305 fec_enet_mii_remove(fep);
1306failed_mii_init:
1936failed_init: 1307failed_init:
1937 clk_disable(fep->clk); 1308 clk_disable(fep->clk);
1938 clk_put(fep->clk); 1309 clk_put(fep->clk);
@@ -1959,6 +1330,7 @@ fec_drv_remove(struct platform_device *pdev)
1959 platform_set_drvdata(pdev, NULL); 1330 platform_set_drvdata(pdev, NULL);
1960 1331
1961 fec_stop(ndev); 1332 fec_stop(ndev);
1333 fec_enet_mii_remove(fep);
1962 clk_disable(fep->clk); 1334 clk_disable(fep->clk);
1963 clk_put(fep->clk); 1335 clk_put(fep->clk);
1964 iounmap((void __iomem *)ndev->base_addr); 1336 iounmap((void __iomem *)ndev->base_addr);
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index be540b67ea57..25e6cc6840b1 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -327,7 +327,6 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
327 } 327 }
328 328
329 spin_lock_irqsave(&priv->lock, flags); 329 spin_lock_irqsave(&priv->lock, flags);
330 dev->trans_start = jiffies;
331 330
332 bd = (struct bcom_fec_bd *) 331 bd = (struct bcom_fec_bd *)
333 bcom_prepare_next_buffer(priv->tx_dmatsk); 332 bcom_prepare_next_buffer(priv->tx_dmatsk);
@@ -436,7 +435,6 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
436 DMA_FROM_DEVICE); 435 DMA_FROM_DEVICE);
437 length = status & BCOM_FEC_RX_BD_LEN_MASK; 436 length = status & BCOM_FEC_RX_BD_LEN_MASK;
438 skb_put(rskb, length - 4); /* length without CRC32 */ 437 skb_put(rskb, length - 4); /* length without CRC32 */
439 rskb->dev = dev;
440 rskb->protocol = eth_type_trans(rskb, dev); 438 rskb->protocol = eth_type_trans(rskb, dev);
441 netif_rx(rskb); 439 netif_rx(rskb);
442 440
@@ -576,12 +574,12 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
576 out_be32(&fec->gaddr2, 0xffffffff); 574 out_be32(&fec->gaddr2, 0xffffffff);
577 } else { 575 } else {
578 u32 crc; 576 u32 crc;
579 struct dev_mc_list *dmi; 577 struct netdev_hw_addr *ha;
580 u32 gaddr1 = 0x00000000; 578 u32 gaddr1 = 0x00000000;
581 u32 gaddr2 = 0x00000000; 579 u32 gaddr2 = 0x00000000;
582 580
583 netdev_for_each_mc_addr(dmi, dev) { 581 netdev_for_each_mc_addr(ha, dev) {
584 crc = ether_crc_le(6, dmi->dmi_addr) >> 26; 582 crc = ether_crc_le(6, ha->addr) >> 26;
585 if (crc >= 32) 583 if (crc >= 32)
586 gaddr1 |= 1 << (crc-32); 584 gaddr1 |= 1 << (crc-32);
587 else 585 else
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5c98f7c22425..268ea4d566d7 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -1104,20 +1104,16 @@ static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
1104 1104
1105static void nv_napi_enable(struct net_device *dev) 1105static void nv_napi_enable(struct net_device *dev)
1106{ 1106{
1107#ifdef CONFIG_FORCEDETH_NAPI
1108 struct fe_priv *np = get_nvpriv(dev); 1107 struct fe_priv *np = get_nvpriv(dev);
1109 1108
1110 napi_enable(&np->napi); 1109 napi_enable(&np->napi);
1111#endif
1112} 1110}
1113 1111
1114static void nv_napi_disable(struct net_device *dev) 1112static void nv_napi_disable(struct net_device *dev)
1115{ 1113{
1116#ifdef CONFIG_FORCEDETH_NAPI
1117 struct fe_priv *np = get_nvpriv(dev); 1114 struct fe_priv *np = get_nvpriv(dev);
1118 1115
1119 napi_disable(&np->napi); 1116 napi_disable(&np->napi);
1120#endif
1121} 1117}
1122 1118
1123#define MII_READ (-1) 1119#define MII_READ (-1)
@@ -1810,7 +1806,6 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
1810} 1806}
1811 1807
1812/* If rx bufs are exhausted called after 50ms to attempt to refresh */ 1808/* If rx bufs are exhausted called after 50ms to attempt to refresh */
1813#ifdef CONFIG_FORCEDETH_NAPI
1814static void nv_do_rx_refill(unsigned long data) 1809static void nv_do_rx_refill(unsigned long data)
1815{ 1810{
1816 struct net_device *dev = (struct net_device *) data; 1811 struct net_device *dev = (struct net_device *) data;
@@ -1819,41 +1814,6 @@ static void nv_do_rx_refill(unsigned long data)
1819 /* Just reschedule NAPI rx processing */ 1814 /* Just reschedule NAPI rx processing */
1820 napi_schedule(&np->napi); 1815 napi_schedule(&np->napi);
1821} 1816}
1822#else
1823static void nv_do_rx_refill(unsigned long data)
1824{
1825 struct net_device *dev = (struct net_device *) data;
1826 struct fe_priv *np = netdev_priv(dev);
1827 int retcode;
1828
1829 if (!using_multi_irqs(dev)) {
1830 if (np->msi_flags & NV_MSI_X_ENABLED)
1831 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1832 else
1833 disable_irq(np->pci_dev->irq);
1834 } else {
1835 disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1836 }
1837 if (!nv_optimized(np))
1838 retcode = nv_alloc_rx(dev);
1839 else
1840 retcode = nv_alloc_rx_optimized(dev);
1841 if (retcode) {
1842 spin_lock_irq(&np->lock);
1843 if (!np->in_shutdown)
1844 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
1845 spin_unlock_irq(&np->lock);
1846 }
1847 if (!using_multi_irqs(dev)) {
1848 if (np->msi_flags & NV_MSI_X_ENABLED)
1849 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector);
1850 else
1851 enable_irq(np->pci_dev->irq);
1852 } else {
1853 enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
1854 }
1855}
1856#endif
1857 1817
1858static void nv_init_rx(struct net_device *dev) 1818static void nv_init_rx(struct net_device *dev)
1859{ 1819{
@@ -2148,7 +2108,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2148 unsigned int i; 2108 unsigned int i;
2149 u32 offset = 0; 2109 u32 offset = 0;
2150 u32 bcnt; 2110 u32 bcnt;
2151 u32 size = skb->len-skb->data_len; 2111 u32 size = skb_headlen(skb);
2152 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2112 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2153 u32 empty_slots; 2113 u32 empty_slots;
2154 struct ring_desc* put_tx; 2114 struct ring_desc* put_tx;
@@ -2254,7 +2214,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2254 dprintk("\n"); 2214 dprintk("\n");
2255 } 2215 }
2256 2216
2257 dev->trans_start = jiffies;
2258 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2217 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2259 return NETDEV_TX_OK; 2218 return NETDEV_TX_OK;
2260} 2219}
@@ -2269,7 +2228,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2269 unsigned int i; 2228 unsigned int i;
2270 u32 offset = 0; 2229 u32 offset = 0;
2271 u32 bcnt; 2230 u32 bcnt;
2272 u32 size = skb->len-skb->data_len; 2231 u32 size = skb_headlen(skb);
2273 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2232 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2274 u32 empty_slots; 2233 u32 empty_slots;
2275 struct ring_desc_ex* put_tx; 2234 struct ring_desc_ex* put_tx;
@@ -2409,7 +2368,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2409 dprintk("\n"); 2368 dprintk("\n");
2410 } 2369 }
2411 2370
2412 dev->trans_start = jiffies;
2413 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2371 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2414 return NETDEV_TX_OK; 2372 return NETDEV_TX_OK;
2415} 2373}
@@ -2816,11 +2774,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2816 skb->protocol = eth_type_trans(skb, dev); 2774 skb->protocol = eth_type_trans(skb, dev);
2817 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n", 2775 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2818 dev->name, len, skb->protocol); 2776 dev->name, len, skb->protocol);
2819#ifdef CONFIG_FORCEDETH_NAPI 2777 napi_gro_receive(&np->napi, skb);
2820 netif_receive_skb(skb);
2821#else
2822 netif_rx(skb);
2823#endif
2824 dev->stats.rx_packets++; 2778 dev->stats.rx_packets++;
2825 dev->stats.rx_bytes += len; 2779 dev->stats.rx_bytes += len;
2826next_pkt: 2780next_pkt:
@@ -2909,27 +2863,14 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2909 dev->name, len, skb->protocol); 2863 dev->name, len, skb->protocol);
2910 2864
2911 if (likely(!np->vlangrp)) { 2865 if (likely(!np->vlangrp)) {
2912#ifdef CONFIG_FORCEDETH_NAPI 2866 napi_gro_receive(&np->napi, skb);
2913 netif_receive_skb(skb);
2914#else
2915 netif_rx(skb);
2916#endif
2917 } else { 2867 } else {
2918 vlanflags = le32_to_cpu(np->get_rx.ex->buflow); 2868 vlanflags = le32_to_cpu(np->get_rx.ex->buflow);
2919 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) { 2869 if (vlanflags & NV_RX3_VLAN_TAG_PRESENT) {
2920#ifdef CONFIG_FORCEDETH_NAPI 2870 vlan_gro_receive(&np->napi, np->vlangrp,
2921 vlan_hwaccel_receive_skb(skb, np->vlangrp, 2871 vlanflags & NV_RX3_VLAN_TAG_MASK, skb);
2922 vlanflags & NV_RX3_VLAN_TAG_MASK);
2923#else
2924 vlan_hwaccel_rx(skb, np->vlangrp,
2925 vlanflags & NV_RX3_VLAN_TAG_MASK);
2926#endif
2927 } else { 2872 } else {
2928#ifdef CONFIG_FORCEDETH_NAPI 2873 napi_gro_receive(&np->napi, skb);
2929 netif_receive_skb(skb);
2930#else
2931 netif_rx(skb);
2932#endif
2933 } 2874 }
2934 } 2875 }
2935 2876
@@ -3104,12 +3045,14 @@ static void nv_set_multicast(struct net_device *dev)
3104 if (dev->flags & IFF_ALLMULTI) { 3045 if (dev->flags & IFF_ALLMULTI) {
3105 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 3046 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
3106 } else { 3047 } else {
3107 struct dev_mc_list *walk; 3048 struct netdev_hw_addr *ha;
3108 3049
3109 netdev_for_each_mc_addr(walk, dev) { 3050 netdev_for_each_mc_addr(ha, dev) {
3051 unsigned char *addr = ha->addr;
3110 u32 a, b; 3052 u32 a, b;
3111 a = le32_to_cpu(*(__le32 *) walk->dmi_addr); 3053
3112 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4])); 3054 a = le32_to_cpu(*(__le32 *) addr);
3055 b = le16_to_cpu(*(__le16 *) (&addr[4]));
3113 alwaysOn[0] &= a; 3056 alwaysOn[0] &= a;
3114 alwaysOff[0] &= ~a; 3057 alwaysOff[0] &= ~a;
3115 alwaysOn[1] &= b; 3058 alwaysOn[1] &= b;
@@ -3494,10 +3437,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3494 struct net_device *dev = (struct net_device *) data; 3437 struct net_device *dev = (struct net_device *) data;
3495 struct fe_priv *np = netdev_priv(dev); 3438 struct fe_priv *np = netdev_priv(dev);
3496 u8 __iomem *base = get_hwbase(dev); 3439 u8 __iomem *base = get_hwbase(dev);
3497#ifndef CONFIG_FORCEDETH_NAPI
3498 int total_work = 0;
3499 int loop_count = 0;
3500#endif
3501 3440
3502 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name); 3441 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
3503 3442
@@ -3514,7 +3453,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3514 3453
3515 nv_msi_workaround(np); 3454 nv_msi_workaround(np);
3516 3455
3517#ifdef CONFIG_FORCEDETH_NAPI
3518 if (napi_schedule_prep(&np->napi)) { 3456 if (napi_schedule_prep(&np->napi)) {
3519 /* 3457 /*
3520 * Disable further irq's (msix not enabled with napi) 3458 * Disable further irq's (msix not enabled with napi)
@@ -3523,65 +3461,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3523 __napi_schedule(&np->napi); 3461 __napi_schedule(&np->napi);
3524 } 3462 }
3525 3463
3526#else
3527 do
3528 {
3529 int work = 0;
3530 if ((work = nv_rx_process(dev, RX_WORK_PER_LOOP))) {
3531 if (unlikely(nv_alloc_rx(dev))) {
3532 spin_lock(&np->lock);
3533 if (!np->in_shutdown)
3534 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3535 spin_unlock(&np->lock);
3536 }
3537 }
3538
3539 spin_lock(&np->lock);
3540 work += nv_tx_done(dev, TX_WORK_PER_LOOP);
3541 spin_unlock(&np->lock);
3542
3543 if (!work)
3544 break;
3545
3546 total_work += work;
3547
3548 loop_count++;
3549 }
3550 while (loop_count < max_interrupt_work);
3551
3552 if (nv_change_interrupt_mode(dev, total_work)) {
3553 /* setup new irq mask */
3554 writel(np->irqmask, base + NvRegIrqMask);
3555 }
3556
3557 if (unlikely(np->events & NVREG_IRQ_LINK)) {
3558 spin_lock(&np->lock);
3559 nv_link_irq(dev);
3560 spin_unlock(&np->lock);
3561 }
3562 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3563 spin_lock(&np->lock);
3564 nv_linkchange(dev);
3565 spin_unlock(&np->lock);
3566 np->link_timeout = jiffies + LINK_TIMEOUT;
3567 }
3568 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3569 spin_lock(&np->lock);
3570 /* disable interrupts on the nic */
3571 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3572 writel(0, base + NvRegIrqMask);
3573 else
3574 writel(np->irqmask, base + NvRegIrqMask);
3575 pci_push(base);
3576
3577 if (!np->in_shutdown) {
3578 np->nic_poll_irq = np->irqmask;
3579 np->recover_error = 1;
3580 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3581 }
3582 spin_unlock(&np->lock);
3583 }
3584#endif
3585 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name); 3464 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3586 3465
3587 return IRQ_HANDLED; 3466 return IRQ_HANDLED;
@@ -3597,10 +3476,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3597 struct net_device *dev = (struct net_device *) data; 3476 struct net_device *dev = (struct net_device *) data;
3598 struct fe_priv *np = netdev_priv(dev); 3477 struct fe_priv *np = netdev_priv(dev);
3599 u8 __iomem *base = get_hwbase(dev); 3478 u8 __iomem *base = get_hwbase(dev);
3600#ifndef CONFIG_FORCEDETH_NAPI
3601 int total_work = 0;
3602 int loop_count = 0;
3603#endif
3604 3479
3605 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name); 3480 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3606 3481
@@ -3617,7 +3492,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3617 3492
3618 nv_msi_workaround(np); 3493 nv_msi_workaround(np);
3619 3494
3620#ifdef CONFIG_FORCEDETH_NAPI
3621 if (napi_schedule_prep(&np->napi)) { 3495 if (napi_schedule_prep(&np->napi)) {
3622 /* 3496 /*
3623 * Disable further irq's (msix not enabled with napi) 3497 * Disable further irq's (msix not enabled with napi)
@@ -3625,66 +3499,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3625 writel(0, base + NvRegIrqMask); 3499 writel(0, base + NvRegIrqMask);
3626 __napi_schedule(&np->napi); 3500 __napi_schedule(&np->napi);
3627 } 3501 }
3628#else
3629 do
3630 {
3631 int work = 0;
3632 if ((work = nv_rx_process_optimized(dev, RX_WORK_PER_LOOP))) {
3633 if (unlikely(nv_alloc_rx_optimized(dev))) {
3634 spin_lock(&np->lock);
3635 if (!np->in_shutdown)
3636 mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
3637 spin_unlock(&np->lock);
3638 }
3639 }
3640
3641 spin_lock(&np->lock);
3642 work += nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
3643 spin_unlock(&np->lock);
3644
3645 if (!work)
3646 break;
3647
3648 total_work += work;
3649
3650 loop_count++;
3651 }
3652 while (loop_count < max_interrupt_work);
3653
3654 if (nv_change_interrupt_mode(dev, total_work)) {
3655 /* setup new irq mask */
3656 writel(np->irqmask, base + NvRegIrqMask);
3657 }
3658
3659 if (unlikely(np->events & NVREG_IRQ_LINK)) {
3660 spin_lock(&np->lock);
3661 nv_link_irq(dev);
3662 spin_unlock(&np->lock);
3663 }
3664 if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
3665 spin_lock(&np->lock);
3666 nv_linkchange(dev);
3667 spin_unlock(&np->lock);
3668 np->link_timeout = jiffies + LINK_TIMEOUT;
3669 }
3670 if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
3671 spin_lock(&np->lock);
3672 /* disable interrupts on the nic */
3673 if (!(np->msi_flags & NV_MSI_X_ENABLED))
3674 writel(0, base + NvRegIrqMask);
3675 else
3676 writel(np->irqmask, base + NvRegIrqMask);
3677 pci_push(base);
3678
3679 if (!np->in_shutdown) {
3680 np->nic_poll_irq = np->irqmask;
3681 np->recover_error = 1;
3682 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3683 }
3684 spin_unlock(&np->lock);
3685 }
3686
3687#endif
3688 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name); 3502 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3689 3503
3690 return IRQ_HANDLED; 3504 return IRQ_HANDLED;
@@ -3733,7 +3547,6 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3733 return IRQ_RETVAL(i); 3547 return IRQ_RETVAL(i);
3734} 3548}
3735 3549
3736#ifdef CONFIG_FORCEDETH_NAPI
3737static int nv_napi_poll(struct napi_struct *napi, int budget) 3550static int nv_napi_poll(struct napi_struct *napi, int budget)
3738{ 3551{
3739 struct fe_priv *np = container_of(napi, struct fe_priv, napi); 3552 struct fe_priv *np = container_of(napi, struct fe_priv, napi);
@@ -3741,23 +3554,27 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3741 u8 __iomem *base = get_hwbase(dev); 3554 u8 __iomem *base = get_hwbase(dev);
3742 unsigned long flags; 3555 unsigned long flags;
3743 int retcode; 3556 int retcode;
3744 int tx_work, rx_work; 3557 int rx_count, tx_work=0, rx_work=0;
3745 3558
3746 if (!nv_optimized(np)) { 3559 do {
3747 spin_lock_irqsave(&np->lock, flags); 3560 if (!nv_optimized(np)) {
3748 tx_work = nv_tx_done(dev, np->tx_ring_size); 3561 spin_lock_irqsave(&np->lock, flags);
3749 spin_unlock_irqrestore(&np->lock, flags); 3562 tx_work += nv_tx_done(dev, np->tx_ring_size);
3563 spin_unlock_irqrestore(&np->lock, flags);
3750 3564
3751 rx_work = nv_rx_process(dev, budget); 3565 rx_count = nv_rx_process(dev, budget - rx_work);
3752 retcode = nv_alloc_rx(dev); 3566 retcode = nv_alloc_rx(dev);
3753 } else { 3567 } else {
3754 spin_lock_irqsave(&np->lock, flags); 3568 spin_lock_irqsave(&np->lock, flags);
3755 tx_work = nv_tx_done_optimized(dev, np->tx_ring_size); 3569 tx_work += nv_tx_done_optimized(dev, np->tx_ring_size);
3756 spin_unlock_irqrestore(&np->lock, flags); 3570 spin_unlock_irqrestore(&np->lock, flags);
3757 3571
3758 rx_work = nv_rx_process_optimized(dev, budget); 3572 rx_count = nv_rx_process_optimized(dev,
3759 retcode = nv_alloc_rx_optimized(dev); 3573 budget - rx_work);
3760 } 3574 retcode = nv_alloc_rx_optimized(dev);
3575 }
3576 } while (retcode == 0 &&
3577 rx_count > 0 && (rx_work += rx_count) < budget);
3761 3578
3762 if (retcode) { 3579 if (retcode) {
3763 spin_lock_irqsave(&np->lock, flags); 3580 spin_lock_irqsave(&np->lock, flags);
@@ -3800,7 +3617,6 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3800 } 3617 }
3801 return rx_work; 3618 return rx_work;
3802} 3619}
3803#endif
3804 3620
3805static irqreturn_t nv_nic_irq_rx(int foo, void *data) 3621static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3806{ 3622{
@@ -5706,6 +5522,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5706 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 5522 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
5707 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 5523 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
5708 dev->features |= NETIF_F_TSO; 5524 dev->features |= NETIF_F_TSO;
5525 dev->features |= NETIF_F_GRO;
5709 } 5526 }
5710 5527
5711 np->vlanctl_bits = 0; 5528 np->vlanctl_bits = 0;
@@ -5758,9 +5575,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5758 else 5575 else
5759 dev->netdev_ops = &nv_netdev_ops_optimized; 5576 dev->netdev_ops = &nv_netdev_ops_optimized;
5760 5577
5761#ifdef CONFIG_FORCEDETH_NAPI
5762 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP); 5578 netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
5763#endif
5764 SET_ETHTOOL_OPS(dev, &ops); 5579 SET_ETHTOOL_OPS(dev, &ops);
5765 dev->watchdog_timeo = NV_WATCHDOG_TIMEO; 5580 dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
5766 5581
@@ -5863,7 +5678,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5863 /* msix has had reported issues when modifying irqmask 5678 /* msix has had reported issues when modifying irqmask
5864 as in the case of napi, therefore, disable for now 5679 as in the case of napi, therefore, disable for now
5865 */ 5680 */
5866#ifndef CONFIG_FORCEDETH_NAPI 5681#if 0
5867 np->msi_flags |= NV_MSI_X_CAPABLE; 5682 np->msi_flags |= NV_MSI_X_CAPABLE;
5868#endif 5683#endif
5869 } 5684 }
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index cae2d16858d1..309a0eaddd81 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -674,8 +674,6 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
674 skb->data, skb->len, DMA_TO_DEVICE)); 674 skb->data, skb->len, DMA_TO_DEVICE));
675 CBDW_DATLEN(bdp, skb->len); 675 CBDW_DATLEN(bdp, skb->len);
676 676
677 dev->trans_start = jiffies;
678
679 /* 677 /*
680 * If this was the last BD in the ring, start at the beginning again. 678 * If this was the last BD in the ring, start at the beginning again.
681 */ 679 */
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 9d4f272137d6..5d45084b287d 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -231,12 +231,12 @@ static void set_multicast_finish(struct net_device *dev)
231 231
232static void set_multicast_list(struct net_device *dev) 232static void set_multicast_list(struct net_device *dev)
233{ 233{
234 struct dev_mc_list *pmc; 234 struct netdev_hw_addr *ha;
235 235
236 if ((dev->flags & IFF_PROMISC) == 0) { 236 if ((dev->flags & IFF_PROMISC) == 0) {
237 set_multicast_start(dev); 237 set_multicast_start(dev);
238 netdev_for_each_mc_addr(pmc, dev) 238 netdev_for_each_mc_addr(ha, dev)
239 set_multicast_one(dev, pmc->dmi_addr); 239 set_multicast_one(dev, ha->addr);
240 set_multicast_finish(dev); 240 set_multicast_finish(dev);
241 } else 241 } else
242 set_promiscuous_mode(dev); 242 set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index bd7a6e7064bb..7ca1642276d0 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -232,12 +232,12 @@ static void set_multicast_finish(struct net_device *dev)
232 232
233static void set_multicast_list(struct net_device *dev) 233static void set_multicast_list(struct net_device *dev)
234{ 234{
235 struct dev_mc_list *pmc; 235 struct netdev_hw_addr *ha;
236 236
237 if ((dev->flags & IFF_PROMISC) == 0) { 237 if ((dev->flags & IFF_PROMISC) == 0) {
238 set_multicast_start(dev); 238 set_multicast_start(dev);
239 netdev_for_each_mc_addr(pmc, dev) 239 netdev_for_each_mc_addr(ha, dev)
240 set_multicast_one(dev, pmc->dmi_addr); 240 set_multicast_one(dev, ha->addr);
241 set_multicast_finish(dev); 241 set_multicast_finish(dev);
242 } else 242 } else
243 set_promiscuous_mode(dev); 243 set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 49a4d8c60168..a3c44544846d 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -223,12 +223,12 @@ static void set_multicast_finish(struct net_device *dev)
223 223
224static void set_multicast_list(struct net_device *dev) 224static void set_multicast_list(struct net_device *dev)
225{ 225{
226 struct dev_mc_list *pmc; 226 struct netdev_hw_addr *ha;
227 227
228 if ((dev->flags & IFF_PROMISC) == 0) { 228 if ((dev->flags & IFF_PROMISC) == 0) {
229 set_multicast_start(dev); 229 set_multicast_start(dev);
230 netdev_for_each_mc_addr(pmc, dev) 230 netdev_for_each_mc_addr(ha, dev)
231 set_multicast_one(dev, pmc->dmi_addr); 231 set_multicast_one(dev, ha->addr);
232 set_multicast_finish(dev); 232 set_multicast_finish(dev);
233 } else 233 } else
234 set_promiscuous_mode(dev); 234 set_promiscuous_mode(dev);
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index 16508535720a..b4c41d72c423 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -205,8 +205,6 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
205static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np) 205static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
206{ 206{
207 struct gfar __iomem *enet_regs; 207 struct gfar __iomem *enet_regs;
208 u32 __iomem *ioremap_tbipa;
209 u64 addr, size;
210 208
211 /* 209 /*
212 * This is mildly evil, but so is our hardware for doing this. 210 * This is mildly evil, but so is our hardware for doing this.
@@ -220,9 +218,7 @@ static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct devi
220 return &enet_regs->tbipa; 218 return &enet_regs->tbipa;
221 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") || 219 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
222 of_device_is_compatible(np, "fsl,etsec2-tbi")) { 220 of_device_is_compatible(np, "fsl,etsec2-tbi")) {
223 addr = of_translate_address(np, of_get_address(np, 1, &size, NULL)); 221 return of_iomap(np, 1);
224 ioremap_tbipa = ioremap(addr, size);
225 return ioremap_tbipa;
226 } else 222 } else
227 return NULL; 223 return NULL;
228} 224}
@@ -279,16 +275,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
279 u32 __iomem *tbipa; 275 u32 __iomem *tbipa;
280 struct mii_bus *new_bus; 276 struct mii_bus *new_bus;
281 int tbiaddr = -1; 277 int tbiaddr = -1;
278 const u32 *addrp;
282 u64 addr = 0, size = 0; 279 u64 addr = 0, size = 0;
283 int err = 0; 280 int err;
284 281
285 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 282 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
286 if (!priv) 283 if (!priv)
287 return -ENOMEM; 284 return -ENOMEM;
288 285
289 new_bus = mdiobus_alloc(); 286 new_bus = mdiobus_alloc();
290 if (NULL == new_bus) 287 if (!new_bus) {
288 err = -ENOMEM;
291 goto err_free_priv; 289 goto err_free_priv;
290 }
292 291
293 new_bus->name = "Freescale PowerQUICC MII Bus", 292 new_bus->name = "Freescale PowerQUICC MII Bus",
294 new_bus->read = &fsl_pq_mdio_read, 293 new_bus->read = &fsl_pq_mdio_read,
@@ -297,8 +296,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
297 new_bus->priv = priv; 296 new_bus->priv = priv;
298 fsl_pq_mdio_bus_name(new_bus->id, np); 297 fsl_pq_mdio_bus_name(new_bus->id, np);
299 298
299 addrp = of_get_address(np, 0, &size, NULL);
300 if (!addrp) {
301 err = -EINVAL;
302 goto err_free_bus;
303 }
304
300 /* Set the PHY base address */ 305 /* Set the PHY base address */
301 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); 306 addr = of_translate_address(np, addrp);
307 if (addr == OF_BAD_ADDR) {
308 err = -EINVAL;
309 goto err_free_bus;
310 }
311
302 map = ioremap(addr, size); 312 map = ioremap(addr, size);
303 if (!map) { 313 if (!map) {
304 err = -ENOMEM; 314 err = -ENOMEM;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index c3b292a31328..1830f3199cb5 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -82,6 +82,7 @@
82#include <linux/tcp.h> 82#include <linux/tcp.h>
83#include <linux/udp.h> 83#include <linux/udp.h>
84#include <linux/in.h> 84#include <linux/in.h>
85#include <linux/net_tstamp.h>
85 86
86#include <asm/io.h> 87#include <asm/io.h>
87#include <asm/irq.h> 88#include <asm/irq.h>
@@ -377,6 +378,13 @@ static void gfar_init_mac(struct net_device *ndev)
377 rctrl |= RCTRL_PADDING(priv->padding); 378 rctrl |= RCTRL_PADDING(priv->padding);
378 } 379 }
379 380
381 /* Insert receive time stamps into padding alignment bytes */
382 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
383 rctrl &= ~RCTRL_PAL_MASK;
384 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
385 priv->padding = 8;
386 }
387
380 /* keep vlan related bits if it's enabled */ 388 /* keep vlan related bits if it's enabled */
381 if (priv->vlgrp) { 389 if (priv->vlgrp) {
382 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; 390 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
@@ -501,7 +509,8 @@ void unlock_tx_qs(struct gfar_private *priv)
501/* Returns 1 if incoming frames use an FCB */ 509/* Returns 1 if incoming frames use an FCB */
502static inline int gfar_uses_fcb(struct gfar_private *priv) 510static inline int gfar_uses_fcb(struct gfar_private *priv)
503{ 511{
504 return priv->vlgrp || priv->rx_csum_enable; 512 return priv->vlgrp || priv->rx_csum_enable ||
513 (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
505} 514}
506 515
507static void free_tx_pointers(struct gfar_private *priv) 516static void free_tx_pointers(struct gfar_private *priv)
@@ -549,12 +558,8 @@ static int gfar_parse_group(struct device_node *np,
549 struct gfar_private *priv, const char *model) 558 struct gfar_private *priv, const char *model)
550{ 559{
551 u32 *queue_mask; 560 u32 *queue_mask;
552 u64 addr, size;
553
554 addr = of_translate_address(np,
555 of_get_address(np, 0, &size, NULL));
556 priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
557 561
562 priv->gfargrp[priv->num_grps].regs = of_iomap(np, 0);
558 if (!priv->gfargrp[priv->num_grps].regs) 563 if (!priv->gfargrp[priv->num_grps].regs)
559 return -ENOMEM; 564 return -ENOMEM;
560 565
@@ -742,7 +747,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
742 FSL_GIANFAR_DEV_HAS_CSUM | 747 FSL_GIANFAR_DEV_HAS_CSUM |
743 FSL_GIANFAR_DEV_HAS_VLAN | 748 FSL_GIANFAR_DEV_HAS_VLAN |
744 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 749 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
745 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH; 750 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
751 FSL_GIANFAR_DEV_HAS_TIMER;
746 752
747 ctype = of_get_property(np, "phy-connection-type", NULL); 753 ctype = of_get_property(np, "phy-connection-type", NULL);
748 754
@@ -772,6 +778,48 @@ err_grp_init:
772 return err; 778 return err;
773} 779}
774 780
781static int gfar_hwtstamp_ioctl(struct net_device *netdev,
782 struct ifreq *ifr, int cmd)
783{
784 struct hwtstamp_config config;
785 struct gfar_private *priv = netdev_priv(netdev);
786
787 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
788 return -EFAULT;
789
790 /* reserved for future extensions */
791 if (config.flags)
792 return -EINVAL;
793
794 switch (config.tx_type) {
795 case HWTSTAMP_TX_OFF:
796 priv->hwts_tx_en = 0;
797 break;
798 case HWTSTAMP_TX_ON:
799 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
800 return -ERANGE;
801 priv->hwts_tx_en = 1;
802 break;
803 default:
804 return -ERANGE;
805 }
806
807 switch (config.rx_filter) {
808 case HWTSTAMP_FILTER_NONE:
809 priv->hwts_rx_en = 0;
810 break;
811 default:
812 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
813 return -ERANGE;
814 priv->hwts_rx_en = 1;
815 config.rx_filter = HWTSTAMP_FILTER_ALL;
816 break;
817 }
818
819 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
820 -EFAULT : 0;
821}
822
775/* Ioctl MII Interface */ 823/* Ioctl MII Interface */
776static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 824static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
777{ 825{
@@ -780,6 +828,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
780 if (!netif_running(dev)) 828 if (!netif_running(dev))
781 return -EINVAL; 829 return -EINVAL;
782 830
831 if (cmd == SIOCSHWTSTAMP)
832 return gfar_hwtstamp_ioctl(dev, rq, cmd);
833
783 if (!priv->phydev) 834 if (!priv->phydev)
784 return -ENODEV; 835 return -ENODEV;
785 836
@@ -982,7 +1033,8 @@ static int gfar_probe(struct of_device *ofdev,
982 else 1033 else
983 priv->padding = 0; 1034 priv->padding = 0;
984 1035
985 if (dev->features & NETIF_F_IP_CSUM) 1036 if (dev->features & NETIF_F_IP_CSUM ||
1037 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
986 dev->hard_header_len += GMAC_FCB_LEN; 1038 dev->hard_header_len += GMAC_FCB_LEN;
987 1039
988 /* Program the isrg regs only if number of grps > 1 */ 1040 /* Program the isrg regs only if number of grps > 1 */
@@ -1292,21 +1344,9 @@ static struct dev_pm_ops gfar_pm_ops = {
1292 1344
1293#define GFAR_PM_OPS (&gfar_pm_ops) 1345#define GFAR_PM_OPS (&gfar_pm_ops)
1294 1346
1295static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
1296{
1297 return gfar_suspend(&ofdev->dev);
1298}
1299
1300static int gfar_legacy_resume(struct of_device *ofdev)
1301{
1302 return gfar_resume(&ofdev->dev);
1303}
1304
1305#else 1347#else
1306 1348
1307#define GFAR_PM_OPS NULL 1349#define GFAR_PM_OPS NULL
1308#define gfar_legacy_suspend NULL
1309#define gfar_legacy_resume NULL
1310 1350
1311#endif 1351#endif
1312 1352
@@ -1515,9 +1555,9 @@ static void gfar_halt_nodisable(struct net_device *dev)
1515 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1555 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
1516 gfar_write(&regs->dmactrl, tempval); 1556 gfar_write(&regs->dmactrl, tempval);
1517 1557
1518 while (!(gfar_read(&regs->ievent) & 1558 spin_event_timeout(((gfar_read(&regs->ievent) &
1519 (IEVENT_GRSC | IEVENT_GTSC))) 1559 (IEVENT_GRSC | IEVENT_GTSC)) ==
1520 cpu_relax(); 1560 (IEVENT_GRSC | IEVENT_GTSC)), -1, 0);
1521 } 1561 }
1522} 1562}
1523 1563
@@ -1653,6 +1693,7 @@ static void free_skb_resources(struct gfar_private *priv)
1653 sizeof(struct rxbd8) * priv->total_rx_ring_size, 1693 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1654 priv->tx_queue[0]->tx_bd_base, 1694 priv->tx_queue[0]->tx_bd_base,
1655 priv->tx_queue[0]->tx_bd_dma_base); 1695 priv->tx_queue[0]->tx_bd_dma_base);
1696 skb_queue_purge(&priv->rx_recycle);
1656} 1697}
1657 1698
1658void gfar_start(struct net_device *dev) 1699void gfar_start(struct net_device *dev)
@@ -1686,7 +1727,7 @@ void gfar_start(struct net_device *dev)
1686 gfar_write(&regs->imask, IMASK_DEFAULT); 1727 gfar_write(&regs->imask, IMASK_DEFAULT);
1687 } 1728 }
1688 1729
1689 dev->trans_start = jiffies; 1730 dev->trans_start = jiffies; /* prevent tx timeout */
1690} 1731}
1691 1732
1692void gfar_configure_coalescing(struct gfar_private *priv, 1733void gfar_configure_coalescing(struct gfar_private *priv,
@@ -1926,23 +1967,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1926 struct netdev_queue *txq; 1967 struct netdev_queue *txq;
1927 struct gfar __iomem *regs = NULL; 1968 struct gfar __iomem *regs = NULL;
1928 struct txfcb *fcb = NULL; 1969 struct txfcb *fcb = NULL;
1929 struct txbd8 *txbdp, *txbdp_start, *base; 1970 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1930 u32 lstatus; 1971 u32 lstatus;
1931 int i, rq = 0; 1972 int i, rq = 0, do_tstamp = 0;
1932 u32 bufaddr; 1973 u32 bufaddr;
1933 unsigned long flags; 1974 unsigned long flags;
1934 unsigned int nr_frags, length; 1975 unsigned int nr_frags, nr_txbds, length;
1935 1976 union skb_shared_tx *shtx;
1936 1977
1937 rq = skb->queue_mapping; 1978 rq = skb->queue_mapping;
1938 tx_queue = priv->tx_queue[rq]; 1979 tx_queue = priv->tx_queue[rq];
1939 txq = netdev_get_tx_queue(dev, rq); 1980 txq = netdev_get_tx_queue(dev, rq);
1940 base = tx_queue->tx_bd_base; 1981 base = tx_queue->tx_bd_base;
1941 regs = tx_queue->grp->regs; 1982 regs = tx_queue->grp->regs;
1983 shtx = skb_tx(skb);
1984
1985 /* check if time stamp should be generated */
1986 if (unlikely(shtx->hardware && priv->hwts_tx_en))
1987 do_tstamp = 1;
1942 1988
1943 /* make space for additional header when fcb is needed */ 1989 /* make space for additional header when fcb is needed */
1944 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 1990 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
1945 (priv->vlgrp && vlan_tx_tag_present(skb))) && 1991 (priv->vlgrp && vlan_tx_tag_present(skb)) ||
1992 unlikely(do_tstamp)) &&
1946 (skb_headroom(skb) < GMAC_FCB_LEN)) { 1993 (skb_headroom(skb) < GMAC_FCB_LEN)) {
1947 struct sk_buff *skb_new; 1994 struct sk_buff *skb_new;
1948 1995
@@ -1959,8 +2006,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1959 /* total number of fragments in the SKB */ 2006 /* total number of fragments in the SKB */
1960 nr_frags = skb_shinfo(skb)->nr_frags; 2007 nr_frags = skb_shinfo(skb)->nr_frags;
1961 2008
2009 /* calculate the required number of TxBDs for this skb */
2010 if (unlikely(do_tstamp))
2011 nr_txbds = nr_frags + 2;
2012 else
2013 nr_txbds = nr_frags + 1;
2014
1962 /* check if there is space to queue this packet */ 2015 /* check if there is space to queue this packet */
1963 if ((nr_frags+1) > tx_queue->num_txbdfree) { 2016 if (nr_txbds > tx_queue->num_txbdfree) {
1964 /* no space, stop the queue */ 2017 /* no space, stop the queue */
1965 netif_tx_stop_queue(txq); 2018 netif_tx_stop_queue(txq);
1966 dev->stats.tx_fifo_errors++; 2019 dev->stats.tx_fifo_errors++;
@@ -1972,9 +2025,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1972 txq->tx_packets ++; 2025 txq->tx_packets ++;
1973 2026
1974 txbdp = txbdp_start = tx_queue->cur_tx; 2027 txbdp = txbdp_start = tx_queue->cur_tx;
2028 lstatus = txbdp->lstatus;
2029
2030 /* Time stamp insertion requires one additional TxBD */
2031 if (unlikely(do_tstamp))
2032 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2033 tx_queue->tx_ring_size);
1975 2034
1976 if (nr_frags == 0) { 2035 if (nr_frags == 0) {
1977 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2036 if (unlikely(do_tstamp))
2037 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2038 TXBD_INTERRUPT);
2039 else
2040 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1978 } else { 2041 } else {
1979 /* Place the fragment addresses and lengths into the TxBDs */ 2042 /* Place the fragment addresses and lengths into the TxBDs */
1980 for (i = 0; i < nr_frags; i++) { 2043 for (i = 0; i < nr_frags; i++) {
@@ -2020,11 +2083,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2020 gfar_tx_vlan(skb, fcb); 2083 gfar_tx_vlan(skb, fcb);
2021 } 2084 }
2022 2085
2023 /* setup the TxBD length and buffer pointer for the first BD */ 2086 /* Setup tx hardware time stamping if requested */
2087 if (unlikely(do_tstamp)) {
2088 shtx->in_progress = 1;
2089 if (fcb == NULL)
2090 fcb = gfar_add_fcb(skb);
2091 fcb->ptp = 1;
2092 lstatus |= BD_LFLAG(TXBD_TOE);
2093 }
2094
2024 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, 2095 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
2025 skb_headlen(skb), DMA_TO_DEVICE); 2096 skb_headlen(skb), DMA_TO_DEVICE);
2026 2097
2027 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 2098 /*
2099 * If time stamping is requested one additional TxBD must be set up. The
2100 * first TxBD points to the FCB and must have a data length of
2101 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2102 * the full frame length.
2103 */
2104 if (unlikely(do_tstamp)) {
2105 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
2106 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2107 (skb_headlen(skb) - GMAC_FCB_LEN);
2108 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2109 } else {
2110 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2111 }
2028 2112
2029 /* 2113 /*
2030 * We can work in parallel with gfar_clean_tx_ring(), except 2114 * We can work in parallel with gfar_clean_tx_ring(), except
@@ -2064,9 +2148,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2064 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2148 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2065 2149
2066 /* reduce TxBD free count */ 2150 /* reduce TxBD free count */
2067 tx_queue->num_txbdfree -= (nr_frags + 1); 2151 tx_queue->num_txbdfree -= (nr_txbds);
2068
2069 dev->trans_start = jiffies;
2070 2152
2071 /* If the next BD still needs to be cleaned up, then the bds 2153 /* If the next BD still needs to be cleaned up, then the bds
2072 are full. We need to tell the kernel to stop sending us stuff. */ 2154 are full. We need to tell the kernel to stop sending us stuff. */
@@ -2092,7 +2174,6 @@ static int gfar_close(struct net_device *dev)
2092 2174
2093 disable_napi(priv); 2175 disable_napi(priv);
2094 2176
2095 skb_queue_purge(&priv->rx_recycle);
2096 cancel_work_sync(&priv->reset_task); 2177 cancel_work_sync(&priv->reset_task);
2097 stop_gfar(dev); 2178 stop_gfar(dev);
2098 2179
@@ -2255,16 +2336,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2255 struct net_device *dev = tx_queue->dev; 2336 struct net_device *dev = tx_queue->dev;
2256 struct gfar_private *priv = netdev_priv(dev); 2337 struct gfar_private *priv = netdev_priv(dev);
2257 struct gfar_priv_rx_q *rx_queue = NULL; 2338 struct gfar_priv_rx_q *rx_queue = NULL;
2258 struct txbd8 *bdp; 2339 struct txbd8 *bdp, *next = NULL;
2259 struct txbd8 *lbdp = NULL; 2340 struct txbd8 *lbdp = NULL;
2260 struct txbd8 *base = tx_queue->tx_bd_base; 2341 struct txbd8 *base = tx_queue->tx_bd_base;
2261 struct sk_buff *skb; 2342 struct sk_buff *skb;
2262 int skb_dirtytx; 2343 int skb_dirtytx;
2263 int tx_ring_size = tx_queue->tx_ring_size; 2344 int tx_ring_size = tx_queue->tx_ring_size;
2264 int frags = 0; 2345 int frags = 0, nr_txbds = 0;
2265 int i; 2346 int i;
2266 int howmany = 0; 2347 int howmany = 0;
2267 u32 lstatus; 2348 u32 lstatus;
2349 size_t buflen;
2350 union skb_shared_tx *shtx;
2268 2351
2269 rx_queue = priv->rx_queue[tx_queue->qindex]; 2352 rx_queue = priv->rx_queue[tx_queue->qindex];
2270 bdp = tx_queue->dirty_tx; 2353 bdp = tx_queue->dirty_tx;
@@ -2274,7 +2357,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2274 unsigned long flags; 2357 unsigned long flags;
2275 2358
2276 frags = skb_shinfo(skb)->nr_frags; 2359 frags = skb_shinfo(skb)->nr_frags;
2277 lbdp = skip_txbd(bdp, frags, base, tx_ring_size); 2360
2361 /*
2362 * When time stamping, one additional TxBD must be freed.
2363 * Also, we need to dma_unmap_single() the TxPAL.
2364 */
2365 shtx = skb_tx(skb);
2366 if (unlikely(shtx->in_progress))
2367 nr_txbds = frags + 2;
2368 else
2369 nr_txbds = frags + 1;
2370
2371 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2278 2372
2279 lstatus = lbdp->lstatus; 2373 lstatus = lbdp->lstatus;
2280 2374
@@ -2283,10 +2377,24 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2283 (lstatus & BD_LENGTH_MASK)) 2377 (lstatus & BD_LENGTH_MASK))
2284 break; 2378 break;
2285 2379
2286 dma_unmap_single(&priv->ofdev->dev, 2380 if (unlikely(shtx->in_progress)) {
2287 bdp->bufPtr, 2381 next = next_txbd(bdp, base, tx_ring_size);
2288 bdp->length, 2382 buflen = next->length + GMAC_FCB_LEN;
2289 DMA_TO_DEVICE); 2383 } else
2384 buflen = bdp->length;
2385
2386 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2387 buflen, DMA_TO_DEVICE);
2388
2389 if (unlikely(shtx->in_progress)) {
2390 struct skb_shared_hwtstamps shhwtstamps;
2391 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2392 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2393 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2394 skb_tstamp_tx(skb, &shhwtstamps);
2395 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2396 bdp = next;
2397 }
2290 2398
2291 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2399 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2292 bdp = next_txbd(bdp, base, tx_ring_size); 2400 bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2318,7 +2426,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2318 2426
2319 howmany++; 2427 howmany++;
2320 spin_lock_irqsave(&tx_queue->txlock, flags); 2428 spin_lock_irqsave(&tx_queue->txlock, flags);
2321 tx_queue->num_txbdfree += frags + 1; 2429 tx_queue->num_txbdfree += nr_txbds;
2322 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2430 spin_unlock_irqrestore(&tx_queue->txlock, flags);
2323 } 2431 }
2324 2432
@@ -2474,6 +2582,17 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2474 skb_pull(skb, amount_pull); 2582 skb_pull(skb, amount_pull);
2475 } 2583 }
2476 2584
2585 /* Get receive timestamp from the skb */
2586 if (priv->hwts_rx_en) {
2587 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2588 u64 *ns = (u64 *) skb->data;
2589 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2590 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2591 }
2592
2593 if (priv->padding)
2594 skb_pull(skb, priv->padding);
2595
2477 if (priv->rx_csum_enable) 2596 if (priv->rx_csum_enable)
2478 gfar_rx_checksum(skb, fcb); 2597 gfar_rx_checksum(skb, fcb);
2479 2598
@@ -2510,8 +2629,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2510 bdp = rx_queue->cur_rx; 2629 bdp = rx_queue->cur_rx;
2511 base = rx_queue->rx_bd_base; 2630 base = rx_queue->rx_bd_base;
2512 2631
2513 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + 2632 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
2514 priv->padding;
2515 2633
2516 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { 2634 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2517 struct sk_buff *newskb; 2635 struct sk_buff *newskb;
@@ -2798,7 +2916,7 @@ static void adjust_link(struct net_device *dev)
2798 * whenever dev->flags is changed */ 2916 * whenever dev->flags is changed */
2799static void gfar_set_multi(struct net_device *dev) 2917static void gfar_set_multi(struct net_device *dev)
2800{ 2918{
2801 struct dev_mc_list *mc_ptr; 2919 struct netdev_hw_addr *ha;
2802 struct gfar_private *priv = netdev_priv(dev); 2920 struct gfar_private *priv = netdev_priv(dev);
2803 struct gfar __iomem *regs = priv->gfargrp[0].regs; 2921 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2804 u32 tempval; 2922 u32 tempval;
@@ -2871,17 +2989,14 @@ static void gfar_set_multi(struct net_device *dev)
2871 return; 2989 return;
2872 2990
2873 /* Parse the list, and set the appropriate bits */ 2991 /* Parse the list, and set the appropriate bits */
2874 netdev_for_each_mc_addr(mc_ptr, dev) { 2992 netdev_for_each_mc_addr(ha, dev) {
2875 if (idx < em_num) { 2993 if (idx < em_num) {
2876 gfar_set_mac_for_addr(dev, idx, 2994 gfar_set_mac_for_addr(dev, idx, ha->addr);
2877 mc_ptr->dmi_addr);
2878 idx++; 2995 idx++;
2879 } else 2996 } else
2880 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr); 2997 gfar_set_hash_for_addr(dev, ha->addr);
2881 } 2998 }
2882 } 2999 }
2883
2884 return;
2885} 3000}
2886 3001
2887 3002
@@ -2922,8 +3037,6 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2922 tempval = gfar_read(priv->hash_regs[whichreg]); 3037 tempval = gfar_read(priv->hash_regs[whichreg]);
2923 tempval |= value; 3038 tempval |= value;
2924 gfar_write(priv->hash_regs[whichreg], tempval); 3039 gfar_write(priv->hash_regs[whichreg], tempval);
2925
2926 return;
2927} 3040}
2928 3041
2929 3042
@@ -3062,8 +3175,6 @@ static struct of_platform_driver gfar_driver = {
3062 }, 3175 },
3063 .probe = gfar_probe, 3176 .probe = gfar_probe,
3064 .remove = gfar_remove, 3177 .remove = gfar_remove,
3065 .suspend = gfar_legacy_suspend,
3066 .resume = gfar_legacy_resume,
3067}; 3178};
3068 3179
3069static int __init gfar_init(void) 3180static int __init gfar_init(void)
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 17d25e714236..ac4a92e08c09 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -262,6 +262,7 @@ extern const char gfar_driver_version[];
262 262
263#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size) 263#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size)
264 264
265#define RCTRL_TS_ENABLE 0x01000000
265#define RCTRL_PAL_MASK 0x001f0000 266#define RCTRL_PAL_MASK 0x001f0000
266#define RCTRL_VLEX 0x00002000 267#define RCTRL_VLEX 0x00002000
267#define RCTRL_FILREN 0x00001000 268#define RCTRL_FILREN 0x00001000
@@ -539,7 +540,7 @@ struct txbd8
539 540
540struct txfcb { 541struct txfcb {
541 u8 flags; 542 u8 flags;
542 u8 reserved; 543 u8 ptp; /* Flag to enable tx timestamping */
543 u8 l4os; /* Level 4 Header Offset */ 544 u8 l4os; /* Level 4 Header Offset */
544 u8 l3os; /* Level 3 Header Offset */ 545 u8 l3os; /* Level 3 Header Offset */
545 u16 phcs; /* Pseudo-header Checksum */ 546 u16 phcs; /* Pseudo-header Checksum */
@@ -885,6 +886,7 @@ struct gfar {
885#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100 886#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
886#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 887#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
887#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 888#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
889#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
888 890
889#if (MAXGROUPS == 2) 891#if (MAXGROUPS == 2)
890#define DEFAULT_MAPPING 0xAA 892#define DEFAULT_MAPPING 0xAA
@@ -1100,6 +1102,10 @@ struct gfar_private {
1100 1102
1101 /* Network Statistics */ 1103 /* Network Statistics */
1102 struct gfar_extra_stats extra_stats; 1104 struct gfar_extra_stats extra_stats;
1105
1106 /* HW time stamping enabled flag */
1107 int hwts_rx_en;
1108 int hwts_tx_en;
1103}; 1109};
1104 1110
1105extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; 1111extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 61fd54d35f63..f37a4c143ddd 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -895,7 +895,6 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
895 else 895 else
896 skb->ip_summed = CHECKSUM_NONE; 896 skb->ip_summed = CHECKSUM_NONE;
897 897
898 skb->dev = dev;
899 skb->protocol = eth_type_trans(skb, dev); 898 skb->protocol = eth_type_trans(skb, dev);
900 dev->stats.rx_packets++; 899 dev->stats.rx_packets++;
901 netif_receive_skb(skb); 900 netif_receive_skb(skb);
@@ -990,7 +989,7 @@ static u32 greth_hash_get_index(__u8 *addr)
990 989
991static void greth_set_hash_filter(struct net_device *dev) 990static void greth_set_hash_filter(struct net_device *dev)
992{ 991{
993 struct dev_mc_list *curr; 992 struct netdev_hw_addr *ha;
994 struct greth_private *greth = netdev_priv(dev); 993 struct greth_private *greth = netdev_priv(dev);
995 struct greth_regs *regs = (struct greth_regs *) greth->regs; 994 struct greth_regs *regs = (struct greth_regs *) greth->regs;
996 u32 mc_filter[2]; 995 u32 mc_filter[2];
@@ -998,8 +997,8 @@ static void greth_set_hash_filter(struct net_device *dev)
998 997
999 mc_filter[0] = mc_filter[1] = 0; 998 mc_filter[0] = mc_filter[1] = 0;
1000 999
1001 netdev_for_each_mc_addr(curr, dev) { 1000 netdev_for_each_mc_addr(ha, dev) {
1002 bitnr = greth_hash_get_index(curr->dmi_addr); 1001 bitnr = greth_hash_get_index(ha->addr);
1003 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 1002 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
1004 } 1003 }
1005 1004
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 5d6f13879592..61f2b1cfcd46 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -859,7 +859,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
859 for (i = 10000; i >= 0; i--) 859 for (i = 10000; i >= 0; i--)
860 if ((readw(ioaddr + MII_Status) & 1) == 0) 860 if ((readw(ioaddr + MII_Status) & 1) == 0)
861 break; 861 break;
862 return;
863} 862}
864 863
865 864
@@ -1225,8 +1224,6 @@ static void hamachi_init_ring(struct net_device *dev)
1225 } 1224 }
1226 /* Mark the last entry of the ring */ 1225 /* Mark the last entry of the ring */
1227 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing); 1226 hmp->tx_ring[TX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
1228
1229 return;
1230} 1227}
1231 1228
1232 1229
@@ -1857,12 +1854,12 @@ static void set_rx_mode(struct net_device *dev)
1857 /* Too many to match, or accept all multicasts. */ 1854 /* Too many to match, or accept all multicasts. */
1858 writew(0x000B, ioaddr + AddrMode); 1855 writew(0x000B, ioaddr + AddrMode);
1859 } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */ 1856 } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
1860 struct dev_mc_list *mclist; 1857 struct netdev_hw_addr *ha;
1861 int i = 0; 1858 int i = 0;
1862 1859
1863 netdev_for_each_mc_addr(mclist, dev) { 1860 netdev_for_each_mc_addr(ha, dev) {
1864 writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8); 1861 writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8);
1865 writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]), 1862 writel(0x20000 | (*(u16 *)&ha->addr[4]),
1866 ioaddr + 0x104 + i*8); 1863 ioaddr + 0x104 + i*8);
1867 i++; 1864 i++;
1868 } 1865 }
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 0cab992b3d1a..3e25f10cabd6 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -429,7 +429,7 @@ static int ser12_open(struct net_device *dev)
429 return -EINVAL; 429 return -EINVAL;
430 } 430 }
431 if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser_fdx")) { 431 if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser_fdx")) {
432 printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy \n", 432 printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy\n",
433 dev->base_addr); 433 dev->base_addr);
434 return -EACCES; 434 return -EACCES;
435 } 435 }
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index f3a96b843911..9f64c8637208 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1629,7 +1629,6 @@ static void scc_net_rx(struct scc_channel *scc, struct sk_buff *skb)
1629 skb->protocol = ax25_type_trans(skb, scc->dev); 1629 skb->protocol = ax25_type_trans(skb, scc->dev);
1630 1630
1631 netif_rx(skb); 1631 netif_rx(skb);
1632 return;
1633} 1632}
1634 1633
1635/* ----> transmit frame <---- */ 1634/* ----> transmit frame <---- */
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index efdbcad63c67..82bffc3cabdf 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -351,7 +351,6 @@ hpp_reset_8390(struct net_device *dev)
351 printk("%s: hp_reset_8390() did not complete.\n", dev->name); 351 printk("%s: hp_reset_8390() did not complete.\n", dev->name);
352 352
353 if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies); 353 if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
354 return;
355} 354}
356 355
357/* The programmed-I/O version of reading the 4 byte 8390 specific header. 356/* The programmed-I/O version of reading the 4 byte 8390 specific header.
@@ -422,7 +421,6 @@ hpp_io_block_output(struct net_device *dev, int count,
422 int ioaddr = dev->base_addr - NIC_OFFSET; 421 int ioaddr = dev->base_addr - NIC_OFFSET;
423 outw(start_page << 8, ioaddr + HPP_OUT_ADDR); 422 outw(start_page << 8, ioaddr + HPP_OUT_ADDR);
424 outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2); 423 outsl(ioaddr + HP_DATAPORT, buf, (count+3)>>2);
425 return;
426} 424}
427 425
428static void 426static void
@@ -436,8 +434,6 @@ hpp_mem_block_output(struct net_device *dev, int count,
436 outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION); 434 outw(option_reg & ~(MemDisable + BootROMEnb), ioaddr + HPP_OPTION);
437 memcpy_toio(ei_status.mem, buf, (count + 3) & ~3); 435 memcpy_toio(ei_status.mem, buf, (count + 3) & ~3);
438 outw(option_reg, ioaddr + HPP_OPTION); 436 outw(option_reg, ioaddr + HPP_OPTION);
439
440 return;
441} 437}
442 438
443 439
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index 5c4d78c1ff42..86ececd3c658 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -240,7 +240,6 @@ hp_reset_8390(struct net_device *dev)
240 printk("%s: hp_reset_8390() did not complete.\n", dev->name); 240 printk("%s: hp_reset_8390() did not complete.\n", dev->name);
241 241
242 if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies); 242 if (ei_debug > 1) printk("8390 reset done (%ld).", jiffies);
243 return;
244} 243}
245 244
246static void 245static void
@@ -360,7 +359,6 @@ hp_block_output(struct net_device *dev, int count,
360 dev->name, (start_page << 8) + count, addr); 359 dev->name, (start_page << 8) + count, addr);
361 } 360 }
362 outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE); 361 outb_p(saved_config & (~HP_DATAON), nic_base - NIC_OFFSET + HP_CONFIGURE);
363 return;
364} 362}
365 363
366/* This function resets the ethercard if something screws up. */ 364/* This function resets the ethercard if something screws up. */
@@ -371,7 +369,6 @@ hp_init_card(struct net_device *dev)
371 NS8390p_init(dev, 0); 369 NS8390p_init(dev, 0);
372 outb_p(irqmap[irq&0x0f] | HP_RUN, 370 outb_p(irqmap[irq&0x0f] | HP_RUN,
373 dev->base_addr - NIC_OFFSET + HP_CONFIGURE); 371 dev->base_addr - NIC_OFFSET + HP_CONFIGURE);
374 return;
375} 372}
376 373
377#ifdef MODULE 374#ifdef MODULE
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 4daad8cd56ea..68e5ac8832ad 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1102,7 +1102,7 @@ static int hp100_open(struct net_device *dev)
1102 return -EAGAIN; 1102 return -EAGAIN;
1103 } 1103 }
1104 1104
1105 dev->trans_start = jiffies; 1105 dev->trans_start = jiffies; /* prevent tx timeout */
1106 netif_start_queue(dev); 1106 netif_start_queue(dev);
1107 1107
1108 lp->lan_type = hp100_sense_lan(dev); 1108 lp->lan_type = hp100_sense_lan(dev);
@@ -1510,7 +1510,7 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
1510 printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name); 1510 printk("hp100: %s: start_xmit_bm: No TX PDL available.\n", dev->name);
1511#endif 1511#endif
1512 /* not waited long enough since last tx? */ 1512 /* not waited long enough since last tx? */
1513 if (time_before(jiffies, dev->trans_start + HZ)) 1513 if (time_before(jiffies, dev_trans_start(dev) + HZ))
1514 goto drop; 1514 goto drop;
1515 1515
1516 if (hp100_check_lan(dev)) 1516 if (hp100_check_lan(dev))
@@ -1547,7 +1547,6 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
1547 } 1547 }
1548 } 1548 }
1549 1549
1550 dev->trans_start = jiffies;
1551 goto drop; 1550 goto drop;
1552 } 1551 }
1553 1552
@@ -1585,7 +1584,6 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
1585 /* Update statistics */ 1584 /* Update statistics */
1586 lp->stats.tx_packets++; 1585 lp->stats.tx_packets++;
1587 lp->stats.tx_bytes += skb->len; 1586 lp->stats.tx_bytes += skb->len;
1588 dev->trans_start = jiffies;
1589 1587
1590 return NETDEV_TX_OK; 1588 return NETDEV_TX_OK;
1591 1589
@@ -1663,7 +1661,7 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
1663 printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i); 1661 printk("hp100: %s: start_xmit: tx free mem = 0x%x\n", dev->name, i);
1664#endif 1662#endif
1665 /* not waited long enough since last failed tx try? */ 1663 /* not waited long enough since last failed tx try? */
1666 if (time_before(jiffies, dev->trans_start + HZ)) { 1664 if (time_before(jiffies, dev_trans_start(dev) + HZ)) {
1667#ifdef HP100_DEBUG 1665#ifdef HP100_DEBUG
1668 printk("hp100: %s: trans_start timing problem\n", 1666 printk("hp100: %s: trans_start timing problem\n",
1669 dev->name); 1667 dev->name);
@@ -1701,7 +1699,6 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
1701 mdelay(1); 1699 mdelay(1);
1702 } 1700 }
1703 } 1701 }
1704 dev->trans_start = jiffies;
1705 goto drop; 1702 goto drop;
1706 } 1703 }
1707 1704
@@ -1745,7 +1742,6 @@ static netdev_tx_t hp100_start_xmit(struct sk_buff *skb,
1745 1742
1746 lp->stats.tx_packets++; 1743 lp->stats.tx_packets++;
1747 lp->stats.tx_bytes += skb->len; 1744 lp->stats.tx_bytes += skb->len;
1748 dev->trans_start = jiffies;
1749 hp100_ints_on(); 1745 hp100_ints_on();
1750 spin_unlock_irqrestore(&lp->lock, flags); 1746 spin_unlock_irqrestore(&lp->lock, flags);
1751 1747
@@ -2099,15 +2095,15 @@ static void hp100_set_multicast_list(struct net_device *dev)
2099 } else { 2095 } else {
2100 int i, idx; 2096 int i, idx;
2101 u_char *addrs; 2097 u_char *addrs;
2102 struct dev_mc_list *dmi; 2098 struct netdev_hw_addr *ha;
2103 2099
2104 memset(&lp->hash_bytes, 0x00, 8); 2100 memset(&lp->hash_bytes, 0x00, 8);
2105#ifdef HP100_DEBUG 2101#ifdef HP100_DEBUG
2106 printk("hp100: %s: computing hash filter - mc_count = %i\n", 2102 printk("hp100: %s: computing hash filter - mc_count = %i\n",
2107 dev->name, netdev_mc_count(dev)); 2103 dev->name, netdev_mc_count(dev));
2108#endif 2104#endif
2109 netdev_for_each_mc_addr(dmi, dev) { 2105 netdev_for_each_mc_addr(ha, dev) {
2110 addrs = dmi->dmi_addr; 2106 addrs = ha->addr;
2111 if ((*addrs & 0x01) == 0x01) { /* multicast address? */ 2107 if ((*addrs & 0x01) == 0x01) { /* multicast address? */
2112#ifdef HP100_DEBUG 2108#ifdef HP100_DEBUG
2113 printk("hp100: %s: multicast = %pM, ", 2109 printk("hp100: %s: multicast = %pM, ",
diff --git a/drivers/net/hydra.c b/drivers/net/hydra.c
index 24724b4ad709..07d8e5b634f3 100644
--- a/drivers/net/hydra.c
+++ b/drivers/net/hydra.c
@@ -71,6 +71,7 @@ static struct zorro_device_id hydra_zorro_tbl[] __devinitdata = {
71 { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET }, 71 { ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
72 { 0 } 72 { 0 }
73}; 73};
74MODULE_DEVICE_TABLE(zorro, hydra_zorro_tbl);
74 75
75static struct zorro_driver hydra_driver = { 76static struct zorro_driver hydra_driver = {
76 .name = "hydra", 77 .name = "hydra",
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index f8c36a5eb4d7..b150c102ca5a 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -390,18 +390,19 @@ static void emac_hash_mc(struct emac_instance *dev)
390 const int regs = EMAC_XAHT_REGS(dev); 390 const int regs = EMAC_XAHT_REGS(dev);
391 u32 *gaht_base = emac_gaht_base(dev); 391 u32 *gaht_base = emac_gaht_base(dev);
392 u32 gaht_temp[regs]; 392 u32 gaht_temp[regs];
393 struct dev_mc_list *dmi; 393 struct netdev_hw_addr *ha;
394 int i; 394 int i;
395 395
396 DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev)); 396 DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
397 397
398 memset(gaht_temp, 0, sizeof (gaht_temp)); 398 memset(gaht_temp, 0, sizeof (gaht_temp));
399 399
400 netdev_for_each_mc_addr(dmi, dev->ndev) { 400 netdev_for_each_mc_addr(ha, dev->ndev) {
401 int slot, reg, mask; 401 int slot, reg, mask;
402 DBG2(dev, "mc %pM" NL, dmi->dmi_addr); 402 DBG2(dev, "mc %pM" NL, ha->addr);
403 403
404 slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr)); 404 slot = EMAC_XAHT_CRC_TO_SLOT(dev,
405 ether_crc(ETH_ALEN, ha->addr));
405 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot); 406 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
406 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot); 407 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
407 408
@@ -1178,7 +1179,7 @@ static int emac_open(struct net_device *ndev)
1178 netif_carrier_on(dev->ndev); 1179 netif_carrier_on(dev->ndev);
1179 1180
1180 /* Required for Pause packet support in EMAC */ 1181 /* Required for Pause packet support in EMAC */
1181 dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1); 1182 dev_mc_add_global(ndev, default_mcast_addr);
1182 1183
1183 emac_configure(dev); 1184 emac_configure(dev);
1184 mal_poll_add(dev->mal, &dev->commac); 1185 mal_poll_add(dev->mal, &dev->commac);
@@ -1701,7 +1702,6 @@ static int emac_poll_rx(void *param, int budget)
1701 1702
1702 skb_put(skb, len); 1703 skb_put(skb, len);
1703 push_packet: 1704 push_packet:
1704 skb->dev = dev->ndev;
1705 skb->protocol = eth_type_trans(skb, dev->ndev); 1705 skb->protocol = eth_type_trans(skb, dev->ndev);
1706 emac_rx_csum(dev, skb, ctrl); 1706 emac_rx_csum(dev, skb, ctrl);
1707 1707
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 7d6cf3340c11..294ccfb427cf 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -384,7 +384,7 @@ static void InitBoard(struct net_device *dev)
384 int camcnt; 384 int camcnt;
385 camentry_t cams[16]; 385 camentry_t cams[16];
386 u32 cammask; 386 u32 cammask;
387 struct dev_mc_list *mcptr; 387 struct netdev_hw_addr *ha;
388 u16 rcrval; 388 u16 rcrval;
389 389
390 /* reset the SONIC */ 390 /* reset the SONIC */
@@ -419,8 +419,8 @@ static void InitBoard(struct net_device *dev)
419 /* start putting the multicast addresses into the CAM list. Stop if 419 /* start putting the multicast addresses into the CAM list. Stop if
420 it is full. */ 420 it is full. */
421 421
422 netdev_for_each_mc_addr(mcptr, dev) { 422 netdev_for_each_mc_addr(ha, dev) {
423 putcam(cams, &camcnt, mcptr->dmi_addr); 423 putcam(cams, &camcnt, ha->addr);
424 if (camcnt == 16) 424 if (camcnt == 16)
425 break; 425 break;
426 } 426 }
@@ -478,7 +478,7 @@ static void InitBoard(struct net_device *dev)
478 /* if still multicast addresses left or ALLMULTI is set, set the multicast 478 /* if still multicast addresses left or ALLMULTI is set, set the multicast
479 enable bit */ 479 enable bit */
480 480
481 if ((dev->flags & IFF_ALLMULTI) || (mcptr != NULL)) 481 if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt)
482 rcrval |= RCREG_AMC; 482 rcrval |= RCREG_AMC;
483 483
484 /* promiscous mode ? */ 484 /* promiscous mode ? */
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index cd508a8ee25b..7acb3edc47ef 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -45,6 +45,7 @@
45#include <linux/init.h> 45#include <linux/init.h>
46#include <linux/delay.h> 46#include <linux/delay.h>
47#include <linux/mm.h> 47#include <linux/mm.h>
48#include <linux/pm.h>
48#include <linux/ethtool.h> 49#include <linux/ethtool.h>
49#include <linux/proc_fs.h> 50#include <linux/proc_fs.h>
50#include <linux/in.h> 51#include <linux/in.h>
@@ -199,7 +200,7 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
199 return -1; 200 return -1;
200 } 201 }
201 202
202 pool->skbuff = kmalloc(sizeof(void*) * pool->size, GFP_KERNEL); 203 pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
203 204
204 if(!pool->skbuff) { 205 if(!pool->skbuff) {
205 kfree(pool->dma_addr); 206 kfree(pool->dma_addr);
@@ -210,7 +211,6 @@ static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
210 return -1; 211 return -1;
211 } 212 }
212 213
213 memset(pool->skbuff, 0, sizeof(void*) * pool->size);
214 memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size); 214 memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
215 215
216 for(i = 0; i < pool->size; ++i) { 216 for(i = 0; i < pool->size; ++i) {
@@ -957,7 +957,7 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
957 } else { 957 } else {
958 tx_packets++; 958 tx_packets++;
959 tx_bytes += skb->len; 959 tx_bytes += skb->len;
960 netdev->trans_start = jiffies; 960 netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
961 } 961 }
962 962
963 if (!used_bounce) 963 if (!used_bounce)
@@ -1073,7 +1073,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1073 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc); 1073 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
1074 } 1074 }
1075 } else { 1075 } else {
1076 struct dev_mc_list *mclist; 1076 struct netdev_hw_addr *ha;
1077 /* clear the filter table & disable filtering */ 1077 /* clear the filter table & disable filtering */
1078 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1078 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1079 IbmVethMcastEnableRecv | 1079 IbmVethMcastEnableRecv |
@@ -1084,10 +1084,10 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1084 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc); 1084 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
1085 } 1085 }
1086 /* add the addresses to the filter table */ 1086 /* add the addresses to the filter table */
1087 netdev_for_each_mc_addr(mclist, netdev) { 1087 netdev_for_each_mc_addr(ha, netdev) {
1088 // add the multicast address to the filter table 1088 // add the multicast address to the filter table
1089 unsigned long mcast_addr = 0; 1089 unsigned long mcast_addr = 0;
1090 memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6); 1090 memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
1091 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1091 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1092 IbmVethMcastAddFilter, 1092 IbmVethMcastAddFilter,
1093 mcast_addr); 1093 mcast_addr);
@@ -1421,7 +1421,6 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
1421 if (!entry) 1421 if (!entry)
1422 ibmveth_error_printk("Cannot create adapter proc entry"); 1422 ibmveth_error_printk("Cannot create adapter proc entry");
1423 } 1423 }
1424 return;
1425} 1424}
1426 1425
1427static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter) 1426static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
@@ -1589,6 +1588,12 @@ static struct kobj_type ktype_veth_pool = {
1589 .default_attrs = veth_pool_attrs, 1588 .default_attrs = veth_pool_attrs,
1590}; 1589};
1591 1590
1591static int ibmveth_resume(struct device *dev)
1592{
1593 struct net_device *netdev = dev_get_drvdata(dev);
1594 ibmveth_interrupt(netdev->irq, netdev);
1595 return 0;
1596}
1592 1597
1593static struct vio_device_id ibmveth_device_table[] __devinitdata= { 1598static struct vio_device_id ibmveth_device_table[] __devinitdata= {
1594 { "network", "IBM,l-lan"}, 1599 { "network", "IBM,l-lan"},
@@ -1596,6 +1601,10 @@ static struct vio_device_id ibmveth_device_table[] __devinitdata= {
1596}; 1601};
1597MODULE_DEVICE_TABLE(vio, ibmveth_device_table); 1602MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
1598 1603
1604static struct dev_pm_ops ibmveth_pm_ops = {
1605 .resume = ibmveth_resume
1606};
1607
1599static struct vio_driver ibmveth_driver = { 1608static struct vio_driver ibmveth_driver = {
1600 .id_table = ibmveth_device_table, 1609 .id_table = ibmveth_device_table,
1601 .probe = ibmveth_probe, 1610 .probe = ibmveth_probe,
@@ -1604,6 +1613,7 @@ static struct vio_driver ibmveth_driver = {
1604 .driver = { 1613 .driver = {
1605 .name = ibmveth_driver_name, 1614 .name = ibmveth_driver_name,
1606 .owner = THIS_MODULE, 1615 .owner = THIS_MODULE,
1616 .pm = &ibmveth_pm_ops,
1607 } 1617 }
1608}; 1618};
1609 1619
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index f4081c0a2d9c..ab9f675c5b8b 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -182,7 +182,6 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
182 netif_stop_queue(dev); 182 netif_stop_queue(dev);
183 } 183 }
184 184
185 dev->trans_start = jiffies;
186 skb_queue_tail(&dp->rq, skb); 185 skb_queue_tail(&dp->rq, skb);
187 if (!dp->tasklet_pending) { 186 if (!dp->tasklet_pending) {
188 dp->tasklet_pending = 1; 187 dp->tasklet_pending = 1;
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 4a32bed77c71..86438b59fa21 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -104,6 +104,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
104 case E1000_DEV_ID_82580_COPPER_DUAL: 104 case E1000_DEV_ID_82580_COPPER_DUAL:
105 mac->type = e1000_82580; 105 mac->type = e1000_82580;
106 break; 106 break;
107 case E1000_DEV_ID_I350_COPPER:
108 case E1000_DEV_ID_I350_FIBER:
109 case E1000_DEV_ID_I350_SERDES:
110 case E1000_DEV_ID_I350_SGMII:
111 mac->type = e1000_i350;
112 break;
107 default: 113 default:
108 return -E1000_ERR_MAC_INIT; 114 return -E1000_ERR_MAC_INIT;
109 break; 115 break;
@@ -153,8 +159,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
153 mac->rar_entry_count = E1000_RAR_ENTRIES_82576; 159 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
154 if (mac->type == e1000_82580) 160 if (mac->type == e1000_82580)
155 mac->rar_entry_count = E1000_RAR_ENTRIES_82580; 161 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
162 if (mac->type == e1000_i350)
163 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
156 /* reset */ 164 /* reset */
157 if (mac->type == e1000_82580) 165 if (mac->type >= e1000_82580)
158 mac->ops.reset_hw = igb_reset_hw_82580; 166 mac->ops.reset_hw = igb_reset_hw_82580;
159 else 167 else
160 mac->ops.reset_hw = igb_reset_hw_82575; 168 mac->ops.reset_hw = igb_reset_hw_82575;
@@ -225,7 +233,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
225 phy->ops.reset = igb_phy_hw_reset_sgmii_82575; 233 phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
226 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; 234 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
227 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; 235 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
228 } else if (hw->mac.type == e1000_82580) { 236 } else if (hw->mac.type >= e1000_82580) {
229 phy->ops.reset = igb_phy_hw_reset; 237 phy->ops.reset = igb_phy_hw_reset;
230 phy->ops.read_reg = igb_read_phy_reg_82580; 238 phy->ops.read_reg = igb_read_phy_reg_82580;
231 phy->ops.write_reg = igb_write_phy_reg_82580; 239 phy->ops.write_reg = igb_write_phy_reg_82580;
@@ -261,6 +269,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
261 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; 269 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
262 break; 270 break;
263 case I82580_I_PHY_ID: 271 case I82580_I_PHY_ID:
272 case I350_I_PHY_ID:
264 phy->type = e1000_phy_82580; 273 phy->type = e1000_phy_82580;
265 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; 274 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
266 phy->ops.get_cable_length = igb_get_cable_length_82580; 275 phy->ops.get_cable_length = igb_get_cable_length_82580;
@@ -1205,8 +1214,6 @@ void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
1205 /* If the management interface is not enabled, then power down */ 1214 /* If the management interface is not enabled, then power down */
1206 if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw))) 1215 if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
1207 igb_power_down_phy_copper(hw); 1216 igb_power_down_phy_copper(hw);
1208
1209 return;
1210} 1217}
1211 1218
1212/** 1219/**
@@ -1445,7 +1452,6 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1445 **/ 1452 **/
1446static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) 1453static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1447{ 1454{
1448 u32 mdicnfg = 0;
1449 s32 ret_val; 1455 s32 ret_val;
1450 1456
1451 1457
@@ -1453,15 +1459,6 @@ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1453 if (ret_val) 1459 if (ret_val)
1454 goto out; 1460 goto out;
1455 1461
1456 /*
1457 * We config the phy address in MDICNFG register now. Same bits
1458 * as before. The values in MDIC can be written but will be
1459 * ignored. This allows us to call the old function after
1460 * configuring the PHY address in the new register
1461 */
1462 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1463 wr32(E1000_MDICNFG, mdicnfg);
1464
1465 ret_val = igb_read_phy_reg_mdic(hw, offset, data); 1462 ret_val = igb_read_phy_reg_mdic(hw, offset, data);
1466 1463
1467 hw->phy.ops.release(hw); 1464 hw->phy.ops.release(hw);
@@ -1480,7 +1477,6 @@ out:
1480 **/ 1477 **/
1481static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) 1478static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1482{ 1479{
1483 u32 mdicnfg = 0;
1484 s32 ret_val; 1480 s32 ret_val;
1485 1481
1486 1482
@@ -1488,15 +1484,6 @@ static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1488 if (ret_val) 1484 if (ret_val)
1489 goto out; 1485 goto out;
1490 1486
1491 /*
1492 * We config the phy address in MDICNFG register now. Same bits
1493 * as before. The values in MDIC can be written but will be
1494 * ignored. This allows us to call the old function after
1495 * configuring the PHY address in the new register
1496 */
1497 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1498 wr32(E1000_MDICNFG, mdicnfg);
1499
1500 ret_val = igb_write_phy_reg_mdic(hw, offset, data); 1487 ret_val = igb_write_phy_reg_mdic(hw, offset, data);
1501 1488
1502 hw->phy.ops.release(hw); 1489 hw->phy.ops.release(hw);
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index fbe1c99c193c..cbd1e1259e4d 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -38,9 +38,10 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
38 (ID_LED_DEF1_DEF2 << 4) | \ 38 (ID_LED_DEF1_DEF2 << 4) | \
39 (ID_LED_OFF1_ON2)) 39 (ID_LED_OFF1_ON2))
40 40
41#define E1000_RAR_ENTRIES_82575 16 41#define E1000_RAR_ENTRIES_82575 16
42#define E1000_RAR_ENTRIES_82576 24 42#define E1000_RAR_ENTRIES_82576 24
43#define E1000_RAR_ENTRIES_82580 24 43#define E1000_RAR_ENTRIES_82580 24
44#define E1000_RAR_ENTRIES_I350 32
44 45
45#define E1000_SW_SYNCH_MB 0x00000100 46#define E1000_SW_SYNCH_MB 0x00000100
46#define E1000_STAT_DEV_RST_SET 0x00100000 47#define E1000_STAT_DEV_RST_SET 0x00100000
@@ -52,6 +53,7 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
52#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 53#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
53#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 54#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
54#define E1000_SRRCTL_DROP_EN 0x80000000 55#define E1000_SRRCTL_DROP_EN 0x80000000
56#define E1000_SRRCTL_TIMESTAMP 0x40000000
55 57
56#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 58#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
57#define E1000_MRQC_ENABLE_VMDQ 0x00000003 59#define E1000_MRQC_ENABLE_VMDQ 0x00000003
@@ -108,6 +110,7 @@ union e1000_adv_rx_desc {
108#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 110#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
109#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 111#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
110#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ 112#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
113#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
111 114
112/* Transmit Descriptor - Advanced */ 115/* Transmit Descriptor - Advanced */
113union e1000_adv_tx_desc { 116union e1000_adv_tx_desc {
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index fe6cf1b696c7..24d9be64342f 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -610,11 +610,7 @@
610#define IGP_LED3_MODE 0x07000000 610#define IGP_LED3_MODE 0x07000000
611 611
612/* PCI/PCI-X/PCI-EX Config space */ 612/* PCI/PCI-X/PCI-EX Config space */
613#define PCIE_LINK_STATUS 0x12
614#define PCIE_DEVICE_CONTROL2 0x28 613#define PCIE_DEVICE_CONTROL2 0x28
615
616#define PCIE_LINK_WIDTH_MASK 0x3F0
617#define PCIE_LINK_WIDTH_SHIFT 4
618#define PCIE_DEVICE_CONTROL2_16ms 0x0005 614#define PCIE_DEVICE_CONTROL2_16ms 0x0005
619 615
620#define PHY_REVISION_MASK 0xFFFFFFF0 616#define PHY_REVISION_MASK 0xFFFFFFF0
@@ -629,6 +625,7 @@
629#define M88E1111_I_PHY_ID 0x01410CC0 625#define M88E1111_I_PHY_ID 0x01410CC0
630#define IGP03E1000_E_PHY_ID 0x02A80390 626#define IGP03E1000_E_PHY_ID 0x02A80390
631#define I82580_I_PHY_ID 0x015403A0 627#define I82580_I_PHY_ID 0x015403A0
628#define I350_I_PHY_ID 0x015403B0
632#define M88_VENDOR 0x0141 629#define M88_VENDOR 0x0141
633 630
634/* M88E1000 Specific Registers */ 631/* M88E1000 Specific Registers */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 82a533f5192a..cb8db78b1a05 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -31,6 +31,7 @@
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/netdevice.h>
34 35
35#include "e1000_regs.h" 36#include "e1000_regs.h"
36#include "e1000_defines.h" 37#include "e1000_defines.h"
@@ -53,6 +54,10 @@ struct e1000_hw;
53#define E1000_DEV_ID_82580_SERDES 0x1510 54#define E1000_DEV_ID_82580_SERDES 0x1510
54#define E1000_DEV_ID_82580_SGMII 0x1511 55#define E1000_DEV_ID_82580_SGMII 0x1511
55#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
57#define E1000_DEV_ID_I350_COPPER 0x1521
58#define E1000_DEV_ID_I350_FIBER 0x1522
59#define E1000_DEV_ID_I350_SERDES 0x1523
60#define E1000_DEV_ID_I350_SGMII 0x1524
56 61
57#define E1000_REVISION_2 2 62#define E1000_REVISION_2 2
58#define E1000_REVISION_4 4 63#define E1000_REVISION_4 4
@@ -72,6 +77,7 @@ enum e1000_mac_type {
72 e1000_82575, 77 e1000_82575,
73 e1000_82576, 78 e1000_82576,
74 e1000_82580, 79 e1000_82580,
80 e1000_i350,
75 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ 81 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
76}; 82};
77 83
@@ -502,14 +508,11 @@ struct e1000_hw {
502 u8 revision_id; 508 u8 revision_id;
503}; 509};
504 510
505#ifdef DEBUG 511extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
506extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
507#define hw_dbg(format, arg...) \ 512#define hw_dbg(format, arg...) \
508 printk(KERN_DEBUG "%s: " format, igb_get_hw_dev_name(hw), ##arg) 513 netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
509#else 514
510#define hw_dbg(format, arg...)
511#endif
512#endif
513/* These functions must be implemented by drivers */ 515/* These functions must be implemented by drivers */
514s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 516s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
515s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 517s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
518#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index be8d010e4021..90c5e01e9235 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -53,17 +53,30 @@ s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
53 u16 pcie_link_status; 53 u16 pcie_link_status;
54 54
55 bus->type = e1000_bus_type_pci_express; 55 bus->type = e1000_bus_type_pci_express;
56 bus->speed = e1000_bus_speed_2500;
57 56
58 ret_val = igb_read_pcie_cap_reg(hw, 57 ret_val = igb_read_pcie_cap_reg(hw,
59 PCIE_LINK_STATUS, 58 PCI_EXP_LNKSTA,
60 &pcie_link_status); 59 &pcie_link_status);
61 if (ret_val) 60 if (ret_val) {
62 bus->width = e1000_bus_width_unknown; 61 bus->width = e1000_bus_width_unknown;
63 else 62 bus->speed = e1000_bus_speed_unknown;
63 } else {
64 switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
65 case PCI_EXP_LNKSTA_CLS_2_5GB:
66 bus->speed = e1000_bus_speed_2500;
67 break;
68 case PCI_EXP_LNKSTA_CLS_5_0GB:
69 bus->speed = e1000_bus_speed_5000;
70 break;
71 default:
72 bus->speed = e1000_bus_speed_unknown;
73 break;
74 }
75
64 bus->width = (enum e1000_bus_width)((pcie_link_status & 76 bus->width = (enum e1000_bus_width)((pcie_link_status &
65 PCIE_LINK_WIDTH_MASK) >> 77 PCI_EXP_LNKSTA_NLW) >>
66 PCIE_LINK_WIDTH_SHIFT); 78 PCI_EXP_LNKSTA_NLW_SHIFT);
79 }
67 80
68 reg = rd32(E1000_STATUS); 81 reg = rd32(E1000_STATUS);
69 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; 82 bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 3b772b822a5d..6e63d9a7fc75 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -107,6 +107,7 @@ struct vf_data_storage {
107#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 107#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
108 108
109/* Supported Rx Buffer Sizes */ 109/* Supported Rx Buffer Sizes */
110#define IGB_RXBUFFER_64 64 /* Used for packet split */
110#define IGB_RXBUFFER_128 128 /* Used for packet split */ 111#define IGB_RXBUFFER_128 128 /* Used for packet split */
111#define IGB_RXBUFFER_1024 1024 112#define IGB_RXBUFFER_1024 1024
112#define IGB_RXBUFFER_2048 2048 113#define IGB_RXBUFFER_2048 2048
@@ -140,8 +141,10 @@ struct igb_buffer {
140 unsigned long time_stamp; 141 unsigned long time_stamp;
141 u16 length; 142 u16 length;
142 u16 next_to_watch; 143 u16 next_to_watch;
143 u16 mapped_as_page; 144 unsigned int bytecount;
144 u16 gso_segs; 145 u16 gso_segs;
146 union skb_shared_tx shtx;
147 u8 mapped_as_page;
145 }; 148 };
146 /* RX */ 149 /* RX */
147 struct { 150 struct {
@@ -185,7 +188,7 @@ struct igb_q_vector {
185struct igb_ring { 188struct igb_ring {
186 struct igb_q_vector *q_vector; /* backlink to q_vector */ 189 struct igb_q_vector *q_vector; /* backlink to q_vector */
187 struct net_device *netdev; /* back pointer to net_device */ 190 struct net_device *netdev; /* back pointer to net_device */
188 struct pci_dev *pdev; /* pci device for dma mapping */ 191 struct device *dev; /* device pointer for dma mapping */
189 dma_addr_t dma; /* phys address of the ring */ 192 dma_addr_t dma; /* phys address of the ring */
190 void *desc; /* descriptor ring memory */ 193 void *desc; /* descriptor ring memory */
191 unsigned int size; /* length of desc. ring in bytes */ 194 unsigned int size; /* length of desc. ring in bytes */
@@ -323,6 +326,7 @@ struct igb_adapter {
323 326
324#define IGB_82576_TSYNC_SHIFT 19 327#define IGB_82576_TSYNC_SHIFT 19
325#define IGB_82580_TSYNC_SHIFT 24 328#define IGB_82580_TSYNC_SHIFT 24
329#define IGB_TS_HDR_LEN 16
326enum e1000_state_t { 330enum e1000_state_t {
327 __IGB_TESTING, 331 __IGB_TESTING,
328 __IGB_RESETTING, 332 __IGB_RESETTING,
@@ -336,7 +340,6 @@ enum igb_boards {
336extern char igb_driver_name[]; 340extern char igb_driver_name[];
337extern char igb_driver_version[]; 341extern char igb_driver_version[];
338 342
339extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
340extern int igb_up(struct igb_adapter *); 343extern int igb_up(struct igb_adapter *);
341extern void igb_down(struct igb_adapter *); 344extern void igb_down(struct igb_adapter *);
342extern void igb_reinit_locked(struct igb_adapter *); 345extern void igb_reinit_locked(struct igb_adapter *);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 743038490104..f2ebf927e4bc 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -902,6 +902,49 @@ struct igb_reg_test {
902#define TABLE64_TEST_LO 5 902#define TABLE64_TEST_LO 5
903#define TABLE64_TEST_HI 6 903#define TABLE64_TEST_HI 6
904 904
905/* i350 reg test */
906static struct igb_reg_test reg_test_i350[] = {
907 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
908 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
909 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
910 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
911 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
912 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
913 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
914 { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
915 { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
916 { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
917 /* RDH is read-only for i350, only test RDT. */
918 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
919 { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
920 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
921 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
922 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
923 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
924 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
925 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
926 { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
927 { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
928 { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
929 { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
930 { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
931 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
932 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
933 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
934 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
935 { E1000_RA, 0, 16, TABLE64_TEST_LO,
936 0xFFFFFFFF, 0xFFFFFFFF },
937 { E1000_RA, 0, 16, TABLE64_TEST_HI,
938 0xC3FFFFFF, 0xFFFFFFFF },
939 { E1000_RA2, 0, 16, TABLE64_TEST_LO,
940 0xFFFFFFFF, 0xFFFFFFFF },
941 { E1000_RA2, 0, 16, TABLE64_TEST_HI,
942 0xC3FFFFFF, 0xFFFFFFFF },
943 { E1000_MTA, 0, 128, TABLE32_TEST,
944 0xFFFFFFFF, 0xFFFFFFFF },
945 { 0, 0, 0, 0 }
946};
947
905/* 82580 reg test */ 948/* 82580 reg test */
906static struct igb_reg_test reg_test_82580[] = { 949static struct igb_reg_test reg_test_82580[] = {
907 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 950 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1077,6 +1120,10 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1077 u32 i, toggle; 1120 u32 i, toggle;
1078 1121
1079 switch (adapter->hw.mac.type) { 1122 switch (adapter->hw.mac.type) {
1123 case e1000_i350:
1124 test = reg_test_i350;
1125 toggle = 0x7FEFF3FF;
1126 break;
1080 case e1000_82580: 1127 case e1000_82580:
1081 test = reg_test_82580; 1128 test = reg_test_82580;
1082 toggle = 0x7FEFF3FF; 1129 toggle = 0x7FEFF3FF;
@@ -1238,6 +1285,9 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1238 case e1000_82580: 1285 case e1000_82580:
1239 ics_mask = 0x77DCFED5; 1286 ics_mask = 0x77DCFED5;
1240 break; 1287 break;
1288 case e1000_i350:
1289 ics_mask = 0x77DCFED5;
1290 break;
1241 default: 1291 default:
1242 ics_mask = 0x7FFFFFFF; 1292 ics_mask = 0x7FFFFFFF;
1243 break; 1293 break;
@@ -1344,7 +1394,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
1344 1394
1345 /* Setup Tx descriptor ring and Tx buffers */ 1395 /* Setup Tx descriptor ring and Tx buffers */
1346 tx_ring->count = IGB_DEFAULT_TXD; 1396 tx_ring->count = IGB_DEFAULT_TXD;
1347 tx_ring->pdev = adapter->pdev; 1397 tx_ring->dev = &adapter->pdev->dev;
1348 tx_ring->netdev = adapter->netdev; 1398 tx_ring->netdev = adapter->netdev;
1349 tx_ring->reg_idx = adapter->vfs_allocated_count; 1399 tx_ring->reg_idx = adapter->vfs_allocated_count;
1350 1400
@@ -1358,7 +1408,7 @@ static int igb_setup_desc_rings(struct igb_adapter *adapter)
1358 1408
1359 /* Setup Rx descriptor ring and Rx buffers */ 1409 /* Setup Rx descriptor ring and Rx buffers */
1360 rx_ring->count = IGB_DEFAULT_RXD; 1410 rx_ring->count = IGB_DEFAULT_RXD;
1361 rx_ring->pdev = adapter->pdev; 1411 rx_ring->dev = &adapter->pdev->dev;
1362 rx_ring->netdev = adapter->netdev; 1412 rx_ring->netdev = adapter->netdev;
1363 rx_ring->rx_buffer_len = IGB_RXBUFFER_2048; 1413 rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
1364 rx_ring->reg_idx = adapter->vfs_allocated_count; 1414 rx_ring->reg_idx = adapter->vfs_allocated_count;
@@ -1554,10 +1604,10 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
1554 buffer_info = &rx_ring->buffer_info[rx_ntc]; 1604 buffer_info = &rx_ring->buffer_info[rx_ntc];
1555 1605
1556 /* unmap rx buffer, will be remapped by alloc_rx_buffers */ 1606 /* unmap rx buffer, will be remapped by alloc_rx_buffers */
1557 pci_unmap_single(rx_ring->pdev, 1607 dma_unmap_single(rx_ring->dev,
1558 buffer_info->dma, 1608 buffer_info->dma,
1559 rx_ring->rx_buffer_len, 1609 rx_ring->rx_buffer_len,
1560 PCI_DMA_FROMDEVICE); 1610 DMA_FROM_DEVICE);
1561 buffer_info->dma = 0; 1611 buffer_info->dma = 0;
1562 1612
1563 /* verify contents of skb */ 1613 /* verify contents of skb */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c9baa2aa98cd..3881918f5382 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -62,6 +62,10 @@ static const struct e1000_info *igb_info_tbl[] = {
62}; 62};
63 63
64static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { 64static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, 69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, 70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, 71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -197,6 +201,336 @@ MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
197MODULE_LICENSE("GPL"); 201MODULE_LICENSE("GPL");
198MODULE_VERSION(DRV_VERSION); 202MODULE_VERSION(DRV_VERSION);
199 203
204struct igb_reg_info {
205 u32 ofs;
206 char *name;
207};
208
209static const struct igb_reg_info igb_reg_info_tbl[] = {
210
211 /* General Registers */
212 {E1000_CTRL, "CTRL"},
213 {E1000_STATUS, "STATUS"},
214 {E1000_CTRL_EXT, "CTRL_EXT"},
215
216 /* Interrupt Registers */
217 {E1000_ICR, "ICR"},
218
219 /* RX Registers */
220 {E1000_RCTL, "RCTL"},
221 {E1000_RDLEN(0), "RDLEN"},
222 {E1000_RDH(0), "RDH"},
223 {E1000_RDT(0), "RDT"},
224 {E1000_RXDCTL(0), "RXDCTL"},
225 {E1000_RDBAL(0), "RDBAL"},
226 {E1000_RDBAH(0), "RDBAH"},
227
228 /* TX Registers */
229 {E1000_TCTL, "TCTL"},
230 {E1000_TDBAL(0), "TDBAL"},
231 {E1000_TDBAH(0), "TDBAH"},
232 {E1000_TDLEN(0), "TDLEN"},
233 {E1000_TDH(0), "TDH"},
234 {E1000_TDT(0), "TDT"},
235 {E1000_TXDCTL(0), "TXDCTL"},
236 {E1000_TDFH, "TDFH"},
237 {E1000_TDFT, "TDFT"},
238 {E1000_TDFHS, "TDFHS"},
239 {E1000_TDFPC, "TDFPC"},
240
241 /* List Terminator */
242 {}
243};
244
245/*
246 * igb_regdump - register printout routine
247 */
248static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
249{
250 int n = 0;
251 char rname[16];
252 u32 regs[8];
253
254 switch (reginfo->ofs) {
255 case E1000_RDLEN(0):
256 for (n = 0; n < 4; n++)
257 regs[n] = rd32(E1000_RDLEN(n));
258 break;
259 case E1000_RDH(0):
260 for (n = 0; n < 4; n++)
261 regs[n] = rd32(E1000_RDH(n));
262 break;
263 case E1000_RDT(0):
264 for (n = 0; n < 4; n++)
265 regs[n] = rd32(E1000_RDT(n));
266 break;
267 case E1000_RXDCTL(0):
268 for (n = 0; n < 4; n++)
269 regs[n] = rd32(E1000_RXDCTL(n));
270 break;
271 case E1000_RDBAL(0):
272 for (n = 0; n < 4; n++)
273 regs[n] = rd32(E1000_RDBAL(n));
274 break;
275 case E1000_RDBAH(0):
276 for (n = 0; n < 4; n++)
277 regs[n] = rd32(E1000_RDBAH(n));
278 break;
279 case E1000_TDBAL(0):
280 for (n = 0; n < 4; n++)
281 regs[n] = rd32(E1000_RDBAL(n));
282 break;
283 case E1000_TDBAH(0):
284 for (n = 0; n < 4; n++)
285 regs[n] = rd32(E1000_TDBAH(n));
286 break;
287 case E1000_TDLEN(0):
288 for (n = 0; n < 4; n++)
289 regs[n] = rd32(E1000_TDLEN(n));
290 break;
291 case E1000_TDH(0):
292 for (n = 0; n < 4; n++)
293 regs[n] = rd32(E1000_TDH(n));
294 break;
295 case E1000_TDT(0):
296 for (n = 0; n < 4; n++)
297 regs[n] = rd32(E1000_TDT(n));
298 break;
299 case E1000_TXDCTL(0):
300 for (n = 0; n < 4; n++)
301 regs[n] = rd32(E1000_TXDCTL(n));
302 break;
303 default:
304 printk(KERN_INFO "%-15s %08x\n",
305 reginfo->name, rd32(reginfo->ofs));
306 return;
307 }
308
309 snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
310 printk(KERN_INFO "%-15s ", rname);
311 for (n = 0; n < 4; n++)
312 printk(KERN_CONT "%08x ", regs[n]);
313 printk(KERN_CONT "\n");
314}
315
316/*
317 * igb_dump - Print registers, tx-rings and rx-rings
318 */
319static void igb_dump(struct igb_adapter *adapter)
320{
321 struct net_device *netdev = adapter->netdev;
322 struct e1000_hw *hw = &adapter->hw;
323 struct igb_reg_info *reginfo;
324 int n = 0;
325 struct igb_ring *tx_ring;
326 union e1000_adv_tx_desc *tx_desc;
327 struct my_u0 { u64 a; u64 b; } *u0;
328 struct igb_buffer *buffer_info;
329 struct igb_ring *rx_ring;
330 union e1000_adv_rx_desc *rx_desc;
331 u32 staterr;
332 int i = 0;
333
334 if (!netif_msg_hw(adapter))
335 return;
336
337 /* Print netdevice Info */
338 if (netdev) {
339 dev_info(&adapter->pdev->dev, "Net device Info\n");
340 printk(KERN_INFO "Device Name state "
341 "trans_start last_rx\n");
342 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
343 netdev->name,
344 netdev->state,
345 netdev->trans_start,
346 netdev->last_rx);
347 }
348
349 /* Print Registers */
350 dev_info(&adapter->pdev->dev, "Register Dump\n");
351 printk(KERN_INFO " Register Name Value\n");
352 for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
353 reginfo->name; reginfo++) {
354 igb_regdump(hw, reginfo);
355 }
356
357 /* Print TX Ring Summary */
358 if (!netdev || !netif_running(netdev))
359 goto exit;
360
361 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
362 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
363 " leng ntw timestamp\n");
364 for (n = 0; n < adapter->num_tx_queues; n++) {
365 tx_ring = adapter->tx_ring[n];
366 buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
367 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
368 n, tx_ring->next_to_use, tx_ring->next_to_clean,
369 (u64)buffer_info->dma,
370 buffer_info->length,
371 buffer_info->next_to_watch,
372 (u64)buffer_info->time_stamp);
373 }
374
375 /* Print TX Rings */
376 if (!netif_msg_tx_done(adapter))
377 goto rx_ring_summary;
378
379 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
380
381 /* Transmit Descriptor Formats
382 *
383 * Advanced Transmit Descriptor
384 * +--------------------------------------------------------------+
385 * 0 | Buffer Address [63:0] |
386 * +--------------------------------------------------------------+
387 * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN |
388 * +--------------------------------------------------------------+
389 * 63 46 45 40 39 38 36 35 32 31 24 15 0
390 */
391
392 for (n = 0; n < adapter->num_tx_queues; n++) {
393 tx_ring = adapter->tx_ring[n];
394 printk(KERN_INFO "------------------------------------\n");
395 printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
396 printk(KERN_INFO "------------------------------------\n");
397 printk(KERN_INFO "T [desc] [address 63:0 ] "
398 "[PlPOCIStDDM Ln] [bi->dma ] "
399 "leng ntw timestamp bi->skb\n");
400
401 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
402 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
403 buffer_info = &tx_ring->buffer_info[i];
404 u0 = (struct my_u0 *)tx_desc;
405 printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
406 " %04X %3X %016llX %p", i,
407 le64_to_cpu(u0->a),
408 le64_to_cpu(u0->b),
409 (u64)buffer_info->dma,
410 buffer_info->length,
411 buffer_info->next_to_watch,
412 (u64)buffer_info->time_stamp,
413 buffer_info->skb);
414 if (i == tx_ring->next_to_use &&
415 i == tx_ring->next_to_clean)
416 printk(KERN_CONT " NTC/U\n");
417 else if (i == tx_ring->next_to_use)
418 printk(KERN_CONT " NTU\n");
419 else if (i == tx_ring->next_to_clean)
420 printk(KERN_CONT " NTC\n");
421 else
422 printk(KERN_CONT "\n");
423
424 if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
425 print_hex_dump(KERN_INFO, "",
426 DUMP_PREFIX_ADDRESS,
427 16, 1, phys_to_virt(buffer_info->dma),
428 buffer_info->length, true);
429 }
430 }
431
432 /* Print RX Rings Summary */
433rx_ring_summary:
434 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
435 printk(KERN_INFO "Queue [NTU] [NTC]\n");
436 for (n = 0; n < adapter->num_rx_queues; n++) {
437 rx_ring = adapter->rx_ring[n];
438 printk(KERN_INFO " %5d %5X %5X\n", n,
439 rx_ring->next_to_use, rx_ring->next_to_clean);
440 }
441
442 /* Print RX Rings */
443 if (!netif_msg_rx_status(adapter))
444 goto exit;
445
446 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
447
448 /* Advanced Receive Descriptor (Read) Format
449 * 63 1 0
450 * +-----------------------------------------------------+
451 * 0 | Packet Buffer Address [63:1] |A0/NSE|
452 * +----------------------------------------------+------+
453 * 8 | Header Buffer Address [63:1] | DD |
454 * +-----------------------------------------------------+
455 *
456 *
457 * Advanced Receive Descriptor (Write-Back) Format
458 *
459 * 63 48 47 32 31 30 21 20 17 16 4 3 0
460 * +------------------------------------------------------+
461 * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
462 * | Checksum Ident | | | | Type | Type |
463 * +------------------------------------------------------+
464 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
465 * +------------------------------------------------------+
466 * 63 48 47 32 31 20 19 0
467 */
468
469 for (n = 0; n < adapter->num_rx_queues; n++) {
470 rx_ring = adapter->rx_ring[n];
471 printk(KERN_INFO "------------------------------------\n");
472 printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
473 printk(KERN_INFO "------------------------------------\n");
474 printk(KERN_INFO "R [desc] [ PktBuf A0] "
475 "[ HeadBuf DD] [bi->dma ] [bi->skb] "
476 "<-- Adv Rx Read format\n");
477 printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
478 "[vl er S cks ln] ---------------- [bi->skb] "
479 "<-- Adv Rx Write-Back format\n");
480
481 for (i = 0; i < rx_ring->count; i++) {
482 buffer_info = &rx_ring->buffer_info[i];
483 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
484 u0 = (struct my_u0 *)rx_desc;
485 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
486 if (staterr & E1000_RXD_STAT_DD) {
487 /* Descriptor Done */
488 printk(KERN_INFO "RWB[0x%03X] %016llX "
489 "%016llX ---------------- %p", i,
490 le64_to_cpu(u0->a),
491 le64_to_cpu(u0->b),
492 buffer_info->skb);
493 } else {
494 printk(KERN_INFO "R [0x%03X] %016llX "
495 "%016llX %016llX %p", i,
496 le64_to_cpu(u0->a),
497 le64_to_cpu(u0->b),
498 (u64)buffer_info->dma,
499 buffer_info->skb);
500
501 if (netif_msg_pktdata(adapter)) {
502 print_hex_dump(KERN_INFO, "",
503 DUMP_PREFIX_ADDRESS,
504 16, 1,
505 phys_to_virt(buffer_info->dma),
506 rx_ring->rx_buffer_len, true);
507 if (rx_ring->rx_buffer_len
508 < IGB_RXBUFFER_1024)
509 print_hex_dump(KERN_INFO, "",
510 DUMP_PREFIX_ADDRESS,
511 16, 1,
512 phys_to_virt(
513 buffer_info->page_dma +
514 buffer_info->page_offset),
515 PAGE_SIZE/2, true);
516 }
517 }
518
519 if (i == rx_ring->next_to_use)
520 printk(KERN_CONT " NTU\n");
521 else if (i == rx_ring->next_to_clean)
522 printk(KERN_CONT " NTC\n");
523 else
524 printk(KERN_CONT "\n");
525
526 }
527 }
528
529exit:
530 return;
531}
532
533
200/** 534/**
201 * igb_read_clock - read raw cycle counter (to be used by time counter) 535 * igb_read_clock - read raw cycle counter (to be used by time counter)
202 */ 536 */
@@ -223,41 +557,15 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
223 return stamp; 557 return stamp;
224} 558}
225 559
226#ifdef DEBUG
227/** 560/**
228 * igb_get_hw_dev_name - return device name string 561 * igb_get_hw_dev - return device
229 * used by hardware layer to print debugging information 562 * used by hardware layer to print debugging information
230 **/ 563 **/
231char *igb_get_hw_dev_name(struct e1000_hw *hw) 564struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
232{ 565{
233 struct igb_adapter *adapter = hw->back; 566 struct igb_adapter *adapter = hw->back;
234 return adapter->netdev->name; 567 return adapter->netdev;
235}
236
237/**
238 * igb_get_time_str - format current NIC and system time as string
239 */
240static char *igb_get_time_str(struct igb_adapter *adapter,
241 char buffer[160])
242{
243 cycle_t hw = adapter->cycles.read(&adapter->cycles);
244 struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
245 struct timespec sys;
246 struct timespec delta;
247 getnstimeofday(&sys);
248
249 delta = timespec_sub(nic, sys);
250
251 sprintf(buffer,
252 "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
253 hw,
254 (long)nic.tv_sec, nic.tv_nsec,
255 (long)sys.tv_sec, sys.tv_nsec,
256 (long)delta.tv_sec, delta.tv_nsec);
257
258 return buffer;
259} 568}
260#endif
261 569
262/** 570/**
263 * igb_init_module - Driver Registration Routine 571 * igb_init_module - Driver Registration Routine
@@ -328,6 +636,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
328 } 636 }
329 case e1000_82575: 637 case e1000_82575:
330 case e1000_82580: 638 case e1000_82580:
639 case e1000_i350:
331 default: 640 default:
332 for (; i < adapter->num_rx_queues; i++) 641 for (; i < adapter->num_rx_queues; i++)
333 adapter->rx_ring[i]->reg_idx = rbase_offset + i; 642 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -371,7 +680,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
371 goto err; 680 goto err;
372 ring->count = adapter->tx_ring_count; 681 ring->count = adapter->tx_ring_count;
373 ring->queue_index = i; 682 ring->queue_index = i;
374 ring->pdev = adapter->pdev; 683 ring->dev = &adapter->pdev->dev;
375 ring->netdev = adapter->netdev; 684 ring->netdev = adapter->netdev;
376 /* For 82575, context index must be unique per ring. */ 685 /* For 82575, context index must be unique per ring. */
377 if (adapter->hw.mac.type == e1000_82575) 686 if (adapter->hw.mac.type == e1000_82575)
@@ -385,7 +694,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
385 goto err; 694 goto err;
386 ring->count = adapter->rx_ring_count; 695 ring->count = adapter->rx_ring_count;
387 ring->queue_index = i; 696 ring->queue_index = i;
388 ring->pdev = adapter->pdev; 697 ring->dev = &adapter->pdev->dev;
389 ring->netdev = adapter->netdev; 698 ring->netdev = adapter->netdev;
390 ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 699 ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
391 ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */ 700 ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
@@ -471,6 +780,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
471 q_vector->eims_value = 1 << msix_vector; 780 q_vector->eims_value = 1 << msix_vector;
472 break; 781 break;
473 case e1000_82580: 782 case e1000_82580:
783 case e1000_i350:
474 /* 82580 uses the same table-based approach as 82576 but has fewer 784 /* 82580 uses the same table-based approach as 82576 but has fewer
475 entries as a result we carry over for queues greater than 4. */ 785 entries as a result we carry over for queues greater than 4. */
476 if (rx_queue > IGB_N0_QUEUE) { 786 if (rx_queue > IGB_N0_QUEUE) {
@@ -551,6 +861,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
551 861
552 case e1000_82576: 862 case e1000_82576:
553 case e1000_82580: 863 case e1000_82580:
864 case e1000_i350:
554 /* Turn on MSI-X capability first, or our settings 865 /* Turn on MSI-X capability first, or our settings
555 * won't stick. And it will take days to debug. */ 866 * won't stick. And it will take days to debug. */
556 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | 867 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
@@ -743,7 +1054,6 @@ msi_only:
743out: 1054out:
744 /* Notify the stack of the (possibly) reduced Tx Queue count. */ 1055 /* Notify the stack of the (possibly) reduced Tx Queue count. */
745 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues; 1056 adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
746 return;
747} 1057}
748 1058
749/** 1059/**
@@ -1253,6 +1563,7 @@ void igb_reset(struct igb_adapter *adapter)
1253 * To take effect CTRL.RST is required. 1563 * To take effect CTRL.RST is required.
1254 */ 1564 */
1255 switch (mac->type) { 1565 switch (mac->type) {
1566 case e1000_i350:
1256 case e1000_82580: 1567 case e1000_82580:
1257 pba = rd32(E1000_RXPBS); 1568 pba = rd32(E1000_RXPBS);
1258 pba = igb_rxpbs_adjust_82580(pba); 1569 pba = igb_rxpbs_adjust_82580(pba);
@@ -1416,15 +1727,15 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1416 return err; 1727 return err;
1417 1728
1418 pci_using_dac = 0; 1729 pci_using_dac = 0;
1419 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 1730 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
1420 if (!err) { 1731 if (!err) {
1421 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 1732 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1422 if (!err) 1733 if (!err)
1423 pci_using_dac = 1; 1734 pci_using_dac = 1;
1424 } else { 1735 } else {
1425 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1736 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1426 if (err) { 1737 if (err) {
1427 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1738 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1428 if (err) { 1739 if (err) {
1429 dev_err(&pdev->dev, "No usable DMA " 1740 dev_err(&pdev->dev, "No usable DMA "
1430 "configuration, aborting\n"); 1741 "configuration, aborting\n");
@@ -1656,6 +1967,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1656 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 1967 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
1657 netdev->name, 1968 netdev->name,
1658 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : 1969 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1970 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
1659 "unknown"), 1971 "unknown"),
1660 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 1972 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1661 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : 1973 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
@@ -1826,6 +2138,7 @@ static void igb_init_hw_timer(struct igb_adapter *adapter)
1826 struct e1000_hw *hw = &adapter->hw; 2138 struct e1000_hw *hw = &adapter->hw;
1827 2139
1828 switch (hw->mac.type) { 2140 switch (hw->mac.type) {
2141 case e1000_i350:
1829 case e1000_82580: 2142 case e1000_82580:
1830 memset(&adapter->cycles, 0, sizeof(adapter->cycles)); 2143 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1831 adapter->cycles.read = igb_read_clock; 2144 adapter->cycles.read = igb_read_clock;
@@ -2096,7 +2409,7 @@ static int igb_close(struct net_device *netdev)
2096 **/ 2409 **/
2097int igb_setup_tx_resources(struct igb_ring *tx_ring) 2410int igb_setup_tx_resources(struct igb_ring *tx_ring)
2098{ 2411{
2099 struct pci_dev *pdev = tx_ring->pdev; 2412 struct device *dev = tx_ring->dev;
2100 int size; 2413 int size;
2101 2414
2102 size = sizeof(struct igb_buffer) * tx_ring->count; 2415 size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -2109,9 +2422,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2109 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 2422 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
2110 tx_ring->size = ALIGN(tx_ring->size, 4096); 2423 tx_ring->size = ALIGN(tx_ring->size, 4096);
2111 2424
2112 tx_ring->desc = pci_alloc_consistent(pdev, 2425 tx_ring->desc = dma_alloc_coherent(dev,
2113 tx_ring->size, 2426 tx_ring->size,
2114 &tx_ring->dma); 2427 &tx_ring->dma,
2428 GFP_KERNEL);
2115 2429
2116 if (!tx_ring->desc) 2430 if (!tx_ring->desc)
2117 goto err; 2431 goto err;
@@ -2122,7 +2436,7 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2122 2436
2123err: 2437err:
2124 vfree(tx_ring->buffer_info); 2438 vfree(tx_ring->buffer_info);
2125 dev_err(&pdev->dev, 2439 dev_err(dev,
2126 "Unable to allocate memory for the transmit descriptor ring\n"); 2440 "Unable to allocate memory for the transmit descriptor ring\n");
2127 return -ENOMEM; 2441 return -ENOMEM;
2128} 2442}
@@ -2246,7 +2560,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
2246 **/ 2560 **/
2247int igb_setup_rx_resources(struct igb_ring *rx_ring) 2561int igb_setup_rx_resources(struct igb_ring *rx_ring)
2248{ 2562{
2249 struct pci_dev *pdev = rx_ring->pdev; 2563 struct device *dev = rx_ring->dev;
2250 int size, desc_len; 2564 int size, desc_len;
2251 2565
2252 size = sizeof(struct igb_buffer) * rx_ring->count; 2566 size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2261,8 +2575,10 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2261 rx_ring->size = rx_ring->count * desc_len; 2575 rx_ring->size = rx_ring->count * desc_len;
2262 rx_ring->size = ALIGN(rx_ring->size, 4096); 2576 rx_ring->size = ALIGN(rx_ring->size, 4096);
2263 2577
2264 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 2578 rx_ring->desc = dma_alloc_coherent(dev,
2265 &rx_ring->dma); 2579 rx_ring->size,
2580 &rx_ring->dma,
2581 GFP_KERNEL);
2266 2582
2267 if (!rx_ring->desc) 2583 if (!rx_ring->desc)
2268 goto err; 2584 goto err;
@@ -2275,8 +2591,8 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2275err: 2591err:
2276 vfree(rx_ring->buffer_info); 2592 vfree(rx_ring->buffer_info);
2277 rx_ring->buffer_info = NULL; 2593 rx_ring->buffer_info = NULL;
2278 dev_err(&pdev->dev, "Unable to allocate memory for " 2594 dev_err(dev, "Unable to allocate memory for the receive descriptor"
2279 "the receive descriptor ring\n"); 2595 " ring\n");
2280 return -ENOMEM; 2596 return -ENOMEM;
2281} 2597}
2282 2598
@@ -2339,6 +2655,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
2339 if (adapter->vfs_allocated_count) { 2655 if (adapter->vfs_allocated_count) {
2340 /* 82575 and 82576 supports 2 RSS queues for VMDq */ 2656 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2341 switch (hw->mac.type) { 2657 switch (hw->mac.type) {
2658 case e1000_i350:
2342 case e1000_82580: 2659 case e1000_82580:
2343 num_rx_queues = 1; 2660 num_rx_queues = 1;
2344 shift = 0; 2661 shift = 0;
@@ -2590,6 +2907,8 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
2590 E1000_SRRCTL_BSIZEPKT_SHIFT; 2907 E1000_SRRCTL_BSIZEPKT_SHIFT;
2591 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2908 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2592 } 2909 }
2910 if (hw->mac.type == e1000_82580)
2911 srrctl |= E1000_SRRCTL_TIMESTAMP;
2593 /* Only set Drop Enable if we are supporting multiple queues */ 2912 /* Only set Drop Enable if we are supporting multiple queues */
2594 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) 2913 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
2595 srrctl |= E1000_SRRCTL_DROP_EN; 2914 srrctl |= E1000_SRRCTL_DROP_EN;
@@ -2649,8 +2968,8 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
2649 if (!tx_ring->desc) 2968 if (!tx_ring->desc)
2650 return; 2969 return;
2651 2970
2652 pci_free_consistent(tx_ring->pdev, tx_ring->size, 2971 dma_free_coherent(tx_ring->dev, tx_ring->size,
2653 tx_ring->desc, tx_ring->dma); 2972 tx_ring->desc, tx_ring->dma);
2654 2973
2655 tx_ring->desc = NULL; 2974 tx_ring->desc = NULL;
2656} 2975}
@@ -2674,15 +2993,15 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2674{ 2993{
2675 if (buffer_info->dma) { 2994 if (buffer_info->dma) {
2676 if (buffer_info->mapped_as_page) 2995 if (buffer_info->mapped_as_page)
2677 pci_unmap_page(tx_ring->pdev, 2996 dma_unmap_page(tx_ring->dev,
2678 buffer_info->dma, 2997 buffer_info->dma,
2679 buffer_info->length, 2998 buffer_info->length,
2680 PCI_DMA_TODEVICE); 2999 DMA_TO_DEVICE);
2681 else 3000 else
2682 pci_unmap_single(tx_ring->pdev, 3001 dma_unmap_single(tx_ring->dev,
2683 buffer_info->dma, 3002 buffer_info->dma,
2684 buffer_info->length, 3003 buffer_info->length,
2685 PCI_DMA_TODEVICE); 3004 DMA_TO_DEVICE);
2686 buffer_info->dma = 0; 3005 buffer_info->dma = 0;
2687 } 3006 }
2688 if (buffer_info->skb) { 3007 if (buffer_info->skb) {
@@ -2753,8 +3072,8 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
2753 if (!rx_ring->desc) 3072 if (!rx_ring->desc)
2754 return; 3073 return;
2755 3074
2756 pci_free_consistent(rx_ring->pdev, rx_ring->size, 3075 dma_free_coherent(rx_ring->dev, rx_ring->size,
2757 rx_ring->desc, rx_ring->dma); 3076 rx_ring->desc, rx_ring->dma);
2758 3077
2759 rx_ring->desc = NULL; 3078 rx_ring->desc = NULL;
2760} 3079}
@@ -2790,10 +3109,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2790 for (i = 0; i < rx_ring->count; i++) { 3109 for (i = 0; i < rx_ring->count; i++) {
2791 buffer_info = &rx_ring->buffer_info[i]; 3110 buffer_info = &rx_ring->buffer_info[i];
2792 if (buffer_info->dma) { 3111 if (buffer_info->dma) {
2793 pci_unmap_single(rx_ring->pdev, 3112 dma_unmap_single(rx_ring->dev,
2794 buffer_info->dma, 3113 buffer_info->dma,
2795 rx_ring->rx_buffer_len, 3114 rx_ring->rx_buffer_len,
2796 PCI_DMA_FROMDEVICE); 3115 DMA_FROM_DEVICE);
2797 buffer_info->dma = 0; 3116 buffer_info->dma = 0;
2798 } 3117 }
2799 3118
@@ -2802,10 +3121,10 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2802 buffer_info->skb = NULL; 3121 buffer_info->skb = NULL;
2803 } 3122 }
2804 if (buffer_info->page_dma) { 3123 if (buffer_info->page_dma) {
2805 pci_unmap_page(rx_ring->pdev, 3124 dma_unmap_page(rx_ring->dev,
2806 buffer_info->page_dma, 3125 buffer_info->page_dma,
2807 PAGE_SIZE / 2, 3126 PAGE_SIZE / 2,
2808 PCI_DMA_FROMDEVICE); 3127 DMA_FROM_DEVICE);
2809 buffer_info->page_dma = 0; 3128 buffer_info->page_dma = 0;
2810 } 3129 }
2811 if (buffer_info->page) { 3130 if (buffer_info->page) {
@@ -2876,7 +3195,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
2876{ 3195{
2877 struct igb_adapter *adapter = netdev_priv(netdev); 3196 struct igb_adapter *adapter = netdev_priv(netdev);
2878 struct e1000_hw *hw = &adapter->hw; 3197 struct e1000_hw *hw = &adapter->hw;
2879 struct dev_mc_list *mc_ptr; 3198 struct netdev_hw_addr *ha;
2880 u8 *mta_list; 3199 u8 *mta_list;
2881 int i; 3200 int i;
2882 3201
@@ -2893,8 +3212,8 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
2893 3212
2894 /* The shared function expects a packed array of only addresses. */ 3213 /* The shared function expects a packed array of only addresses. */
2895 i = 0; 3214 i = 0;
2896 netdev_for_each_mc_addr(mc_ptr, netdev) 3215 netdev_for_each_mc_addr(ha, netdev)
2897 memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); 3216 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
2898 3217
2899 igb_update_mc_addr_list(hw, mta_list, i); 3218 igb_update_mc_addr_list(hw, mta_list, i);
2900 kfree(mta_list); 3219 kfree(mta_list);
@@ -3397,8 +3716,6 @@ set_itr_now:
3397 q_vector->itr_val = new_itr; 3716 q_vector->itr_val = new_itr;
3398 q_vector->set_itr = 1; 3717 q_vector->set_itr = 1;
3399 } 3718 }
3400
3401 return;
3402} 3719}
3403 3720
3404#define IGB_TX_FLAGS_CSUM 0x00000001 3721#define IGB_TX_FLAGS_CSUM 0x00000001
@@ -3493,7 +3810,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3493 struct sk_buff *skb, u32 tx_flags) 3810 struct sk_buff *skb, u32 tx_flags)
3494{ 3811{
3495 struct e1000_adv_tx_context_desc *context_desc; 3812 struct e1000_adv_tx_context_desc *context_desc;
3496 struct pci_dev *pdev = tx_ring->pdev; 3813 struct device *dev = tx_ring->dev;
3497 struct igb_buffer *buffer_info; 3814 struct igb_buffer *buffer_info;
3498 u32 info = 0, tu_cmd = 0; 3815 u32 info = 0, tu_cmd = 0;
3499 unsigned int i; 3816 unsigned int i;
@@ -3544,7 +3861,7 @@ static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3544 break; 3861 break;
3545 default: 3862 default:
3546 if (unlikely(net_ratelimit())) 3863 if (unlikely(net_ratelimit()))
3547 dev_warn(&pdev->dev, 3864 dev_warn(dev,
3548 "partial checksum but proto=%x!\n", 3865 "partial checksum but proto=%x!\n",
3549 skb->protocol); 3866 skb->protocol);
3550 break; 3867 break;
@@ -3578,59 +3895,61 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3578 unsigned int first) 3895 unsigned int first)
3579{ 3896{
3580 struct igb_buffer *buffer_info; 3897 struct igb_buffer *buffer_info;
3581 struct pci_dev *pdev = tx_ring->pdev; 3898 struct device *dev = tx_ring->dev;
3582 unsigned int len = skb_headlen(skb); 3899 unsigned int hlen = skb_headlen(skb);
3583 unsigned int count = 0, i; 3900 unsigned int count = 0, i;
3584 unsigned int f; 3901 unsigned int f;
3902 u16 gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
3585 3903
3586 i = tx_ring->next_to_use; 3904 i = tx_ring->next_to_use;
3587 3905
3588 buffer_info = &tx_ring->buffer_info[i]; 3906 buffer_info = &tx_ring->buffer_info[i];
3589 BUG_ON(len >= IGB_MAX_DATA_PER_TXD); 3907 BUG_ON(hlen >= IGB_MAX_DATA_PER_TXD);
3590 buffer_info->length = len; 3908 buffer_info->length = hlen;
3591 /* set time_stamp *before* dma to help avoid a possible race */ 3909 /* set time_stamp *before* dma to help avoid a possible race */
3592 buffer_info->time_stamp = jiffies; 3910 buffer_info->time_stamp = jiffies;
3593 buffer_info->next_to_watch = i; 3911 buffer_info->next_to_watch = i;
3594 buffer_info->dma = pci_map_single(pdev, skb->data, len, 3912 buffer_info->dma = dma_map_single(dev, skb->data, hlen,
3595 PCI_DMA_TODEVICE); 3913 DMA_TO_DEVICE);
3596 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 3914 if (dma_mapping_error(dev, buffer_info->dma))
3597 goto dma_error; 3915 goto dma_error;
3598 3916
3599 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 3917 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
3600 struct skb_frag_struct *frag; 3918 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f];
3919 unsigned int len = frag->size;
3601 3920
3602 count++; 3921 count++;
3603 i++; 3922 i++;
3604 if (i == tx_ring->count) 3923 if (i == tx_ring->count)
3605 i = 0; 3924 i = 0;
3606 3925
3607 frag = &skb_shinfo(skb)->frags[f];
3608 len = frag->size;
3609
3610 buffer_info = &tx_ring->buffer_info[i]; 3926 buffer_info = &tx_ring->buffer_info[i];
3611 BUG_ON(len >= IGB_MAX_DATA_PER_TXD); 3927 BUG_ON(len >= IGB_MAX_DATA_PER_TXD);
3612 buffer_info->length = len; 3928 buffer_info->length = len;
3613 buffer_info->time_stamp = jiffies; 3929 buffer_info->time_stamp = jiffies;
3614 buffer_info->next_to_watch = i; 3930 buffer_info->next_to_watch = i;
3615 buffer_info->mapped_as_page = true; 3931 buffer_info->mapped_as_page = true;
3616 buffer_info->dma = pci_map_page(pdev, 3932 buffer_info->dma = dma_map_page(dev,
3617 frag->page, 3933 frag->page,
3618 frag->page_offset, 3934 frag->page_offset,
3619 len, 3935 len,
3620 PCI_DMA_TODEVICE); 3936 DMA_TO_DEVICE);
3621 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 3937 if (dma_mapping_error(dev, buffer_info->dma))
3622 goto dma_error; 3938 goto dma_error;
3623 3939
3624 } 3940 }
3625 3941
3626 tx_ring->buffer_info[i].skb = skb; 3942 tx_ring->buffer_info[i].skb = skb;
3627 tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1; 3943 tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
3944 /* multiply data chunks by size of headers */
3945 tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
3946 tx_ring->buffer_info[i].gso_segs = gso_segs;
3628 tx_ring->buffer_info[first].next_to_watch = i; 3947 tx_ring->buffer_info[first].next_to_watch = i;
3629 3948
3630 return ++count; 3949 return ++count;
3631 3950
3632dma_error: 3951dma_error:
3633 dev_err(&pdev->dev, "TX DMA map failed\n"); 3952 dev_err(dev, "TX DMA map failed\n");
3634 3953
3635 /* clear timestamp and dma mappings for failed buffer_info mapping */ 3954 /* clear timestamp and dma mappings for failed buffer_info mapping */
3636 buffer_info->dma = 0; 3955 buffer_info->dma = 0;
@@ -3868,6 +4187,8 @@ static void igb_reset_task(struct work_struct *work)
3868 struct igb_adapter *adapter; 4187 struct igb_adapter *adapter;
3869 adapter = container_of(work, struct igb_adapter, reset_task); 4188 adapter = container_of(work, struct igb_adapter, reset_task);
3870 4189
4190 igb_dump(adapter);
4191 netdev_err(adapter->netdev, "Reset adapter\n");
3871 igb_reinit_locked(adapter); 4192 igb_reinit_locked(adapter);
3872} 4193}
3873 4194
@@ -3920,6 +4241,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3920 * i.e. RXBUFFER_2048 --> size-4096 slab 4241 * i.e. RXBUFFER_2048 --> size-4096 slab
3921 */ 4242 */
3922 4243
4244 if (adapter->hw.mac.type == e1000_82580)
4245 max_frame += IGB_TS_HDR_LEN;
4246
3923 if (max_frame <= IGB_RXBUFFER_1024) 4247 if (max_frame <= IGB_RXBUFFER_1024)
3924 rx_buffer_len = IGB_RXBUFFER_1024; 4248 rx_buffer_len = IGB_RXBUFFER_1024;
3925 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE) 4249 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
@@ -3927,6 +4251,14 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3927 else 4251 else
3928 rx_buffer_len = IGB_RXBUFFER_128; 4252 rx_buffer_len = IGB_RXBUFFER_128;
3929 4253
4254 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
4255 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
4256 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
4257
4258 if ((adapter->hw.mac.type == e1000_82580) &&
4259 (rx_buffer_len == IGB_RXBUFFER_128))
4260 rx_buffer_len += IGB_RXBUFFER_64;
4261
3930 if (netif_running(netdev)) 4262 if (netif_running(netdev))
3931 igb_down(adapter); 4263 igb_down(adapter);
3932 4264
@@ -4955,22 +5287,21 @@ static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4955/** 5287/**
4956 * igb_tx_hwtstamp - utility function which checks for TX time stamp 5288 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4957 * @q_vector: pointer to q_vector containing needed info 5289 * @q_vector: pointer to q_vector containing needed info
4958 * @skb: packet that was just sent 5290 * @buffer: pointer to igb_buffer structure
4959 * 5291 *
4960 * If we were asked to do hardware stamping and such a time stamp is 5292 * If we were asked to do hardware stamping and such a time stamp is
4961 * available, then it must have been for this skb here because we only 5293 * available, then it must have been for this skb here because we only
4962 * allow only one such packet into the queue. 5294 * allow only one such packet into the queue.
4963 */ 5295 */
4964static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb) 5296static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *buffer_info)
4965{ 5297{
4966 struct igb_adapter *adapter = q_vector->adapter; 5298 struct igb_adapter *adapter = q_vector->adapter;
4967 union skb_shared_tx *shtx = skb_tx(skb);
4968 struct e1000_hw *hw = &adapter->hw; 5299 struct e1000_hw *hw = &adapter->hw;
4969 struct skb_shared_hwtstamps shhwtstamps; 5300 struct skb_shared_hwtstamps shhwtstamps;
4970 u64 regval; 5301 u64 regval;
4971 5302
4972 /* if skb does not support hw timestamp or TX stamp not valid exit */ 5303 /* if skb does not support hw timestamp or TX stamp not valid exit */
4973 if (likely(!shtx->hardware) || 5304 if (likely(!buffer_info->shtx.hardware) ||
4974 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID)) 5305 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4975 return; 5306 return;
4976 5307
@@ -4978,7 +5309,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
4978 regval |= (u64)rd32(E1000_TXSTMPH) << 32; 5309 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4979 5310
4980 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval); 5311 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4981 skb_tstamp_tx(skb, &shhwtstamps); 5312 skb_tstamp_tx(buffer_info->skb, &shhwtstamps);
4982} 5313}
4983 5314
4984/** 5315/**
@@ -4993,7 +5324,6 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
4993 struct net_device *netdev = tx_ring->netdev; 5324 struct net_device *netdev = tx_ring->netdev;
4994 struct e1000_hw *hw = &adapter->hw; 5325 struct e1000_hw *hw = &adapter->hw;
4995 struct igb_buffer *buffer_info; 5326 struct igb_buffer *buffer_info;
4996 struct sk_buff *skb;
4997 union e1000_adv_tx_desc *tx_desc, *eop_desc; 5327 union e1000_adv_tx_desc *tx_desc, *eop_desc;
4998 unsigned int total_bytes = 0, total_packets = 0; 5328 unsigned int total_bytes = 0, total_packets = 0;
4999 unsigned int i, eop, count = 0; 5329 unsigned int i, eop, count = 0;
@@ -5009,19 +5339,12 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5009 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 5339 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
5010 buffer_info = &tx_ring->buffer_info[i]; 5340 buffer_info = &tx_ring->buffer_info[i];
5011 cleaned = (i == eop); 5341 cleaned = (i == eop);
5012 skb = buffer_info->skb;
5013 5342
5014 if (skb) { 5343 if (buffer_info->skb) {
5015 unsigned int segs, bytecount; 5344 total_bytes += buffer_info->bytecount;
5016 /* gso_segs is currently only valid for tcp */ 5345 /* gso_segs is currently only valid for tcp */
5017 segs = buffer_info->gso_segs; 5346 total_packets += buffer_info->gso_segs;
5018 /* multiply data chunks by size of headers */ 5347 igb_tx_hwtstamp(q_vector, buffer_info);
5019 bytecount = ((segs - 1) * skb_headlen(skb)) +
5020 skb->len;
5021 total_packets += segs;
5022 total_bytes += bytecount;
5023
5024 igb_tx_hwtstamp(q_vector, skb);
5025 } 5348 }
5026 5349
5027 igb_unmap_and_free_tx_resource(tx_ring, buffer_info); 5350 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
@@ -5061,7 +5384,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5061 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) { 5384 !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
5062 5385
5063 /* detected Tx unit hang */ 5386 /* detected Tx unit hang */
5064 dev_err(&tx_ring->pdev->dev, 5387 dev_err(tx_ring->dev,
5065 "Detected Tx Unit Hang\n" 5388 "Detected Tx Unit Hang\n"
5066 " Tx Queue <%d>\n" 5389 " Tx Queue <%d>\n"
5067 " TDH <%x>\n" 5390 " TDH <%x>\n"
@@ -5140,10 +5463,10 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
5140 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) 5463 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
5141 skb->ip_summed = CHECKSUM_UNNECESSARY; 5464 skb->ip_summed = CHECKSUM_UNNECESSARY;
5142 5465
5143 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err); 5466 dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
5144} 5467}
5145 5468
5146static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, 5469static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5147 struct sk_buff *skb) 5470 struct sk_buff *skb)
5148{ 5471{
5149 struct igb_adapter *adapter = q_vector->adapter; 5472 struct igb_adapter *adapter = q_vector->adapter;
@@ -5161,13 +5484,18 @@ static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5161 * If nothing went wrong, then it should have a skb_shared_tx that we 5484 * If nothing went wrong, then it should have a skb_shared_tx that we
5162 * can turn into a skb_shared_hwtstamps. 5485 * can turn into a skb_shared_hwtstamps.
5163 */ 5486 */
5164 if (likely(!(staterr & E1000_RXDADV_STAT_TS))) 5487 if (staterr & E1000_RXDADV_STAT_TSIP) {
5165 return; 5488 u32 *stamp = (u32 *)skb->data;
5166 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) 5489 regval = le32_to_cpu(*(stamp + 2));
5167 return; 5490 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5491 skb_pull(skb, IGB_TS_HDR_LEN);
5492 } else {
5493 if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5494 return;
5168 5495
5169 regval = rd32(E1000_RXSTMPL); 5496 regval = rd32(E1000_RXSTMPL);
5170 regval |= (u64)rd32(E1000_RXSTMPH) << 32; 5497 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5498 }
5171 5499
5172 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 5500 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5173} 5501}
@@ -5190,7 +5518,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
5190{ 5518{
5191 struct igb_ring *rx_ring = q_vector->rx_ring; 5519 struct igb_ring *rx_ring = q_vector->rx_ring;
5192 struct net_device *netdev = rx_ring->netdev; 5520 struct net_device *netdev = rx_ring->netdev;
5193 struct pci_dev *pdev = rx_ring->pdev; 5521 struct device *dev = rx_ring->dev;
5194 union e1000_adv_rx_desc *rx_desc , *next_rxd; 5522 union e1000_adv_rx_desc *rx_desc , *next_rxd;
5195 struct igb_buffer *buffer_info , *next_buffer; 5523 struct igb_buffer *buffer_info , *next_buffer;
5196 struct sk_buff *skb; 5524 struct sk_buff *skb;
@@ -5230,9 +5558,9 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
5230 cleaned_count++; 5558 cleaned_count++;
5231 5559
5232 if (buffer_info->dma) { 5560 if (buffer_info->dma) {
5233 pci_unmap_single(pdev, buffer_info->dma, 5561 dma_unmap_single(dev, buffer_info->dma,
5234 rx_ring->rx_buffer_len, 5562 rx_ring->rx_buffer_len,
5235 PCI_DMA_FROMDEVICE); 5563 DMA_FROM_DEVICE);
5236 buffer_info->dma = 0; 5564 buffer_info->dma = 0;
5237 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) { 5565 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
5238 skb_put(skb, length); 5566 skb_put(skb, length);
@@ -5242,11 +5570,11 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
5242 } 5570 }
5243 5571
5244 if (length) { 5572 if (length) {
5245 pci_unmap_page(pdev, buffer_info->page_dma, 5573 dma_unmap_page(dev, buffer_info->page_dma,
5246 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 5574 PAGE_SIZE / 2, DMA_FROM_DEVICE);
5247 buffer_info->page_dma = 0; 5575 buffer_info->page_dma = 0;
5248 5576
5249 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, 5577 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
5250 buffer_info->page, 5578 buffer_info->page,
5251 buffer_info->page_offset, 5579 buffer_info->page_offset,
5252 length); 5580 length);
@@ -5275,7 +5603,8 @@ send_up:
5275 goto next_desc; 5603 goto next_desc;
5276 } 5604 }
5277 5605
5278 igb_rx_hwtstamp(q_vector, staterr, skb); 5606 if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
5607 igb_rx_hwtstamp(q_vector, staterr, skb);
5279 total_bytes += skb->len; 5608 total_bytes += skb->len;
5280 total_packets++; 5609 total_packets++;
5281 5610
@@ -5350,12 +5679,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5350 buffer_info->page_offset ^= PAGE_SIZE / 2; 5679 buffer_info->page_offset ^= PAGE_SIZE / 2;
5351 } 5680 }
5352 buffer_info->page_dma = 5681 buffer_info->page_dma =
5353 pci_map_page(rx_ring->pdev, buffer_info->page, 5682 dma_map_page(rx_ring->dev, buffer_info->page,
5354 buffer_info->page_offset, 5683 buffer_info->page_offset,
5355 PAGE_SIZE / 2, 5684 PAGE_SIZE / 2,
5356 PCI_DMA_FROMDEVICE); 5685 DMA_FROM_DEVICE);
5357 if (pci_dma_mapping_error(rx_ring->pdev, 5686 if (dma_mapping_error(rx_ring->dev,
5358 buffer_info->page_dma)) { 5687 buffer_info->page_dma)) {
5359 buffer_info->page_dma = 0; 5688 buffer_info->page_dma = 0;
5360 rx_ring->rx_stats.alloc_failed++; 5689 rx_ring->rx_stats.alloc_failed++;
5361 goto no_buffers; 5690 goto no_buffers;
@@ -5373,12 +5702,12 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
5373 buffer_info->skb = skb; 5702 buffer_info->skb = skb;
5374 } 5703 }
5375 if (!buffer_info->dma) { 5704 if (!buffer_info->dma) {
5376 buffer_info->dma = pci_map_single(rx_ring->pdev, 5705 buffer_info->dma = dma_map_single(rx_ring->dev,
5377 skb->data, 5706 skb->data,
5378 bufsz, 5707 bufsz,
5379 PCI_DMA_FROMDEVICE); 5708 DMA_FROM_DEVICE);
5380 if (pci_dma_mapping_error(rx_ring->pdev, 5709 if (dma_mapping_error(rx_ring->dev,
5381 buffer_info->dma)) { 5710 buffer_info->dma)) {
5382 buffer_info->dma = 0; 5711 buffer_info->dma = 0;
5383 rx_ring->rx_stats.alloc_failed++; 5712 rx_ring->rx_stats.alloc_failed++;
5384 goto no_buffers; 5713 goto no_buffers;
@@ -5555,6 +5884,16 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
5555 return 0; 5884 return 0;
5556 } 5885 }
5557 5886
5887 /*
5888 * Per-packet timestamping only works if all packets are
5889 * timestamped, so enable timestamping in all packets as
5890 * long as one rx filter was configured.
5891 */
5892 if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
5893 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
5894 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
5895 }
5896
5558 /* enable/disable TX */ 5897 /* enable/disable TX */
5559 regval = rd32(E1000_TSYNCTXCTL); 5898 regval = rd32(E1000_TSYNCTXCTL);
5560 regval &= ~E1000_TSYNCTXCTL_ENABLED; 5899 regval &= ~E1000_TSYNCTXCTL_ENABLED;
@@ -6131,19 +6470,25 @@ static void igb_vmm_control(struct igb_adapter *adapter)
6131 struct e1000_hw *hw = &adapter->hw; 6470 struct e1000_hw *hw = &adapter->hw;
6132 u32 reg; 6471 u32 reg;
6133 6472
6134 /* replication is not supported for 82575 */ 6473 switch (hw->mac.type) {
6135 if (hw->mac.type == e1000_82575) 6474 case e1000_82575:
6475 default:
6476 /* replication is not supported for 82575 */
6136 return; 6477 return;
6137 6478 case e1000_82576:
6138 /* enable replication vlan tag stripping */ 6479 /* notify HW that the MAC is adding vlan tags */
6139 reg = rd32(E1000_RPLOLR); 6480 reg = rd32(E1000_DTXCTL);
6140 reg |= E1000_RPLOLR_STRVLAN; 6481 reg |= E1000_DTXCTL_VLAN_ADDED;
6141 wr32(E1000_RPLOLR, reg); 6482 wr32(E1000_DTXCTL, reg);
6142 6483 case e1000_82580:
6143 /* notify HW that the MAC is adding vlan tags */ 6484 /* enable replication vlan tag stripping */
6144 reg = rd32(E1000_DTXCTL); 6485 reg = rd32(E1000_RPLOLR);
6145 reg |= E1000_DTXCTL_VLAN_ADDED; 6486 reg |= E1000_RPLOLR_STRVLAN;
6146 wr32(E1000_DTXCTL, reg); 6487 wr32(E1000_RPLOLR, reg);
6488 case e1000_i350:
6489 /* none of the above registers are supported by i350 */
6490 break;
6491 }
6147 6492
6148 if (adapter->vfs_allocated_count) { 6493 if (adapter->vfs_allocated_count) {
6149 igb_vmdq_set_loopback_pf(hw, true); 6494 igb_vmdq_set_loopback_pf(hw, true);
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 8afff07ff559..103b3aa1afc2 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -390,8 +390,6 @@ static void igbvf_get_wol(struct net_device *netdev,
390{ 390{
391 wol->supported = 0; 391 wol->supported = 0;
392 wol->wolopts = 0; 392 wol->wolopts = 0;
393
394 return;
395} 393}
396 394
397static int igbvf_set_wol(struct net_device *netdev, 395static int igbvf_set_wol(struct net_device *netdev,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1b1edad1eb5e..5e2b2a8c56c6 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -48,6 +48,7 @@
48#define DRV_VERSION "1.0.0-k0" 48#define DRV_VERSION "1.0.0-k0"
49char igbvf_driver_name[] = "igbvf"; 49char igbvf_driver_name[] = "igbvf";
50const char igbvf_driver_version[] = DRV_VERSION; 50const char igbvf_driver_version[] = DRV_VERSION;
51struct pm_qos_request_list *igbvf_driver_pm_qos_req;
51static const char igbvf_driver_string[] = 52static const char igbvf_driver_string[] =
52 "Intel(R) Virtual Function Network Driver"; 53 "Intel(R) Virtual Function Network Driver";
53static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 54static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
@@ -164,10 +165,10 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
164 buffer_info->page_offset ^= PAGE_SIZE / 2; 165 buffer_info->page_offset ^= PAGE_SIZE / 2;
165 } 166 }
166 buffer_info->page_dma = 167 buffer_info->page_dma =
167 pci_map_page(pdev, buffer_info->page, 168 dma_map_page(&pdev->dev, buffer_info->page,
168 buffer_info->page_offset, 169 buffer_info->page_offset,
169 PAGE_SIZE / 2, 170 PAGE_SIZE / 2,
170 PCI_DMA_FROMDEVICE); 171 DMA_FROM_DEVICE);
171 } 172 }
172 173
173 if (!buffer_info->skb) { 174 if (!buffer_info->skb) {
@@ -178,9 +179,9 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
178 } 179 }
179 180
180 buffer_info->skb = skb; 181 buffer_info->skb = skb;
181 buffer_info->dma = pci_map_single(pdev, skb->data, 182 buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
182 bufsz, 183 bufsz,
183 PCI_DMA_FROMDEVICE); 184 DMA_FROM_DEVICE);
184 } 185 }
185 /* Refresh the desc even if buffer_addrs didn't change because 186 /* Refresh the desc even if buffer_addrs didn't change because
186 * each write-back erases this info. */ 187 * each write-back erases this info. */
@@ -268,28 +269,28 @@ static bool igbvf_clean_rx_irq(struct igbvf_adapter *adapter,
268 prefetch(skb->data - NET_IP_ALIGN); 269 prefetch(skb->data - NET_IP_ALIGN);
269 buffer_info->skb = NULL; 270 buffer_info->skb = NULL;
270 if (!adapter->rx_ps_hdr_size) { 271 if (!adapter->rx_ps_hdr_size) {
271 pci_unmap_single(pdev, buffer_info->dma, 272 dma_unmap_single(&pdev->dev, buffer_info->dma,
272 adapter->rx_buffer_len, 273 adapter->rx_buffer_len,
273 PCI_DMA_FROMDEVICE); 274 DMA_FROM_DEVICE);
274 buffer_info->dma = 0; 275 buffer_info->dma = 0;
275 skb_put(skb, length); 276 skb_put(skb, length);
276 goto send_up; 277 goto send_up;
277 } 278 }
278 279
279 if (!skb_shinfo(skb)->nr_frags) { 280 if (!skb_shinfo(skb)->nr_frags) {
280 pci_unmap_single(pdev, buffer_info->dma, 281 dma_unmap_single(&pdev->dev, buffer_info->dma,
281 adapter->rx_ps_hdr_size, 282 adapter->rx_ps_hdr_size,
282 PCI_DMA_FROMDEVICE); 283 DMA_FROM_DEVICE);
283 skb_put(skb, hlen); 284 skb_put(skb, hlen);
284 } 285 }
285 286
286 if (length) { 287 if (length) {
287 pci_unmap_page(pdev, buffer_info->page_dma, 288 dma_unmap_page(&pdev->dev, buffer_info->page_dma,
288 PAGE_SIZE / 2, 289 PAGE_SIZE / 2,
289 PCI_DMA_FROMDEVICE); 290 DMA_FROM_DEVICE);
290 buffer_info->page_dma = 0; 291 buffer_info->page_dma = 0;
291 292
292 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, 293 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
293 buffer_info->page, 294 buffer_info->page,
294 buffer_info->page_offset, 295 buffer_info->page_offset,
295 length); 296 length);
@@ -369,15 +370,15 @@ static void igbvf_put_txbuf(struct igbvf_adapter *adapter,
369{ 370{
370 if (buffer_info->dma) { 371 if (buffer_info->dma) {
371 if (buffer_info->mapped_as_page) 372 if (buffer_info->mapped_as_page)
372 pci_unmap_page(adapter->pdev, 373 dma_unmap_page(&adapter->pdev->dev,
373 buffer_info->dma, 374 buffer_info->dma,
374 buffer_info->length, 375 buffer_info->length,
375 PCI_DMA_TODEVICE); 376 DMA_TO_DEVICE);
376 else 377 else
377 pci_unmap_single(adapter->pdev, 378 dma_unmap_single(&adapter->pdev->dev,
378 buffer_info->dma, 379 buffer_info->dma,
379 buffer_info->length, 380 buffer_info->length,
380 PCI_DMA_TODEVICE); 381 DMA_TO_DEVICE);
381 buffer_info->dma = 0; 382 buffer_info->dma = 0;
382 } 383 }
383 if (buffer_info->skb) { 384 if (buffer_info->skb) {
@@ -438,8 +439,8 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
438 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 439 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
439 tx_ring->size = ALIGN(tx_ring->size, 4096); 440 tx_ring->size = ALIGN(tx_ring->size, 4096);
440 441
441 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 442 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
442 &tx_ring->dma); 443 &tx_ring->dma, GFP_KERNEL);
443 444
444 if (!tx_ring->desc) 445 if (!tx_ring->desc)
445 goto err; 446 goto err;
@@ -480,8 +481,8 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
480 rx_ring->size = rx_ring->count * desc_len; 481 rx_ring->size = rx_ring->count * desc_len;
481 rx_ring->size = ALIGN(rx_ring->size, 4096); 482 rx_ring->size = ALIGN(rx_ring->size, 4096);
482 483
483 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 484 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
484 &rx_ring->dma); 485 &rx_ring->dma, GFP_KERNEL);
485 486
486 if (!rx_ring->desc) 487 if (!rx_ring->desc)
487 goto err; 488 goto err;
@@ -549,7 +550,8 @@ void igbvf_free_tx_resources(struct igbvf_ring *tx_ring)
549 vfree(tx_ring->buffer_info); 550 vfree(tx_ring->buffer_info);
550 tx_ring->buffer_info = NULL; 551 tx_ring->buffer_info = NULL;
551 552
552 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 553 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
554 tx_ring->dma);
553 555
554 tx_ring->desc = NULL; 556 tx_ring->desc = NULL;
555} 557}
@@ -574,13 +576,13 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
574 buffer_info = &rx_ring->buffer_info[i]; 576 buffer_info = &rx_ring->buffer_info[i];
575 if (buffer_info->dma) { 577 if (buffer_info->dma) {
576 if (adapter->rx_ps_hdr_size){ 578 if (adapter->rx_ps_hdr_size){
577 pci_unmap_single(pdev, buffer_info->dma, 579 dma_unmap_single(&pdev->dev, buffer_info->dma,
578 adapter->rx_ps_hdr_size, 580 adapter->rx_ps_hdr_size,
579 PCI_DMA_FROMDEVICE); 581 DMA_FROM_DEVICE);
580 } else { 582 } else {
581 pci_unmap_single(pdev, buffer_info->dma, 583 dma_unmap_single(&pdev->dev, buffer_info->dma,
582 adapter->rx_buffer_len, 584 adapter->rx_buffer_len,
583 PCI_DMA_FROMDEVICE); 585 DMA_FROM_DEVICE);
584 } 586 }
585 buffer_info->dma = 0; 587 buffer_info->dma = 0;
586 } 588 }
@@ -592,9 +594,10 @@ static void igbvf_clean_rx_ring(struct igbvf_ring *rx_ring)
592 594
593 if (buffer_info->page) { 595 if (buffer_info->page) {
594 if (buffer_info->page_dma) 596 if (buffer_info->page_dma)
595 pci_unmap_page(pdev, buffer_info->page_dma, 597 dma_unmap_page(&pdev->dev,
598 buffer_info->page_dma,
596 PAGE_SIZE / 2, 599 PAGE_SIZE / 2,
597 PCI_DMA_FROMDEVICE); 600 DMA_FROM_DEVICE);
598 put_page(buffer_info->page); 601 put_page(buffer_info->page);
599 buffer_info->page = NULL; 602 buffer_info->page = NULL;
600 buffer_info->page_dma = 0; 603 buffer_info->page_dma = 0;
@@ -1398,7 +1401,7 @@ static void igbvf_set_multi(struct net_device *netdev)
1398{ 1401{
1399 struct igbvf_adapter *adapter = netdev_priv(netdev); 1402 struct igbvf_adapter *adapter = netdev_priv(netdev);
1400 struct e1000_hw *hw = &adapter->hw; 1403 struct e1000_hw *hw = &adapter->hw;
1401 struct dev_mc_list *mc_ptr; 1404 struct netdev_hw_addr *ha;
1402 u8 *mta_list = NULL; 1405 u8 *mta_list = NULL;
1403 int i; 1406 int i;
1404 1407
@@ -1413,8 +1416,8 @@ static void igbvf_set_multi(struct net_device *netdev)
1413 1416
1414 /* prepare a packed array of only addresses. */ 1417 /* prepare a packed array of only addresses. */
1415 i = 0; 1418 i = 0;
1416 netdev_for_each_mc_addr(mc_ptr, netdev) 1419 netdev_for_each_mc_addr(ha, netdev)
1417 memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); 1420 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
1418 1421
1419 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); 1422 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
1420 kfree(mta_list); 1423 kfree(mta_list);
@@ -2104,9 +2107,9 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2104 buffer_info->time_stamp = jiffies; 2107 buffer_info->time_stamp = jiffies;
2105 buffer_info->next_to_watch = i; 2108 buffer_info->next_to_watch = i;
2106 buffer_info->mapped_as_page = false; 2109 buffer_info->mapped_as_page = false;
2107 buffer_info->dma = pci_map_single(pdev, skb->data, len, 2110 buffer_info->dma = dma_map_single(&pdev->dev, skb->data, len,
2108 PCI_DMA_TODEVICE); 2111 DMA_TO_DEVICE);
2109 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2112 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2110 goto dma_error; 2113 goto dma_error;
2111 2114
2112 2115
@@ -2127,12 +2130,12 @@ static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
2127 buffer_info->time_stamp = jiffies; 2130 buffer_info->time_stamp = jiffies;
2128 buffer_info->next_to_watch = i; 2131 buffer_info->next_to_watch = i;
2129 buffer_info->mapped_as_page = true; 2132 buffer_info->mapped_as_page = true;
2130 buffer_info->dma = pci_map_page(pdev, 2133 buffer_info->dma = dma_map_page(&pdev->dev,
2131 frag->page, 2134 frag->page,
2132 frag->page_offset, 2135 frag->page_offset,
2133 len, 2136 len,
2134 PCI_DMA_TODEVICE); 2137 DMA_TO_DEVICE);
2135 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 2138 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2136 goto dma_error; 2139 goto dma_error;
2137 } 2140 }
2138 2141
@@ -2644,16 +2647,16 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
2644 return err; 2647 return err;
2645 2648
2646 pci_using_dac = 0; 2649 pci_using_dac = 0;
2647 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); 2650 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
2648 if (!err) { 2651 if (!err) {
2649 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 2652 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
2650 if (!err) 2653 if (!err)
2651 pci_using_dac = 1; 2654 pci_using_dac = 1;
2652 } else { 2655 } else {
2653 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2656 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2654 if (err) { 2657 if (err) {
2655 err = pci_set_consistent_dma_mask(pdev, 2658 err = dma_set_coherent_mask(&pdev->dev,
2656 DMA_BIT_MASK(32)); 2659 DMA_BIT_MASK(32));
2657 if (err) { 2660 if (err) {
2658 dev_err(&pdev->dev, "No usable DMA " 2661 dev_err(&pdev->dev, "No usable DMA "
2659 "configuration, aborting\n"); 2662 "configuration, aborting\n");
@@ -2899,7 +2902,7 @@ static int __init igbvf_init_module(void)
2899 printk(KERN_INFO "%s\n", igbvf_copyright); 2902 printk(KERN_INFO "%s\n", igbvf_copyright);
2900 2903
2901 ret = pci_register_driver(&igbvf_driver); 2904 ret = pci_register_driver(&igbvf_driver);
2902 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name, 2905 igbvf_driver_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
2903 PM_QOS_DEFAULT_VALUE); 2906 PM_QOS_DEFAULT_VALUE);
2904 2907
2905 return ret; 2908 return ret;
@@ -2915,7 +2918,8 @@ module_init(igbvf_init_module);
2915static void __exit igbvf_exit_module(void) 2918static void __exit igbvf_exit_module(void)
2916{ 2919{
2917 pci_unregister_driver(&igbvf_driver); 2920 pci_unregister_driver(&igbvf_driver);
2918 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, igbvf_driver_name); 2921 pm_qos_remove_request(igbvf_driver_pm_qos_req);
2922 igbvf_driver_pm_qos_req = NULL;
2919} 2923}
2920module_exit(igbvf_exit_module); 2924module_exit(igbvf_exit_module);
2921 2925
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8f6197d647c0..e3b5e9490601 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1503,7 +1503,6 @@ static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1503 1503
1504 BARRIER(); 1504 BARRIER();
1505 1505
1506 dev->trans_start = jiffies;
1507 ip->tx_skbs[produce] = skb; /* Remember skb */ 1506 ip->tx_skbs[produce] = skb; /* Remember skb */
1508 produce = (produce + 1) & 127; 1507 produce = (produce + 1) & 127;
1509 ip->tx_pi = produce; 1508 ip->tx_pi = produce;
@@ -1665,7 +1664,7 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1665 1664
1666static void ioc3_set_multicast_list(struct net_device *dev) 1665static void ioc3_set_multicast_list(struct net_device *dev)
1667{ 1666{
1668 struct dev_mc_list *dmi; 1667 struct netdev_hw_addr *ha;
1669 struct ioc3_private *ip = netdev_priv(dev); 1668 struct ioc3_private *ip = netdev_priv(dev);
1670 struct ioc3 *ioc3 = ip->regs; 1669 struct ioc3 *ioc3 = ip->regs;
1671 u64 ehar = 0; 1670 u64 ehar = 0;
@@ -1689,8 +1688,8 @@ static void ioc3_set_multicast_list(struct net_device *dev)
1689 ip->ehar_h = 0xffffffff; 1688 ip->ehar_h = 0xffffffff;
1690 ip->ehar_l = 0xffffffff; 1689 ip->ehar_l = 0xffffffff;
1691 } else { 1690 } else {
1692 netdev_for_each_mc_addr(dmi, dev) { 1691 netdev_for_each_mc_addr(ha, dev) {
1693 char *addr = dmi->dmi_addr; 1692 char *addr = ha->addr;
1694 1693
1695 if (!(*addr & 1)) 1694 if (!(*addr & 1))
1696 continue; 1695 continue;
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 639bf9fb0279..72e3d2da9e9f 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -570,7 +570,7 @@ static int ipg_config_autoneg(struct net_device *dev)
570static void ipg_nic_set_multicast_list(struct net_device *dev) 570static void ipg_nic_set_multicast_list(struct net_device *dev)
571{ 571{
572 void __iomem *ioaddr = ipg_ioaddr(dev); 572 void __iomem *ioaddr = ipg_ioaddr(dev);
573 struct dev_mc_list *mc_list_ptr; 573 struct netdev_hw_addr *ha;
574 unsigned int hashindex; 574 unsigned int hashindex;
575 u32 hashtable[2]; 575 u32 hashtable[2];
576 u8 receivemode; 576 u8 receivemode;
@@ -609,9 +609,9 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
609 hashtable[1] = 0x00000000; 609 hashtable[1] = 0x00000000;
610 610
611 /* Cycle through all multicast addresses to filter. */ 611 /* Cycle through all multicast addresses to filter. */
612 netdev_for_each_mc_addr(mc_list_ptr, dev) { 612 netdev_for_each_mc_addr(ha, dev) {
613 /* Calculate CRC result for each multicast address. */ 613 /* Calculate CRC result for each multicast address. */
614 hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr, 614 hashindex = crc32_le(0xffffffff, ha->addr,
615 ETH_ALEN); 615 ETH_ALEN);
616 616
617 /* Use only the least significant 6 bits. */ 617 /* Use only the least significant 6 bits. */
@@ -1548,8 +1548,6 @@ static void ipg_reset_after_host_error(struct work_struct *work)
1548 container_of(work, struct ipg_nic_private, task.work); 1548 container_of(work, struct ipg_nic_private, task.work);
1549 struct net_device *dev = sp->dev; 1549 struct net_device *dev = sp->dev;
1550 1550
1551 IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL));
1552
1553 /* 1551 /*
1554 * Acknowledge HostError interrupt by resetting 1552 * Acknowledge HostError interrupt by resetting
1555 * IPG DMA and HOST. 1553 * IPG DMA and HOST.
@@ -1826,9 +1824,6 @@ static int ipg_nic_stop(struct net_device *dev)
1826 1824
1827 netif_stop_queue(dev); 1825 netif_stop_queue(dev);
1828 1826
1829 IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount);
1830 IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->rxdCheckedCount);
1831 IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount);
1832 IPG_DUMPTFDLIST(dev); 1827 IPG_DUMPTFDLIST(dev);
1833 1828
1834 do { 1829 do {
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index dfc2541bb556..6ce027355fcf 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -29,7 +29,7 @@
29/* GMII based PHY IDs */ 29/* GMII based PHY IDs */
30#define NS 0x2000 30#define NS 0x2000
31#define MARVELL 0x0141 31#define MARVELL 0x0141
32#define ICPLUS_PHY 0x243 32#define ICPLUS_PHY 0x243
33 33
34/* NIC Physical Layer Device MII register fields. */ 34/* NIC Physical Layer Device MII register fields. */
35#define MII_PHY_SELECTOR_IEEE8023 0x0001 35#define MII_PHY_SELECTOR_IEEE8023 0x0001
@@ -96,31 +96,31 @@ enum ipg_regs {
96}; 96};
97 97
98/* Ethernet MIB statistic register offsets. */ 98/* Ethernet MIB statistic register offsets. */
99#define IPG_OCTETRCVOK 0xA8 99#define IPG_OCTETRCVOK 0xA8
100#define IPG_MCSTOCTETRCVDOK 0xAC 100#define IPG_MCSTOCTETRCVDOK 0xAC
101#define IPG_BCSTOCTETRCVOK 0xB0 101#define IPG_BCSTOCTETRCVOK 0xB0
102#define IPG_FRAMESRCVDOK 0xB4 102#define IPG_FRAMESRCVDOK 0xB4
103#define IPG_MCSTFRAMESRCVDOK 0xB8 103#define IPG_MCSTFRAMESRCVDOK 0xB8
104#define IPG_BCSTFRAMESRCVDOK 0xBE 104#define IPG_BCSTFRAMESRCVDOK 0xBE
105#define IPG_MACCONTROLFRAMESRCVD 0xC6 105#define IPG_MACCONTROLFRAMESRCVD 0xC6
106#define IPG_FRAMETOOLONGERRRORS 0xC8 106#define IPG_FRAMETOOLONGERRRORS 0xC8
107#define IPG_INRANGELENGTHERRORS 0xCA 107#define IPG_INRANGELENGTHERRORS 0xCA
108#define IPG_FRAMECHECKSEQERRORS 0xCC 108#define IPG_FRAMECHECKSEQERRORS 0xCC
109#define IPG_FRAMESLOSTRXERRORS 0xCE 109#define IPG_FRAMESLOSTRXERRORS 0xCE
110#define IPG_OCTETXMTOK 0xD0 110#define IPG_OCTETXMTOK 0xD0
111#define IPG_MCSTOCTETXMTOK 0xD4 111#define IPG_MCSTOCTETXMTOK 0xD4
112#define IPG_BCSTOCTETXMTOK 0xD8 112#define IPG_BCSTOCTETXMTOK 0xD8
113#define IPG_FRAMESXMTDOK 0xDC 113#define IPG_FRAMESXMTDOK 0xDC
114#define IPG_MCSTFRAMESXMTDOK 0xE0 114#define IPG_MCSTFRAMESXMTDOK 0xE0
115#define IPG_FRAMESWDEFERREDXMT 0xE4 115#define IPG_FRAMESWDEFERREDXMT 0xE4
116#define IPG_LATECOLLISIONS 0xE8 116#define IPG_LATECOLLISIONS 0xE8
117#define IPG_MULTICOLFRAMES 0xEC 117#define IPG_MULTICOLFRAMES 0xEC
118#define IPG_SINGLECOLFRAMES 0xF0 118#define IPG_SINGLECOLFRAMES 0xF0
119#define IPG_BCSTFRAMESXMTDOK 0xF6 119#define IPG_BCSTFRAMESXMTDOK 0xF6
120#define IPG_CARRIERSENSEERRORS 0xF8 120#define IPG_CARRIERSENSEERRORS 0xF8
121#define IPG_MACCONTROLFRAMESXMTDOK 0xFA 121#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
122#define IPG_FRAMESABORTXSCOLLS 0xFC 122#define IPG_FRAMESABORTXSCOLLS 0xFC
123#define IPG_FRAMESWEXDEFERRAL 0xFE 123#define IPG_FRAMESWEXDEFERRAL 0xFE
124 124
125/* RMON statistic register offsets. */ 125/* RMON statistic register offsets. */
126#define IPG_ETHERSTATSCOLLISIONS 0x100 126#define IPG_ETHERSTATSCOLLISIONS 0x100
@@ -134,8 +134,8 @@ enum ipg_regs {
134#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120 134#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
135#define IPG_ETHERSTATSCRCALIGNERRORS 0x124 135#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
136#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128 136#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
137#define IPG_ETHERSTATSFRAGMENTS 0x12C 137#define IPG_ETHERSTATSFRAGMENTS 0x12C
138#define IPG_ETHERSTATSJABBERS 0x130 138#define IPG_ETHERSTATSJABBERS 0x130
139#define IPG_ETHERSTATSOCTETS 0x134 139#define IPG_ETHERSTATSOCTETS 0x134
140#define IPG_ETHERSTATSPKTS 0x138 140#define IPG_ETHERSTATSPKTS 0x138
141#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C 141#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
@@ -154,10 +154,10 @@ enum ipg_regs {
154#define IPG_ETHERSTATSDROPEVENTS 0xCE 154#define IPG_ETHERSTATSDROPEVENTS 0xCE
155 155
156/* Serial EEPROM offsets */ 156/* Serial EEPROM offsets */
157#define IPG_EEPROM_CONFIGPARAM 0x00 157#define IPG_EEPROM_CONFIGPARAM 0x00
158#define IPG_EEPROM_ASICCTRL 0x01 158#define IPG_EEPROM_ASICCTRL 0x01
159#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02 159#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
160#define IPG_EEPROM_SUBSYSTEMID 0x03 160#define IPG_EEPROM_SUBSYSTEMID 0x03
161#define IPG_EEPROM_STATIONADDRESS0 0x10 161#define IPG_EEPROM_STATIONADDRESS0 0x10
162#define IPG_EEPROM_STATIONADDRESS1 0x11 162#define IPG_EEPROM_STATIONADDRESS1 0x11
163#define IPG_EEPROM_STATIONADDRESS2 0x12 163#define IPG_EEPROM_STATIONADDRESS2 0x12
@@ -168,16 +168,16 @@ enum ipg_regs {
168 168
169/* IOBaseAddress */ 169/* IOBaseAddress */
170#define IPG_PIB_RSVD_MASK 0xFFFFFE01 170#define IPG_PIB_RSVD_MASK 0xFFFFFE01
171#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00 171#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
172#define IPG_PIB_IOBASEADDRIND 0x00000001 172#define IPG_PIB_IOBASEADDRIND 0x00000001
173 173
174/* MemBaseAddress */ 174/* MemBaseAddress */
175#define IPG_PMB_RSVD_MASK 0xFFFFFE07 175#define IPG_PMB_RSVD_MASK 0xFFFFFE07
176#define IPG_PMB_MEMBASEADDRIND 0x00000001 176#define IPG_PMB_MEMBASEADDRIND 0x00000001
177#define IPG_PMB_MEMMAPTYPE 0x00000006 177#define IPG_PMB_MEMMAPTYPE 0x00000006
178#define IPG_PMB_MEMMAPTYPE0 0x00000002 178#define IPG_PMB_MEMMAPTYPE0 0x00000002
179#define IPG_PMB_MEMMAPTYPE1 0x00000004 179#define IPG_PMB_MEMMAPTYPE1 0x00000004
180#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00 180#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
181 181
182/* ConfigStatus */ 182/* ConfigStatus */
183#define IPG_CS_RSVD_MASK 0xFFB0 183#define IPG_CS_RSVD_MASK 0xFFB0
@@ -196,20 +196,20 @@ enum ipg_regs {
196 196
197/* TFDList, TFC */ 197/* TFDList, TFC */
198#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF 198#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF
199#define IPG_TFC_FRAMEID 0x000000000000FFFF 199#define IPG_TFC_FRAMEID 0x000000000000FFFF
200#define IPG_TFC_WORDALIGN 0x0000000000030000 200#define IPG_TFC_WORDALIGN 0x0000000000030000
201#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000 201#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000
202#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000 202#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000
203#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000 203#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000
204#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000 204#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000
205#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000 205#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000
206#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000 206#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000
207#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000 207#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000
208#define IPG_TFC_TXINDICATE 0x0000000000400000 208#define IPG_TFC_TXINDICATE 0x0000000000400000
209#define IPG_TFC_TXDMAINDICATE 0x0000000000800000 209#define IPG_TFC_TXDMAINDICATE 0x0000000000800000
210#define IPG_TFC_FRAGCOUNT 0x000000000F000000 210#define IPG_TFC_FRAGCOUNT 0x000000000F000000
211#define IPG_TFC_VLANTAGINSERT 0x0000000010000000 211#define IPG_TFC_VLANTAGINSERT 0x0000000010000000
212#define IPG_TFC_TFDDONE 0x0000000080000000 212#define IPG_TFC_TFDDONE 0x0000000080000000
213#define IPG_TFC_VID 0x00000FFF00000000 213#define IPG_TFC_VID 0x00000FFF00000000
214#define IPG_TFC_CFI 0x0000100000000000 214#define IPG_TFC_CFI 0x0000100000000000
215#define IPG_TFC_USERPRIORITY 0x0000E00000000000 215#define IPG_TFC_USERPRIORITY 0x0000E00000000000
@@ -217,35 +217,35 @@ enum ipg_regs {
217/* TFDList, FragInfo */ 217/* TFDList, FragInfo */
218#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF 218#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
219#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF 219#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF
220#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL 220#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL
221 221
222/* RFD data structure masks. */ 222/* RFD data structure masks. */
223 223
224/* RFDList, RFS */ 224/* RFDList, RFS */
225#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF 225#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF
226#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF 226#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF
227#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000 227#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
228#define IPG_RFS_RXRUNTFRAME 0x0000000000020000 228#define IPG_RFS_RXRUNTFRAME 0x0000000000020000
229#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000 229#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000
230#define IPG_RFS_RXFCSERROR 0x0000000000080000 230#define IPG_RFS_RXFCSERROR 0x0000000000080000
231#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000 231#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000
232#define IPG_RFS_RXLENGTHERROR 0x0000000000200000 232#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
233#define IPG_RFS_VLANDETECTED 0x0000000000400000 233#define IPG_RFS_VLANDETECTED 0x0000000000400000
234#define IPG_RFS_TCPDETECTED 0x0000000000800000 234#define IPG_RFS_TCPDETECTED 0x0000000000800000
235#define IPG_RFS_TCPERROR 0x0000000001000000 235#define IPG_RFS_TCPERROR 0x0000000001000000
236#define IPG_RFS_UDPDETECTED 0x0000000002000000 236#define IPG_RFS_UDPDETECTED 0x0000000002000000
237#define IPG_RFS_UDPERROR 0x0000000004000000 237#define IPG_RFS_UDPERROR 0x0000000004000000
238#define IPG_RFS_IPDETECTED 0x0000000008000000 238#define IPG_RFS_IPDETECTED 0x0000000008000000
239#define IPG_RFS_IPERROR 0x0000000010000000 239#define IPG_RFS_IPERROR 0x0000000010000000
240#define IPG_RFS_FRAMESTART 0x0000000020000000 240#define IPG_RFS_FRAMESTART 0x0000000020000000
241#define IPG_RFS_FRAMEEND 0x0000000040000000 241#define IPG_RFS_FRAMEEND 0x0000000040000000
242#define IPG_RFS_RFDDONE 0x0000000080000000 242#define IPG_RFS_RFDDONE 0x0000000080000000
243#define IPG_RFS_TCI 0x0000FFFF00000000 243#define IPG_RFS_TCI 0x0000FFFF00000000
244 244
245/* RFDList, FragInfo */ 245/* RFDList, FragInfo */
246#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF 246#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
247#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF 247#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF
248#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL 248#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
249 249
250/* I/O Register masks. */ 250/* I/O Register masks. */
251 251
@@ -254,37 +254,37 @@ enum ipg_regs {
254 254
255/* Statistics Mask */ 255/* Statistics Mask */
256#define IPG_SM_ALL 0x0FFFFFFF 256#define IPG_SM_ALL 0x0FFFFFFF
257#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001 257#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
258#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002 258#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
259#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004 259#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
260#define IPG_SM_RXJUMBOFRAMES 0x00000008 260#define IPG_SM_RXJUMBOFRAMES 0x00000008
261#define IPG_SM_TCPCHECKSUMERRORS 0x00000010 261#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
262#define IPG_SM_IPCHECKSUMERRORS 0x00000020 262#define IPG_SM_IPCHECKSUMERRORS 0x00000020
263#define IPG_SM_UDPCHECKSUMERRORS 0x00000040 263#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
264#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080 264#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
265#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100 265#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
266#define IPG_SM_INRANGELENGTHERRORS 0x00000200 266#define IPG_SM_INRANGELENGTHERRORS 0x00000200
267#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400 267#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
268#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800 268#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
269#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000 269#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
270#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000 270#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
271#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000 271#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
272#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000 272#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
273#define IPG_SM_LATECOLLISIONS 0x00010000 273#define IPG_SM_LATECOLLISIONS 0x00010000
274#define IPG_SM_MULTICOLFRAMES 0x00020000 274#define IPG_SM_MULTICOLFRAMES 0x00020000
275#define IPG_SM_SINGLECOLFRAMES 0x00040000 275#define IPG_SM_SINGLECOLFRAMES 0x00040000
276#define IPG_SM_TXJUMBOFRAMES 0x00080000 276#define IPG_SM_TXJUMBOFRAMES 0x00080000
277#define IPG_SM_CARRIERSENSEERRORS 0x00100000 277#define IPG_SM_CARRIERSENSEERRORS 0x00100000
278#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000 278#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
279#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000 279#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
280#define IPG_SM_FRAMESWEXDEFERAL 0x00800000 280#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
281 281
282/* Countdown */ 282/* Countdown */
283#define IPG_CD_RSVD_MASK 0x0700FFFF 283#define IPG_CD_RSVD_MASK 0x0700FFFF
284#define IPG_CD_COUNT 0x0000FFFF 284#define IPG_CD_COUNT 0x0000FFFF
285#define IPG_CD_COUNTDOWNSPEED 0x01000000 285#define IPG_CD_COUNTDOWNSPEED 0x01000000
286#define IPG_CD_COUNTDOWNMODE 0x02000000 286#define IPG_CD_COUNTDOWNMODE 0x02000000
287#define IPG_CD_COUNTINTENABLED 0x04000000 287#define IPG_CD_COUNTINTENABLED 0x04000000
288 288
289/* TxDMABurstThresh */ 289/* TxDMABurstThresh */
290#define IPG_TB_RSVD_MASK 0xFF 290#define IPG_TB_RSVD_MASK 0xFF
@@ -653,15 +653,28 @@ enum ipg_regs {
653 * Miscellaneous macros. 653 * Miscellaneous macros.
654 */ 654 */
655 655
656/* Marco for printing debug statements. */ 656/* Macros for printing debug statements. */
657#ifdef IPG_DEBUG 657#ifdef IPG_DEBUG
658# define IPG_DEBUG_MSG(args...) 658# define IPG_DEBUG_MSG(fmt, args...) \
659# define IPG_DDEBUG_MSG(args...) printk(KERN_DEBUG "IPG: " args) 659do { \
660 if (0) \
661 printk(KERN_DEBUG "IPG: " fmt, ##args); \
662} while (0)
663# define IPG_DDEBUG_MSG(fmt, args...) \
664 printk(KERN_DEBUG "IPG: " fmt, ##args)
660# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args) 665# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
661# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args) 666# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
662#else 667#else
663# define IPG_DEBUG_MSG(args...) 668# define IPG_DEBUG_MSG(fmt, args...) \
664# define IPG_DDEBUG_MSG(args...) 669do { \
670 if (0) \
671 printk(KERN_DEBUG "IPG: " fmt, ##args); \
672} while (0)
673# define IPG_DDEBUG_MSG(fmt, args...) \
674do { \
675 if (0) \
676 printk(KERN_DEBUG "IPG: " fmt, ##args); \
677} while (0)
665# define IPG_DUMPRFDLIST(args) 678# define IPG_DUMPRFDLIST(args)
666# define IPG_DUMPTFDLIST(args) 679# define IPG_DUMPTFDLIST(args)
667#endif 680#endif
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index af10e97345ce..25bb2a015e18 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -397,5 +397,11 @@ config MCS_FIR
397 To compile it as a module, choose M here: the module will be called 397 To compile it as a module, choose M here: the module will be called
398 mcs7780. 398 mcs7780.
399 399
400config SH_IRDA
401 tristate "SuperH IrDA driver"
402 depends on IRDA && ARCH_SHMOBILE
403 help
404 Say Y here if your want to enable SuperH IrDA devices.
405
400endmenu 406endmenu
401 407
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index e030d47e2793..dfc64537f62f 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_VIA_FIR) += via-ircc.o
19obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o 19obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
20obj-$(CONFIG_MCS_FIR) += mcs7780.o 20obj-$(CONFIG_MCS_FIR) += mcs7780.o
21obj-$(CONFIG_AU1000_FIR) += au1k_ir.o 21obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
22obj-$(CONFIG_SH_IRDA) += sh_irda.o
22# SIR drivers 23# SIR drivers
23obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o 24obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
24obj-$(CONFIG_BFIN_SIR) += bfin_sir.o 25obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 28992c815cba..a3cb109006a5 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -753,18 +753,18 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
753 if(OldMessageCount > ((self->LineStatus+1) & 0x07)) 753 if(OldMessageCount > ((self->LineStatus+1) & 0x07))
754 { 754 {
755 self->rcvFramesOverflow = TRUE; 755 self->rcvFramesOverflow = TRUE;
756 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __func__); 756 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__);
757 } 757 }
758 758
759 if (ali_ircc_dma_receive_complete(self)) 759 if (ali_ircc_dma_receive_complete(self))
760 { 760 {
761 IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __func__); 761 IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__);
762 762
763 self->ier = IER_EOM; 763 self->ier = IER_EOM;
764 } 764 }
765 else 765 else
766 { 766 {
767 IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __func__); 767 IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__);
768 768
769 self->ier = IER_EOM | IER_TIMER; 769 self->ier = IER_EOM | IER_TIMER;
770 } 770 }
@@ -777,7 +777,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
777 if(OldMessageCount > ((self->LineStatus+1) & 0x07)) 777 if(OldMessageCount > ((self->LineStatus+1) & 0x07))
778 { 778 {
779 self->rcvFramesOverflow = TRUE; 779 self->rcvFramesOverflow = TRUE;
780 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __func__); 780 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__);
781 } 781 }
782 /* Disable Timer */ 782 /* Disable Timer */
783 switch_bank(iobase, BANK1); 783 switch_bank(iobase, BANK1);
@@ -942,7 +942,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
942 // benjamin 2000/11/10 06:32PM 942 // benjamin 2000/11/10 06:32PM
943 if (self->io.speed > 115200) 943 if (self->io.speed > 115200)
944 { 944 {
945 IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __func__ ); 945 IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ );
946 946
947 self->ier = IER_EOM; 947 self->ier = IER_EOM;
948 // SetCOMInterrupts(self, TRUE); 948 // SetCOMInterrupts(self, TRUE);
@@ -970,7 +970,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
970 970
971 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); 971 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
972 972
973 IRDA_DEBUG(2, "%s(), setting speed = %d \n", __func__ , baud); 973 IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud);
974 974
975 /* This function *must* be called with irq off and spin-lock. 975 /* This function *must* be called with irq off and spin-lock.
976 * - Jean II */ 976 * - Jean II */
@@ -1500,7 +1500,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
1500 diff = self->now.tv_usec - self->stamp.tv_usec; 1500 diff = self->now.tv_usec - self->stamp.tv_usec;
1501 /* self->stamp is set from ali_ircc_dma_receive_complete() */ 1501 /* self->stamp is set from ali_ircc_dma_receive_complete() */
1502 1502
1503 IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __func__ , diff); 1503 IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff);
1504 1504
1505 if (diff < 0) 1505 if (diff < 0)
1506 diff += 1000000; 1506 diff += 1000000;
@@ -1641,7 +1641,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
1641 tmp = inb(iobase+FIR_LCR_B); 1641 tmp = inb(iobase+FIR_LCR_B);
1642 tmp &= ~0x20; // Disable SIP 1642 tmp &= ~0x20; // Disable SIP
1643 outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B); 1643 outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
1644 IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __func__ , inb(iobase+FIR_LCR_B)); 1644 IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
1645 1645
1646 outb(0, iobase+FIR_LSR); 1646 outb(0, iobase+FIR_LSR);
1647 1647
@@ -1768,7 +1768,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
1768 //switch_bank(iobase, BANK0); 1768 //switch_bank(iobase, BANK0);
1769 tmp = inb(iobase+FIR_LCR_B); 1769 tmp = inb(iobase+FIR_LCR_B);
1770 outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM 1770 outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
1771 IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __func__ , inb(iobase+FIR_LCR_B)); 1771 IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
1772 1772
1773 /* Set Rx Threshold */ 1773 /* Set Rx Threshold */
1774 switch_bank(iobase, BANK1); 1774 switch_bank(iobase, BANK1);
@@ -1840,7 +1840,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1840 /* Check for errors */ 1840 /* Check for errors */
1841 if ((status & 0xd8) || self->rcvFramesOverflow || (len==0)) 1841 if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
1842 { 1842 {
1843 IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ ); 1843 IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ );
1844 1844
1845 /* Skip frame */ 1845 /* Skip frame */
1846 self->netdev->stats.rx_errors++; 1846 self->netdev->stats.rx_errors++;
@@ -1850,29 +1850,29 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1850 if (status & LSR_FIFO_UR) 1850 if (status & LSR_FIFO_UR)
1851 { 1851 {
1852 self->netdev->stats.rx_frame_errors++; 1852 self->netdev->stats.rx_frame_errors++;
1853 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ ); 1853 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ );
1854 } 1854 }
1855 if (status & LSR_FRAME_ERROR) 1855 if (status & LSR_FRAME_ERROR)
1856 { 1856 {
1857 self->netdev->stats.rx_frame_errors++; 1857 self->netdev->stats.rx_frame_errors++;
1858 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ ); 1858 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ );
1859 } 1859 }
1860 1860
1861 if (status & LSR_CRC_ERROR) 1861 if (status & LSR_CRC_ERROR)
1862 { 1862 {
1863 self->netdev->stats.rx_crc_errors++; 1863 self->netdev->stats.rx_crc_errors++;
1864 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ ); 1864 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ );
1865 } 1865 }
1866 1866
1867 if(self->rcvFramesOverflow) 1867 if(self->rcvFramesOverflow)
1868 { 1868 {
1869 self->netdev->stats.rx_frame_errors++; 1869 self->netdev->stats.rx_frame_errors++;
1870 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ ); 1870 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ );
1871 } 1871 }
1872 if(len == 0) 1872 if(len == 0)
1873 { 1873 {
1874 self->netdev->stats.rx_frame_errors++; 1874 self->netdev->stats.rx_frame_errors++;
1875 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ ); 1875 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ );
1876 } 1876 }
1877 } 1877 }
1878 else 1878 else
@@ -1884,7 +1884,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1884 val = inb(iobase+FIR_BSR); 1884 val = inb(iobase+FIR_BSR);
1885 if ((val& BSR_FIFO_NOT_EMPTY)== 0x80) 1885 if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
1886 { 1886 {
1887 IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __func__ ); 1887 IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ );
1888 1888
1889 /* Put this entry back in fifo */ 1889 /* Put this entry back in fifo */
1890 st_fifo->head--; 1890 st_fifo->head--;
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index b5cbd39d0685..a3d696a9456a 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -546,7 +546,6 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
546 546
547 dev_kfree_skb(skb); 547 dev_kfree_skb(skb);
548 aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1); 548 aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
549 dev->trans_start = jiffies;
550 return NETDEV_TX_OK; 549 return NETDEV_TX_OK;
551} 550}
552 551
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b7e6625ca75e..48bd5ec9f29b 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -1002,8 +1002,6 @@ toshoboe_hard_xmit (struct sk_buff *skb, struct net_device *dev)
1002 1002
1003 toshoboe_checkstuck (self); 1003 toshoboe_checkstuck (self);
1004 1004
1005 dev->trans_start = jiffies;
1006
1007 /* Check if we need to change the speed */ 1005 /* Check if we need to change the speed */
1008 /* But not now. Wait after transmission if mtt not required */ 1006 /* But not now. Wait after transmission if mtt not required */
1009 speed=irda_get_next_speed(skb); 1007 speed=irda_get_next_speed(skb);
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 2c9b3af16612..4441fa3389c2 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -839,7 +839,7 @@ static void irda_usb_receive(struct urb *urb)
839 /* Usually precursor to a hot-unplug on OHCI. */ 839 /* Usually precursor to a hot-unplug on OHCI. */
840 default: 840 default:
841 self->netdev->stats.rx_errors++; 841 self->netdev->stats.rx_errors++;
842 IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags); 842 IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
843 break; 843 break;
844 } 844 }
845 /* If we received an error, we don't want to resubmit the 845 /* If we received an error, we don't want to resubmit the
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index c0e0bb9401d3..5b1036ac38d7 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -434,8 +434,6 @@ static void mcs_unwrap_mir(struct mcs_cb *mcs, __u8 *buf, int len)
434 434
435 mcs->netdev->stats.rx_packets++; 435 mcs->netdev->stats.rx_packets++;
436 mcs->netdev->stats.rx_bytes += new_len; 436 mcs->netdev->stats.rx_bytes += new_len;
437
438 return;
439} 437}
440 438
441/* Unwrap received packets at FIR speed. A 32 bit crc_ccitt checksum is 439/* Unwrap received packets at FIR speed. A 32 bit crc_ccitt checksum is
@@ -487,8 +485,6 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
487 485
488 mcs->netdev->stats.rx_packets++; 486 mcs->netdev->stats.rx_packets++;
489 mcs->netdev->stats.rx_bytes += new_len; 487 mcs->netdev->stats.rx_bytes += new_len;
490
491 return;
492} 488}
493 489
494 490
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 1a54f6bb68c5..c192c31e4c5c 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -556,7 +556,6 @@ static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
556 } 556 }
557 557
558 dev_kfree_skb(skb); 558 dev_kfree_skb(skb);
559 dev->trans_start = jiffies;
560 return NETDEV_TX_OK; 559 return NETDEV_TX_OK;
561} 560}
562 561
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 1dcdce0631aa..da2705061a60 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -715,8 +715,6 @@ static int sa1100_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
715 Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE; 715 Ser2HSCR0 = si->hscr0 | HSCR0_HSSP | HSCR0_TXE;
716 } 716 }
717 717
718 dev->trans_start = jiffies;
719
720 return NETDEV_TX_OK; 718 return NETDEV_TX_OK;
721} 719}
722 720
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
new file mode 100644
index 000000000000..9a828b06a57e
--- /dev/null
+++ b/drivers/net/irda/sh_irda.c
@@ -0,0 +1,865 @@
1/*
2 * SuperH IrDA Driver
3 *
4 * Copyright (C) 2010 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 *
7 * Based on sh_sir.c
8 * Copyright (C) 2009 Renesas Solutions Corp.
9 * Copyright 2006-2009 Analog Devices Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16/*
17 * CAUTION
18 *
19 * This driver is very simple.
20 * So, it doesn't have below support now
21 * - MIR/FIR support
22 * - DMA transfer support
23 * - FIFO mode support
24 */
25#include <linux/module.h>
26#include <linux/platform_device.h>
27#include <linux/clk.h>
28#include <net/irda/wrapper.h>
29#include <net/irda/irda_device.h>
30
31#define DRIVER_NAME "sh_irda"
32
33#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
34#define __IRDARAM_LEN 0x13FF
35#else
36#define __IRDARAM_LEN 0x1039
37#endif
38
39#define IRTMR 0x1F00 /* Transfer mode */
40#define IRCFR 0x1F02 /* Configuration */
41#define IRCTR 0x1F04 /* IR control */
42#define IRTFLR 0x1F20 /* Transmit frame length */
43#define IRTCTR 0x1F22 /* Transmit control */
44#define IRRFLR 0x1F40 /* Receive frame length */
45#define IRRCTR 0x1F42 /* Receive control */
46#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */
47#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */
48#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */
49#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */
50#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */
51#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */
52#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */
53#define CRCCTR 0x1F80 /* CRC engine control */
54#define CRCIR 0x1F86 /* CRC engine input data */
55#define CRCCR 0x1F8A /* CRC engine calculation */
56#define CRCOR 0x1F8E /* CRC engine output data */
57#define FIFOCP 0x1FC0 /* FIFO current pointer */
58#define FIFOFP 0x1FC2 /* FIFO follow pointer */
59#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */
60#define FIFORSOR 0x1FC6 /* FIFO receive status OR */
61#define FIFOSEL 0x1FC8 /* FIFO select */
62#define FIFORS 0x1FCA /* FIFO receive status */
63#define FIFORFL 0x1FCC /* FIFO receive frame length */
64#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */
65#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */
66#define BIFCTL 0x1FD2 /* BUS interface control */
67#define IRDARAM 0x0000 /* IrDA buffer RAM */
68#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
69
70/* IRTMR */
71#define TMD_MASK (0x3 << 14) /* Transfer Mode */
72#define TMD_SIR (0x0 << 14)
73#define TMD_MIR (0x3 << 14)
74#define TMD_FIR (0x2 << 14)
75
76#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */
77#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */
78#define SIM (1 << 0) /* SIR Interrupt Mask */
79#define xIM_MASK (FIFORIM | MIM | SIM)
80
81/* IRCFR */
82#define RTO_SHIFT 8 /* shift for Receive Timeout */
83#define RTO (0x3 << RTO_SHIFT)
84
85/* IRTCTR */
86#define ARMOD (1 << 15) /* Auto-Receive Mode */
87#define TE (1 << 0) /* Transmit Enable */
88
89/* IRRFLR */
90#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */
91
92/* IRRCTR */
93#define RE (1 << 0) /* Receive Enable */
94
95/*
96 * SIRISR, SIRIMR, SIRICR,
97 * MFIRISR, MFIRIMR, MFIRICR
98 */
99#define FRE (1 << 15) /* Frame Receive End */
100#define TROV (1 << 11) /* Transfer Area Overflow */
101#define xIR_9 (1 << 9)
102#define TOT xIR_9 /* for SIR Timeout */
103#define ABTD xIR_9 /* for MIR/FIR Abort Detection */
104#define xIR_8 (1 << 8)
105#define FER xIR_8 /* for SIR Framing Error */
106#define CRCER xIR_8 /* for MIR/FIR CRC error */
107#define FTE (1 << 7) /* Frame Transmit End */
108#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE)
109
110/* SIRBCR */
111#define BRC_MASK (0x3F) /* mask for Baud Rate Count */
112
113/* CRCCTR */
114#define CRC_RST (1 << 15) /* CRC Engine Reset */
115#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */
116
117/* CRCIR */
118#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */
119
120/************************************************************************
121
122
123 enum / structure
124
125
126************************************************************************/
/* Operating mode of the IrDA block; selects which xir hook table is active. */
enum sh_irda_mode {
	SH_IRDA_NONE = 0,	/* block idle / no mode selected */
	SH_IRDA_SIR,		/* serial IR, up to 115.2 kb/s */
	SH_IRDA_MIR,		/* medium IR (not supported by this driver yet) */
	SH_IRDA_FIR,		/* fast IR (not supported by this driver yet) */
};
133
struct sh_irda_self;

/*
 * Per-mode interrupt handler hooks; one table per sh_irda_mode.
 * Names mirror the SIRISR/MFIRISR interrupt source bits they service.
 */
struct sh_irda_xir_func {
	int (*xir_fre) (struct sh_irda_self *self);	/* FRE: Frame Receive End */
	int (*xir_trov) (struct sh_irda_self *self);	/* TROV: Transfer Area Overflow */
	int (*xir_9) (struct sh_irda_self *self);	/* bit 9: TOT (SIR) / ABTD (MIR/FIR) */
	int (*xir_8) (struct sh_irda_self *self);	/* bit 8: FER (SIR) / CRCER (MIR/FIR) */
	int (*xir_fte) (struct sh_irda_self *self);	/* FTE: Frame Transmit End */
};
142
/* Driver private state, one instance per probed device. */
struct sh_irda_self {
	void __iomem *membase;		/* mapped register window */
	unsigned int irq;
	struct clk *clk;

	struct net_device *ndev;

	struct irlap_cb *irlap;		/* IrLAP layer instance */
	struct qos_info qos;

	iobuff_t tx_buff;		/* IrDA wrapper TX buffer */
	iobuff_t rx_buff;		/* IrDA wrapper RX buffer */

	enum sh_irda_mode mode;
	spinlock_t lock;		/* serializes all register access */

	struct sh_irda_xir_func *xir_func;	/* hooks for the current mode */
};
161
162/************************************************************************
163
164
165 common function
166
167
168************************************************************************/
169static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
170{
171 unsigned long flags;
172
173 spin_lock_irqsave(&self->lock, flags);
174 iowrite16(data, self->membase + offset);
175 spin_unlock_irqrestore(&self->lock, flags);
176}
177
178static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
179{
180 unsigned long flags;
181 u16 ret;
182
183 spin_lock_irqsave(&self->lock, flags);
184 ret = ioread16(self->membase + offset);
185 spin_unlock_irqrestore(&self->lock, flags);
186
187 return ret;
188}
189
190static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
191 u16 mask, u16 data)
192{
193 unsigned long flags;
194 u16 old, new;
195
196 spin_lock_irqsave(&self->lock, flags);
197 old = ioread16(self->membase + offset);
198 new = (old & ~mask) | data;
199 if (old != new)
200 iowrite16(data, self->membase + offset);
201 spin_unlock_irqrestore(&self->lock, flags);
202}
203
204/************************************************************************
205
206
207 mode function
208
209
210************************************************************************/
211/*=====================================
212 *
213 * common
214 *
215 *=====================================*/
216static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
217{
218 struct device *dev = &self->ndev->dev;
219
220 sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
221 dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
222}
223
224static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
225{
226 struct device *dev = &self->ndev->dev;
227
228 if (SH_IRDA_SIR != self->mode)
229 interval = 0;
230
231 if (interval < 0 || interval > 2) {
232 dev_err(dev, "unsupported timeout interval\n");
233 return -EINVAL;
234 }
235
236 sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
237 return 0;
238}
239
240static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
241{
242 struct device *dev = &self->ndev->dev;
243 u16 val;
244
245 if (baudrate < 0)
246 return 0;
247
248 if (SH_IRDA_SIR != self->mode) {
249 dev_err(dev, "it is not SIR mode\n");
250 return -EINVAL;
251 }
252
253 /*
254 * Baud rate (bits/s) =
255 * (48 MHz / 26) / (baud rate counter value + 1) x 16
256 */
257 val = (48000000 / 26 / 16 / baudrate) - 1;
258 dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
259
260 sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);
261
262 return 0;
263}
264
265static int xir_get_rcv_length(struct sh_irda_self *self)
266{
267 return RFL_MASK & sh_irda_read(self, IRRFLR);
268}
269
270/*=====================================
271 *
272 * NONE MODE
273 *
274 *=====================================*/
275static int xir_fre(struct sh_irda_self *self)
276{
277 struct device *dev = &self->ndev->dev;
278 dev_err(dev, "none mode: frame recv\n");
279 return 0;
280}
281
282static int xir_trov(struct sh_irda_self *self)
283{
284 struct device *dev = &self->ndev->dev;
285 dev_err(dev, "none mode: buffer ram over\n");
286 return 0;
287}
288
289static int xir_9(struct sh_irda_self *self)
290{
291 struct device *dev = &self->ndev->dev;
292 dev_err(dev, "none mode: time over\n");
293 return 0;
294}
295
296static int xir_8(struct sh_irda_self *self)
297{
298 struct device *dev = &self->ndev->dev;
299 dev_err(dev, "none mode: framing error\n");
300 return 0;
301}
302
303static int xir_fte(struct sh_irda_self *self)
304{
305 struct device *dev = &self->ndev->dev;
306 dev_err(dev, "none mode: frame transmit end\n");
307 return 0;
308}
309
/* irq dispatch table used while no mode is selected: log and ignore */
static struct sh_irda_xir_func xir_func = {
	.xir_fre	= xir_fre,
	.xir_trov	= xir_trov,
	.xir_9		= xir_9,
	.xir_8		= xir_8,
	.xir_fte	= xir_fte,
};
317
318/*=====================================
319 *
320 * MIR/FIR MODE
321 *
322 * MIR/FIR are not supported now
323 *=====================================*/
/*
 * irq dispatch table for MIR/FIR: these modes are not implemented,
 * so the NONE-mode stubs are reused to log any unexpected irq.
 */
static struct sh_irda_xir_func mfir_func = {
	.xir_fre	= xir_fre,
	.xir_trov	= xir_trov,
	.xir_9		= xir_9,
	.xir_8		= xir_8,
	.xir_fte	= xir_fte,
};
331
332/*=====================================
333 *
334 * SIR MODE
335 *
336 *=====================================*/
/*
 * SIR "frame received" handler.
 *
 * Drains the received frame out of the chip's IRDARAM with 16bit
 * accesses, feeds it byte by byte to the IrDA async unwrapper, then
 * re-enables reception for the next frame.
 */
static int sir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	u16 data16;
	u8 *data = (u8 *)&data16;	/* byte view of the last 16bit read */
	int len = xir_get_rcv_length(self);	/* frame length from IRRFLR */
	int i, j;

	/* clamp to the size of the on-chip buffer RAM */
	if (len > IRDARAM_LEN)
		len = IRDARAM_LEN;

	dev_dbg(dev, "frame recv length = %d\n", len);

	for (i = 0; i < len; i++) {
		j = i % 2;
		/* fetch a fresh 16bit word on every even byte index */
		if (!j)
			data16 = sh_irda_read(self, IRDARAM + i);

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, data[j]);
	}
	self->ndev->last_rx = jiffies;

	/* re-arm the receiver */
	sh_irda_rcv_ctrl(self, 1);

	return 0;
}
364
365static int sir_trov(struct sh_irda_self *self)
366{
367 struct device *dev = &self->ndev->dev;
368
369 dev_err(dev, "buffer ram over\n");
370 sh_irda_rcv_ctrl(self, 1);
371 return 0;
372}
373
374static int sir_tot(struct sh_irda_self *self)
375{
376 struct device *dev = &self->ndev->dev;
377
378 dev_err(dev, "time over\n");
379 sh_irda_set_baudrate(self, 9600);
380 sh_irda_rcv_ctrl(self, 1);
381 return 0;
382}
383
384static int sir_fer(struct sh_irda_self *self)
385{
386 struct device *dev = &self->ndev->dev;
387
388 dev_err(dev, "framing error\n");
389 sh_irda_rcv_ctrl(self, 1);
390 return 0;
391}
392
393static int sir_fte(struct sh_irda_self *self)
394{
395 struct device *dev = &self->ndev->dev;
396
397 dev_dbg(dev, "frame transmit end\n");
398 netif_wake_queue(self->ndev);
399
400 return 0;
401}
402
/* irq dispatch table while the controller runs in SIR mode */
static struct sh_irda_xir_func sir_func = {
	.xir_fre	= sir_fre,
	.xir_trov	= sir_trov,
	.xir_9		= sir_tot,
	.xir_8		= sir_fer,
	.xir_fte	= sir_fte,
};
410
411static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
412{
413 struct device *dev = &self->ndev->dev;
414 struct sh_irda_xir_func *func;
415 const char *name;
416 u16 data;
417
418 switch (mode) {
419 case SH_IRDA_SIR:
420 name = "SIR";
421 data = TMD_SIR;
422 func = &sir_func;
423 break;
424 case SH_IRDA_MIR:
425 name = "MIR";
426 data = TMD_MIR;
427 func = &mfir_func;
428 break;
429 case SH_IRDA_FIR:
430 name = "FIR";
431 data = TMD_FIR;
432 func = &mfir_func;
433 break;
434 default:
435 name = "NONE";
436 data = 0;
437 func = &xir_func;
438 break;
439 }
440
441 self->mode = mode;
442 self->xir_func = func;
443 sh_irda_update_bits(self, IRTMR, TMD_MASK, data);
444
445 dev_dbg(dev, "switch to %s mode", name);
446}
447
448/************************************************************************
449
450
451 irq function
452
453
454************************************************************************/
/*
 * (Re)program the interrupt masks for the current mode.
 *
 * All interrupt sources are first masked and any pending status is
 * cleared; afterwards only the sources belonging to the active mode
 * (SIR or MIR/FIR) are unmasked again.  In NONE mode everything
 * stays masked.
 */
static void sh_irda_set_irq_mask(struct sh_irda_self *self)
{
	u16 tmr_hole;	/* mask bit to clear in IRTMR for this mode */
	u16 xir_reg;	/* per-mode interrupt mask register */

	/* set all mask */
	sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK);
	sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);

	/* clear irq */
	sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);

	switch (self->mode) {
	case SH_IRDA_SIR:
		tmr_hole	= SIM;
		xir_reg		= SIRIMR;
		break;
	case SH_IRDA_MIR:
	case SH_IRDA_FIR:
		tmr_hole	= MIM;
		xir_reg		= MFIRIMR;
		break;
	default:	/* NONE: leave everything masked */
		tmr_hole	= 0;
		xir_reg		= 0;
		break;
	}

	/* open mask */
	if (xir_reg) {
		sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
		sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
	}
}
491
/*
 * Interrupt handler: ack all pending SIR events, then dispatch each
 * one to the handler table selected by sh_irda_set_mode().
 */
static irqreturn_t sh_irda_irq(int irq, void *dev_id)
{
	struct sh_irda_self *self = dev_id;
	struct sh_irda_xir_func *func = self->xir_func;
	u16 isr = sh_irda_read(self, SIRISR);

	/* clear irq */
	sh_irda_write(self, SIRICR, isr);

	if (isr & FRE)
		func->xir_fre(self);	/* frame received */
	if (isr & TROV)
		func->xir_trov(self);	/* buffer RAM overflow */
	if (isr & xIR_9)
		func->xir_9(self);	/* timeout (sir_tot in SIR mode) */
	if (isr & xIR_8)
		func->xir_8(self);	/* framing error (sir_fer in SIR mode) */
	if (isr & FTE)
		func->xir_fte(self);	/* frame transmit end */

	return IRQ_HANDLED;
}
514
515/************************************************************************
516
517
518 CRC function
519
520
521************************************************************************/
/* reset the hardware CRC engine */
static void sh_irda_crc_reset(struct sh_irda_self *self)
{
	sh_irda_write(self, CRCCTR, CRC_RST);
}
526
/* feed one input value (masked to CRC_IN_MASK) into the CRC engine */
static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
{
	sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
}
531
/* number of values fed into the CRC engine (CT field of CRCCTR) */
static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
{
	return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
}
536
/* read the current CRC result register */
static u16 sh_irda_crc_out(struct sh_irda_self *self)
{
	return sh_irda_read(self, CRCOR);
}
541
542static int sh_irda_crc_init(struct sh_irda_self *self)
543{
544 struct device *dev = &self->ndev->dev;
545 int ret = -EIO;
546 u16 val;
547
548 sh_irda_crc_reset(self);
549
550 sh_irda_crc_add(self, 0xCC);
551 sh_irda_crc_add(self, 0xF5);
552 sh_irda_crc_add(self, 0xF1);
553 sh_irda_crc_add(self, 0xA7);
554
555 val = sh_irda_crc_cnt(self);
556 if (4 != val) {
557 dev_err(dev, "CRC count error %x\n", val);
558 goto crc_init_out;
559 }
560
561 val = sh_irda_crc_out(self);
562 if (0x51DF != val) {
563 dev_err(dev, "CRC result error%x\n", val);
564 goto crc_init_out;
565 }
566
567 ret = 0;
568
569crc_init_out:
570
571 sh_irda_crc_reset(self);
572 return ret;
573}
574
575/************************************************************************
576
577
578 iobuf function
579
580
581************************************************************************/
582static void sh_irda_remove_iobuf(struct sh_irda_self *self)
583{
584 kfree(self->rx_buff.head);
585
586 self->tx_buff.head = NULL;
587 self->tx_buff.data = NULL;
588 self->rx_buff.head = NULL;
589 self->rx_buff.data = NULL;
590}
591
592static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
593{
594 if (self->rx_buff.head ||
595 self->tx_buff.head) {
596 dev_err(&self->ndev->dev, "iobuff has already existed.");
597 return -EINVAL;
598 }
599
600 /* rx_buff */
601 self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
602 if (!self->rx_buff.head)
603 return -ENOMEM;
604
605 self->rx_buff.truesize = rxsize;
606 self->rx_buff.in_frame = FALSE;
607 self->rx_buff.state = OUTSIDE_FRAME;
608 self->rx_buff.data = self->rx_buff.head;
609
610 /* tx_buff */
611 self->tx_buff.head = self->membase + IRDARAM;
612 self->tx_buff.truesize = IRDARAM_LEN;
613
614 return 0;
615}
616
617/************************************************************************
618
619
620 net_device_ops function
621
622
623************************************************************************/
624static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
625{
626 struct sh_irda_self *self = netdev_priv(ndev);
627 struct device *dev = &self->ndev->dev;
628 int speed = irda_get_next_speed(skb);
629 int ret;
630
631 dev_dbg(dev, "hard xmit\n");
632
633 netif_stop_queue(ndev);
634 sh_irda_rcv_ctrl(self, 0);
635
636 ret = sh_irda_set_baudrate(self, speed);
637 if (ret < 0)
638 return ret;
639
640 self->tx_buff.len = 0;
641 if (skb->len) {
642 unsigned long flags;
643
644 spin_lock_irqsave(&self->lock, flags);
645 self->tx_buff.len = async_wrap_skb(skb,
646 self->tx_buff.head,
647 self->tx_buff.truesize);
648 spin_unlock_irqrestore(&self->lock, flags);
649
650 if (self->tx_buff.len > self->tx_buff.truesize)
651 self->tx_buff.len = self->tx_buff.truesize;
652
653 sh_irda_write(self, IRTFLR, self->tx_buff.len);
654 sh_irda_write(self, IRTCTR, ARMOD | TE);
655 }
656
657 dev_kfree_skb(skb);
658
659 return 0;
660}
661
static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	/*
	 * FIXME
	 *
	 * The irda framework expects an ndo_do_ioctl hook, but this
	 * driver has nothing to handle here yet.
	 */
	return 0;
}
672
673static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
674{
675 struct sh_irda_self *self = netdev_priv(ndev);
676
677 return &self->ndev->stats;
678}
679
/*
 * ndo_open: power up the controller, self-test the CRC engine, bring
 * the link up in SIR mode at 9600 baud and attach the IrLAP layer.
 * On any failure the clock is disabled again and the error returned.
 */
static int sh_irda_open(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	int err;

	clk_enable(self->clk);
	err = sh_irda_crc_init(self);	/* hardware self test */
	if (err)
		goto open_err;

	sh_irda_set_mode(self, SH_IRDA_SIR);
	sh_irda_set_timeout(self, 2);
	sh_irda_set_baudrate(self, 9600);

	self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENODEV;
		goto open_err;
	}

	netif_start_queue(ndev);
	sh_irda_rcv_ctrl(self, 1);
	sh_irda_set_irq_mask(self);	/* unmask irqs last, once ready */

	dev_info(&ndev->dev, "opened\n");

	return 0;

open_err:
	clk_disable(self->clk);	/* undo clk_enable() above */

	return err;
}
713
714static int sh_irda_stop(struct net_device *ndev)
715{
716 struct sh_irda_self *self = netdev_priv(ndev);
717
718 /* Stop IrLAP */
719 if (self->irlap) {
720 irlap_close(self->irlap);
721 self->irlap = NULL;
722 }
723
724 netif_stop_queue(ndev);
725
726 dev_info(&ndev->dev, "stoped\n");
727
728 return 0;
729}
730
/* net_device callbacks for the IrDA netdevice */
static const struct net_device_ops sh_irda_ndo = {
	.ndo_open		= sh_irda_open,
	.ndo_stop		= sh_irda_stop,
	.ndo_start_xmit		= sh_irda_hard_xmit,
	.ndo_do_ioctl		= sh_irda_ioctl,
	.ndo_get_stats		= sh_irda_stats,
};
738
739/************************************************************************
740
741
742 platform_driver function
743
744
745************************************************************************/
746static int __devinit sh_irda_probe(struct platform_device *pdev)
747{
748 struct net_device *ndev;
749 struct sh_irda_self *self;
750 struct resource *res;
751 char clk_name[8];
752 unsigned int irq;
753 int err = -ENOMEM;
754
755 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
756 irq = platform_get_irq(pdev, 0);
757 if (!res || irq < 0) {
758 dev_err(&pdev->dev, "Not enough platform resources.\n");
759 goto exit;
760 }
761
762 ndev = alloc_irdadev(sizeof(*self));
763 if (!ndev)
764 goto exit;
765
766 self = netdev_priv(ndev);
767 self->membase = ioremap_nocache(res->start, resource_size(res));
768 if (!self->membase) {
769 err = -ENXIO;
770 dev_err(&pdev->dev, "Unable to ioremap.\n");
771 goto err_mem_1;
772 }
773
774 err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
775 if (err)
776 goto err_mem_2;
777
778 snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
779 self->clk = clk_get(&pdev->dev, clk_name);
780 if (IS_ERR(self->clk)) {
781 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
782 goto err_mem_3;
783 }
784
785 irda_init_max_qos_capabilies(&self->qos);
786
787 ndev->netdev_ops = &sh_irda_ndo;
788 ndev->irq = irq;
789
790 self->ndev = ndev;
791 self->qos.baud_rate.bits &= IR_9600; /* FIXME */
792 self->qos.min_turn_time.bits = 1; /* 10 ms or more */
793 spin_lock_init(&self->lock);
794
795 irda_qos_bits_to_value(&self->qos);
796
797 err = register_netdev(ndev);
798 if (err)
799 goto err_mem_4;
800
801 platform_set_drvdata(pdev, ndev);
802
803 if (request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self)) {
804 dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
805 goto err_mem_4;
806 }
807
808 dev_info(&pdev->dev, "SuperH IrDA probed\n");
809
810 goto exit;
811
812err_mem_4:
813 clk_put(self->clk);
814err_mem_3:
815 sh_irda_remove_iobuf(self);
816err_mem_2:
817 iounmap(self->membase);
818err_mem_1:
819 free_netdev(ndev);
820exit:
821 return err;
822}
823
824static int __devexit sh_irda_remove(struct platform_device *pdev)
825{
826 struct net_device *ndev = platform_get_drvdata(pdev);
827 struct sh_irda_self *self = netdev_priv(ndev);
828
829 if (!self)
830 return 0;
831
832 unregister_netdev(ndev);
833 clk_put(self->clk);
834 sh_irda_remove_iobuf(self);
835 iounmap(self->membase);
836 free_netdev(ndev);
837 platform_set_drvdata(pdev, NULL);
838
839 return 0;
840}
841
/* platform driver glue; bound by name against the "sh_irda" device */
static struct platform_driver sh_irda_driver = {
	.probe   = sh_irda_probe,
	.remove  = __devexit_p(sh_irda_remove),
	.driver  = {
		.name = DRIVER_NAME,
	},
};
849
/* module entry point: register the platform driver */
static int __init sh_irda_init(void)
{
	return platform_driver_register(&sh_irda_driver);
}
854
/* module exit point: unregister the platform driver */
static void __exit sh_irda_exit(void)
{
	platform_driver_unregister(&sh_irda_driver);
}
859
860module_init(sh_irda_init);
861module_exit(sh_irda_exit);
862
863MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
864MODULE_DESCRIPTION("SuperH IrDA driver");
865MODULE_LICENSE("GPL");
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 0745581c4b5e..5c5f99d50341 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -646,8 +646,10 @@ static int sh_sir_open(struct net_device *ndev)
646 sh_sir_set_baudrate(self, 9600); 646 sh_sir_set_baudrate(self, 9600);
647 647
648 self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME); 648 self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
649 if (!self->irlap) 649 if (!self->irlap) {
650 err = -ENODEV;
650 goto open_err; 651 goto open_err;
652 }
651 653
652 /* 654 /*
653 * Now enable the interrupt then start the queue 655 * Now enable the interrupt then start the queue
@@ -707,7 +709,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
707 struct sh_sir_self *self; 709 struct sh_sir_self *self;
708 struct resource *res; 710 struct resource *res;
709 char clk_name[8]; 711 char clk_name[8];
710 void __iomem *base;
711 unsigned int irq; 712 unsigned int irq;
712 int err = -ENOMEM; 713 int err = -ENOMEM;
713 714
@@ -722,14 +723,14 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
722 if (!ndev) 723 if (!ndev)
723 goto exit; 724 goto exit;
724 725
725 base = ioremap_nocache(res->start, resource_size(res)); 726 self = netdev_priv(ndev);
726 if (!base) { 727 self->membase = ioremap_nocache(res->start, resource_size(res));
728 if (!self->membase) {
727 err = -ENXIO; 729 err = -ENXIO;
728 dev_err(&pdev->dev, "Unable to ioremap.\n"); 730 dev_err(&pdev->dev, "Unable to ioremap.\n");
729 goto err_mem_1; 731 goto err_mem_1;
730 } 732 }
731 733
732 self = netdev_priv(ndev);
733 err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME); 734 err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
734 if (err) 735 if (err)
735 goto err_mem_2; 736 goto err_mem_2;
@@ -746,7 +747,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
746 ndev->netdev_ops = &sh_sir_ndo; 747 ndev->netdev_ops = &sh_sir_ndo;
747 ndev->irq = irq; 748 ndev->irq = irq;
748 749
749 self->membase = base;
750 self->ndev = ndev; 750 self->ndev = ndev;
751 self->qos.baud_rate.bits &= IR_9600; /* FIXME */ 751 self->qos.baud_rate.bits &= IR_9600; /* FIXME */
752 self->qos.min_turn_time.bits = 1; /* 10 ms or more */ 752 self->qos.min_turn_time.bits = 1; /* 10 ms or more */
diff --git a/drivers/net/irda/sir_dev.c b/drivers/net/irda/sir_dev.c
index de91cd14016b..1b051dab7b29 100644
--- a/drivers/net/irda/sir_dev.c
+++ b/drivers/net/irda/sir_dev.c
@@ -655,7 +655,6 @@ static netdev_tx_t sirdev_hard_xmit(struct sk_buff *skb,
655 655
656 if (likely(actual > 0)) { 656 if (likely(actual > 0)) {
657 dev->tx_skb = skb; 657 dev->tx_skb = skb;
658 ndev->trans_start = jiffies;
659 dev->tx_buff.data += actual; 658 dev->tx_buff.data += actual;
660 dev->tx_buff.len -= actual; 659 dev->tx_buff.len -= actual;
661 } 660 }
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 6af84d88cd03..d67e48418e55 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -868,7 +868,7 @@ static void smsc_ircc_timeout(struct net_device *dev)
868 spin_lock_irqsave(&self->lock, flags); 868 spin_lock_irqsave(&self->lock, flags);
869 smsc_ircc_sir_start(self); 869 smsc_ircc_sir_start(self);
870 smsc_ircc_change_speed(self, self->io.speed); 870 smsc_ircc_change_speed(self, self->io.speed);
871 dev->trans_start = jiffies; 871 dev->trans_start = jiffies; /* prevent tx timeout */
872 netif_wake_queue(dev); 872 netif_wake_queue(dev);
873 spin_unlock_irqrestore(&self->lock, flags); 873 spin_unlock_irqrestore(&self->lock, flags);
874} 874}
@@ -2822,7 +2822,6 @@ static void __init preconfigure_ali_port(struct pci_dev *dev,
2822 tmpbyte |= mask; 2822 tmpbyte |= mask;
2823 pci_write_config_byte(dev, reg, tmpbyte); 2823 pci_write_config_byte(dev, reg, tmpbyte);
2824 IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port); 2824 IRDA_MESSAGE("Activated ALi 1533 ISA bridge port 0x%04x.\n", port);
2825 return;
2826} 2825}
2827 2826
2828static int __init preconfigure_through_ali(struct pci_dev *dev, 2827static int __init preconfigure_through_ali(struct pci_dev *dev,
diff --git a/drivers/net/irda/via-ircc.h b/drivers/net/irda/via-ircc.h
index d9d1db03fa2d..5a84822b5a43 100644
--- a/drivers/net/irda/via-ircc.h
+++ b/drivers/net/irda/via-ircc.h
@@ -774,7 +774,7 @@ static void SetBaudRate(__u16 iobase, __u32 rate)
774 break; 774 break;
775 default: 775 default:
776 break; 776 break;
777 }; 777 }
778 } else if (IsMIROn(iobase)) { 778 } else if (IsMIROn(iobase)) {
779 value = 0; // will automatically be fixed in 1.152M 779 value = 0; // will automatically be fixed in 1.152M
780 } else if (IsFIROn(iobase)) { 780 } else if (IsFIROn(iobase)) {
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 209d4bcfaced..c3d07382b7fa 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1037,7 +1037,6 @@ static netdev_tx_t vlsi_hard_start_xmit(struct sk_buff *skb,
1037 wmb(); 1037 wmb();
1038 outw(0, iobase+VLSI_PIO_PROMPT); 1038 outw(0, iobase+VLSI_PIO_PROMPT);
1039 } 1039 }
1040 ndev->trans_start = jiffies;
1041 1040
1042 if (ring_put(r) == NULL) { 1041 if (ring_put(r) == NULL) {
1043 netif_stop_queue(ndev); 1042 netif_stop_queue(ndev);
@@ -1742,7 +1741,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1742 vlsi_irda_dev_t *idev; 1741 vlsi_irda_dev_t *idev;
1743 1742
1744 if (!ndev) { 1743 if (!ndev) {
1745 IRDA_ERROR("%s - %s: no netdevice \n", 1744 IRDA_ERROR("%s - %s: no netdevice\n",
1746 __func__, pci_name(pdev)); 1745 __func__, pci_name(pdev));
1747 return 0; 1746 return 0;
1748 } 1747 }
@@ -1781,7 +1780,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1781 vlsi_irda_dev_t *idev; 1780 vlsi_irda_dev_t *idev;
1782 1781
1783 if (!ndev) { 1782 if (!ndev) {
1784 IRDA_ERROR("%s - %s: no netdevice \n", 1783 IRDA_ERROR("%s - %s: no netdevice\n",
1785 __func__, pci_name(pdev)); 1784 __func__, pci_name(pdev));
1786 return 0; 1785 return 0;
1787 } 1786 }
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index cb0cb758be64..1f9c3f08d1a3 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -515,7 +515,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
515 /* Check for empty frame */ 515 /* Check for empty frame */
516 if (!skb->len) { 516 if (!skb->len) {
517 w83977af_change_speed(self, speed); 517 w83977af_change_speed(self, speed);
518 dev->trans_start = jiffies;
519 dev_kfree_skb(skb); 518 dev_kfree_skb(skb);
520 return NETDEV_TX_OK; 519 return NETDEV_TX_OK;
521 } else 520 } else
@@ -549,7 +548,6 @@ static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
549 switch_bank(iobase, SET0); 548 switch_bank(iobase, SET0);
550 outb(ICR_ETXTHI, iobase+ICR); 549 outb(ICR_ETXTHI, iobase+ICR);
551 } 550 }
552 dev->trans_start = jiffies;
553 dev_kfree_skb(skb); 551 dev_kfree_skb(skb);
554 552
555 /* Restore set register */ 553 /* Restore set register */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 773c59c89691..ba1de5973fb2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -962,15 +962,15 @@ static void veth_set_multicast_list(struct net_device *dev)
962 (netdev_mc_count(dev) > VETH_MAX_MCAST)) { 962 (netdev_mc_count(dev) > VETH_MAX_MCAST)) {
963 port->promiscuous = 1; 963 port->promiscuous = 1;
964 } else { 964 } else {
965 struct dev_mc_list *dmi; 965 struct netdev_hw_addr *ha;
966 966
967 port->promiscuous = 0; 967 port->promiscuous = 0;
968 968
969 /* Update table */ 969 /* Update table */
970 port->num_mcast = 0; 970 port->num_mcast = 0;
971 971
972 netdev_for_each_mc_addr(dmi, dev) { 972 netdev_for_each_mc_addr(ha, dev) {
973 u8 *addr = dmi->dmi_addr; 973 u8 *addr = ha->addr;
974 u64 xaddr = 0; 974 u64 xaddr = 0;
975 975
976 if (addr[0] & 0x01) {/* multicast address? */ 976 if (addr[0] & 0x01) {/* multicast address? */
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 92d2e71d0c8b..521c0c732998 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -78,9 +78,13 @@ struct ixgb_adapter;
78#define PFX "ixgb: " 78#define PFX "ixgb: "
79 79
80#ifdef _DEBUG_DRIVER_ 80#ifdef _DEBUG_DRIVER_
81#define IXGB_DBG(args...) printk(KERN_DEBUG PFX args) 81#define IXGB_DBG(fmt, args...) printk(KERN_DEBUG PFX fmt, ##args)
82#else 82#else
83#define IXGB_DBG(args...) 83#define IXGB_DBG(fmt, args...) \
84do { \
85 if (0) \
86 printk(KERN_DEBUG PFX fmt, ##args); \
87} while (0)
84#endif 88#endif
85 89
86/* TX/RX descriptor defines */ 90/* TX/RX descriptor defines */
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 89ffa7264a12..813993f9c65c 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -26,6 +26,8 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include "ixgb_hw.h" 31#include "ixgb_hw.h"
30#include "ixgb_ee.h" 32#include "ixgb_ee.h"
31/* Local prototypes */ 33/* Local prototypes */
@@ -56,7 +58,6 @@ ixgb_raise_clock(struct ixgb_hw *hw,
56 *eecd_reg = *eecd_reg | IXGB_EECD_SK; 58 *eecd_reg = *eecd_reg | IXGB_EECD_SK;
57 IXGB_WRITE_REG(hw, EECD, *eecd_reg); 59 IXGB_WRITE_REG(hw, EECD, *eecd_reg);
58 udelay(50); 60 udelay(50);
59 return;
60} 61}
61 62
62/****************************************************************************** 63/******************************************************************************
@@ -75,7 +76,6 @@ ixgb_lower_clock(struct ixgb_hw *hw,
75 *eecd_reg = *eecd_reg & ~IXGB_EECD_SK; 76 *eecd_reg = *eecd_reg & ~IXGB_EECD_SK;
76 IXGB_WRITE_REG(hw, EECD, *eecd_reg); 77 IXGB_WRITE_REG(hw, EECD, *eecd_reg);
77 udelay(50); 78 udelay(50);
78 return;
79} 79}
80 80
81/****************************************************************************** 81/******************************************************************************
@@ -125,7 +125,6 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
125 /* We leave the "DI" bit set to "0" when we leave this routine. */ 125 /* We leave the "DI" bit set to "0" when we leave this routine. */
126 eecd_reg &= ~IXGB_EECD_DI; 126 eecd_reg &= ~IXGB_EECD_DI;
127 IXGB_WRITE_REG(hw, EECD, eecd_reg); 127 IXGB_WRITE_REG(hw, EECD, eecd_reg);
128 return;
129} 128}
130 129
131/****************************************************************************** 130/******************************************************************************
@@ -190,7 +189,6 @@ ixgb_setup_eeprom(struct ixgb_hw *hw)
190 /* Set CS */ 189 /* Set CS */
191 eecd_reg |= IXGB_EECD_CS; 190 eecd_reg |= IXGB_EECD_CS;
192 IXGB_WRITE_REG(hw, EECD, eecd_reg); 191 IXGB_WRITE_REG(hw, EECD, eecd_reg);
193 return;
194} 192}
195 193
196/****************************************************************************** 194/******************************************************************************
@@ -224,7 +222,6 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
224 eecd_reg &= ~IXGB_EECD_SK; 222 eecd_reg &= ~IXGB_EECD_SK;
225 IXGB_WRITE_REG(hw, EECD, eecd_reg); 223 IXGB_WRITE_REG(hw, EECD, eecd_reg);
226 udelay(50); 224 udelay(50);
227 return;
228} 225}
229 226
230/****************************************************************************** 227/******************************************************************************
@@ -248,7 +245,6 @@ ixgb_clock_eeprom(struct ixgb_hw *hw)
248 eecd_reg &= ~IXGB_EECD_SK; 245 eecd_reg &= ~IXGB_EECD_SK;
249 IXGB_WRITE_REG(hw, EECD, eecd_reg); 246 IXGB_WRITE_REG(hw, EECD, eecd_reg);
250 udelay(50); 247 udelay(50);
251 return;
252} 248}
253 249
254/****************************************************************************** 250/******************************************************************************
@@ -268,7 +264,6 @@ ixgb_cleanup_eeprom(struct ixgb_hw *hw)
268 IXGB_WRITE_REG(hw, EECD, eecd_reg); 264 IXGB_WRITE_REG(hw, EECD, eecd_reg);
269 265
270 ixgb_clock_eeprom(hw); 266 ixgb_clock_eeprom(hw);
271 return;
272} 267}
273 268
274/****************************************************************************** 269/******************************************************************************
@@ -357,7 +352,6 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
357 checksum = (u16) EEPROM_SUM - checksum; 352 checksum = (u16) EEPROM_SUM - checksum;
358 353
359 ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum); 354 ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
360 return;
361} 355}
362 356
363/****************************************************************************** 357/******************************************************************************
@@ -412,8 +406,6 @@ ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data)
412 406
413 /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */ 407 /* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
414 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); 408 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
415
416 return;
417} 409}
418 410
419/****************************************************************************** 411/******************************************************************************
@@ -467,11 +459,11 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
467 u16 checksum = 0; 459 u16 checksum = 0;
468 struct ixgb_ee_map_type *ee_map; 460 struct ixgb_ee_map_type *ee_map;
469 461
470 DEBUGFUNC("ixgb_get_eeprom_data"); 462 ENTER();
471 463
472 ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 464 ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
473 465
474 DEBUGOUT("ixgb_ee: Reading eeprom data\n"); 466 pr_debug("Reading eeprom data\n");
475 for (i = 0; i < IXGB_EEPROM_SIZE ; i++) { 467 for (i = 0; i < IXGB_EEPROM_SIZE ; i++) {
476 u16 ee_data; 468 u16 ee_data;
477 ee_data = ixgb_read_eeprom(hw, i); 469 ee_data = ixgb_read_eeprom(hw, i);
@@ -480,7 +472,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
480 } 472 }
481 473
482 if (checksum != (u16) EEPROM_SUM) { 474 if (checksum != (u16) EEPROM_SUM) {
483 DEBUGOUT("ixgb_ee: Checksum invalid.\n"); 475 pr_debug("Checksum invalid\n");
484 /* clear the init_ctrl_reg_1 to signify that the cache is 476 /* clear the init_ctrl_reg_1 to signify that the cache is
485 * invalidated */ 477 * invalidated */
486 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR); 478 ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
@@ -489,7 +481,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
489 481
490 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK)) 482 if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
491 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) { 483 != cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
492 DEBUGOUT("ixgb_ee: Signature invalid.\n"); 484 pr_debug("Signature invalid\n");
493 return(false); 485 return(false);
494 } 486 }
495 487
@@ -555,13 +547,13 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
555 int i; 547 int i;
556 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom; 548 struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
557 549
558 DEBUGFUNC("ixgb_get_ee_mac_addr"); 550 ENTER();
559 551
560 if (ixgb_check_and_get_eeprom_data(hw) == true) { 552 if (ixgb_check_and_get_eeprom_data(hw) == true) {
561 for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) { 553 for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
562 mac_addr[i] = ee_map->mac_addr[i]; 554 mac_addr[i] = ee_map->mac_addr[i];
563 DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
564 } 555 }
556 pr_debug("eeprom mac address = %pM\n", mac_addr);
565 } 557 }
566} 558}
567 559
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index ff67a84e6802..397acabccab6 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -30,9 +30,13 @@
30 * Shared functions for accessing and configuring the adapter 30 * Shared functions for accessing and configuring the adapter
31 */ 31 */
32 32
33#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34
33#include "ixgb_hw.h" 35#include "ixgb_hw.h"
34#include "ixgb_ids.h" 36#include "ixgb_ids.h"
35 37
38#include <linux/etherdevice.h>
39
36/* Local function prototypes */ 40/* Local function prototypes */
37 41
38static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr); 42static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
@@ -120,13 +124,13 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
120 u32 ctrl_reg; 124 u32 ctrl_reg;
121 u32 icr_reg; 125 u32 icr_reg;
122 126
123 DEBUGFUNC("ixgb_adapter_stop"); 127 ENTER();
124 128
125 /* If we are stopped or resetting exit gracefully and wait to be 129 /* If we are stopped or resetting exit gracefully and wait to be
126 * started again before accessing the hardware. 130 * started again before accessing the hardware.
127 */ 131 */
128 if (hw->adapter_stopped) { 132 if (hw->adapter_stopped) {
129 DEBUGOUT("Exiting because the adapter is already stopped!!!\n"); 133 pr_debug("Exiting because the adapter is already stopped!!!\n");
130 return false; 134 return false;
131 } 135 }
132 136
@@ -136,7 +140,7 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
136 hw->adapter_stopped = true; 140 hw->adapter_stopped = true;
137 141
138 /* Clear interrupt mask to stop board from generating interrupts */ 142 /* Clear interrupt mask to stop board from generating interrupts */
139 DEBUGOUT("Masking off all interrupts\n"); 143 pr_debug("Masking off all interrupts\n");
140 IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF); 144 IXGB_WRITE_REG(hw, IMC, 0xFFFFFFFF);
141 145
142 /* Disable the Transmit and Receive units. Then delay to allow 146 /* Disable the Transmit and Receive units. Then delay to allow
@@ -152,12 +156,12 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
152 * the current PCI configuration. The global reset bit is self- 156 * the current PCI configuration. The global reset bit is self-
153 * clearing, and should clear within a microsecond. 157 * clearing, and should clear within a microsecond.
154 */ 158 */
155 DEBUGOUT("Issuing a global reset to MAC\n"); 159 pr_debug("Issuing a global reset to MAC\n");
156 160
157 ctrl_reg = ixgb_mac_reset(hw); 161 ctrl_reg = ixgb_mac_reset(hw);
158 162
159 /* Clear interrupt mask to stop board from generating interrupts */ 163 /* Clear interrupt mask to stop board from generating interrupts */
160 DEBUGOUT("Masking off all interrupts\n"); 164 pr_debug("Masking off all interrupts\n");
161 IXGB_WRITE_REG(hw, IMC, 0xffffffff); 165 IXGB_WRITE_REG(hw, IMC, 0xffffffff);
162 166
163 /* Clear any pending interrupt events. */ 167 /* Clear any pending interrupt events. */
@@ -183,7 +187,7 @@ ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
183 u16 vendor_name[5]; 187 u16 vendor_name[5];
184 ixgb_xpak_vendor xpak_vendor; 188 ixgb_xpak_vendor xpak_vendor;
185 189
186 DEBUGFUNC("ixgb_identify_xpak_vendor"); 190 ENTER();
187 191
188 /* Read the first few bytes of the vendor string from the XPAK NVR 192 /* Read the first few bytes of the vendor string from the XPAK NVR
189 * registers. These are standard XENPAK/XPAK registers, so all XPAK 193 * registers. These are standard XENPAK/XPAK registers, so all XPAK
@@ -222,12 +226,12 @@ ixgb_identify_phy(struct ixgb_hw *hw)
222 ixgb_phy_type phy_type; 226 ixgb_phy_type phy_type;
223 ixgb_xpak_vendor xpak_vendor; 227 ixgb_xpak_vendor xpak_vendor;
224 228
225 DEBUGFUNC("ixgb_identify_phy"); 229 ENTER();
226 230
227 /* Infer the transceiver/phy type from the device id */ 231 /* Infer the transceiver/phy type from the device id */
228 switch (hw->device_id) { 232 switch (hw->device_id) {
229 case IXGB_DEVICE_ID_82597EX: 233 case IXGB_DEVICE_ID_82597EX:
230 DEBUGOUT("Identified TXN17401 optics\n"); 234 pr_debug("Identified TXN17401 optics\n");
231 phy_type = ixgb_phy_type_txn17401; 235 phy_type = ixgb_phy_type_txn17401;
232 break; 236 break;
233 237
@@ -237,30 +241,30 @@ ixgb_identify_phy(struct ixgb_hw *hw)
237 * type of optics. */ 241 * type of optics. */
238 xpak_vendor = ixgb_identify_xpak_vendor(hw); 242 xpak_vendor = ixgb_identify_xpak_vendor(hw);
239 if (xpak_vendor == ixgb_xpak_vendor_intel) { 243 if (xpak_vendor == ixgb_xpak_vendor_intel) {
240 DEBUGOUT("Identified TXN17201 optics\n"); 244 pr_debug("Identified TXN17201 optics\n");
241 phy_type = ixgb_phy_type_txn17201; 245 phy_type = ixgb_phy_type_txn17201;
242 } else { 246 } else {
243 DEBUGOUT("Identified G6005 optics\n"); 247 pr_debug("Identified G6005 optics\n");
244 phy_type = ixgb_phy_type_g6005; 248 phy_type = ixgb_phy_type_g6005;
245 } 249 }
246 break; 250 break;
247 case IXGB_DEVICE_ID_82597EX_LR: 251 case IXGB_DEVICE_ID_82597EX_LR:
248 DEBUGOUT("Identified G6104 optics\n"); 252 pr_debug("Identified G6104 optics\n");
249 phy_type = ixgb_phy_type_g6104; 253 phy_type = ixgb_phy_type_g6104;
250 break; 254 break;
251 case IXGB_DEVICE_ID_82597EX_CX4: 255 case IXGB_DEVICE_ID_82597EX_CX4:
252 DEBUGOUT("Identified CX4\n"); 256 pr_debug("Identified CX4\n");
253 xpak_vendor = ixgb_identify_xpak_vendor(hw); 257 xpak_vendor = ixgb_identify_xpak_vendor(hw);
254 if (xpak_vendor == ixgb_xpak_vendor_intel) { 258 if (xpak_vendor == ixgb_xpak_vendor_intel) {
255 DEBUGOUT("Identified TXN17201 optics\n"); 259 pr_debug("Identified TXN17201 optics\n");
256 phy_type = ixgb_phy_type_txn17201; 260 phy_type = ixgb_phy_type_txn17201;
257 } else { 261 } else {
258 DEBUGOUT("Identified G6005 optics\n"); 262 pr_debug("Identified G6005 optics\n");
259 phy_type = ixgb_phy_type_g6005; 263 phy_type = ixgb_phy_type_g6005;
260 } 264 }
261 break; 265 break;
262 default: 266 default:
263 DEBUGOUT("Unknown physical layer module\n"); 267 pr_debug("Unknown physical layer module\n");
264 phy_type = ixgb_phy_type_unknown; 268 phy_type = ixgb_phy_type_unknown;
265 break; 269 break;
266 } 270 }
@@ -296,18 +300,18 @@ ixgb_init_hw(struct ixgb_hw *hw)
296 u32 ctrl_reg; 300 u32 ctrl_reg;
297 bool status; 301 bool status;
298 302
299 DEBUGFUNC("ixgb_init_hw"); 303 ENTER();
300 304
301 /* Issue a global reset to the MAC. This will reset the chip's 305 /* Issue a global reset to the MAC. This will reset the chip's
302 * transmit, receive, DMA, and link units. It will not effect 306 * transmit, receive, DMA, and link units. It will not effect
303 * the current PCI configuration. The global reset bit is self- 307 * the current PCI configuration. The global reset bit is self-
304 * clearing, and should clear within a microsecond. 308 * clearing, and should clear within a microsecond.
305 */ 309 */
306 DEBUGOUT("Issuing a global reset to MAC\n"); 310 pr_debug("Issuing a global reset to MAC\n");
307 311
308 ctrl_reg = ixgb_mac_reset(hw); 312 ctrl_reg = ixgb_mac_reset(hw);
309 313
310 DEBUGOUT("Issuing an EE reset to MAC\n"); 314 pr_debug("Issuing an EE reset to MAC\n");
311#ifdef HP_ZX1 315#ifdef HP_ZX1
312 /* Workaround for 82597EX reset errata */ 316 /* Workaround for 82597EX reset errata */
313 IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST); 317 IXGB_WRITE_REG_IO(hw, CTRL1, IXGB_CTRL1_EE_RST);
@@ -335,7 +339,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
335 * If it is not valid, we fail hardware init. 339 * If it is not valid, we fail hardware init.
336 */ 340 */
337 if (!mac_addr_valid(hw->curr_mac_addr)) { 341 if (!mac_addr_valid(hw->curr_mac_addr)) {
338 DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n"); 342 pr_debug("MAC address invalid after ixgb_init_rx_addrs\n");
339 return(false); 343 return(false);
340 } 344 }
341 345
@@ -346,7 +350,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
346 ixgb_get_bus_info(hw); 350 ixgb_get_bus_info(hw);
347 351
348 /* Zero out the Multicast HASH table */ 352 /* Zero out the Multicast HASH table */
349 DEBUGOUT("Zeroing the MTA\n"); 353 pr_debug("Zeroing the MTA\n");
350 for (i = 0; i < IXGB_MC_TBL_SIZE; i++) 354 for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
351 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); 355 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
352 356
@@ -379,7 +383,7 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
379{ 383{
380 u32 i; 384 u32 i;
381 385
382 DEBUGFUNC("ixgb_init_rx_addrs"); 386 ENTER();
383 387
384 /* 388 /*
385 * If the current mac address is valid, assume it is a software override 389 * If the current mac address is valid, assume it is a software override
@@ -391,35 +395,24 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
391 /* Get the MAC address from the eeprom for later reference */ 395 /* Get the MAC address from the eeprom for later reference */
392 ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr); 396 ixgb_get_ee_mac_addr(hw, hw->curr_mac_addr);
393 397
394 DEBUGOUT3(" Keeping Permanent MAC Addr =%.2X %.2X %.2X ", 398 pr_debug("Keeping Permanent MAC Addr = %pM\n",
395 hw->curr_mac_addr[0], 399 hw->curr_mac_addr);
396 hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
397 DEBUGOUT3("%.2X %.2X %.2X\n",
398 hw->curr_mac_addr[3],
399 hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
400 } else { 400 } else {
401 401
402 /* Setup the receive address. */ 402 /* Setup the receive address. */
403 DEBUGOUT("Overriding MAC Address in RAR[0]\n"); 403 pr_debug("Overriding MAC Address in RAR[0]\n");
404 DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ", 404 pr_debug("New MAC Addr = %pM\n", hw->curr_mac_addr);
405 hw->curr_mac_addr[0],
406 hw->curr_mac_addr[1], hw->curr_mac_addr[2]);
407 DEBUGOUT3("%.2X %.2X %.2X\n",
408 hw->curr_mac_addr[3],
409 hw->curr_mac_addr[4], hw->curr_mac_addr[5]);
410 405
411 ixgb_rar_set(hw, hw->curr_mac_addr, 0); 406 ixgb_rar_set(hw, hw->curr_mac_addr, 0);
412 } 407 }
413 408
414 /* Zero out the other 15 receive addresses. */ 409 /* Zero out the other 15 receive addresses. */
415 DEBUGOUT("Clearing RAR[1-15]\n"); 410 pr_debug("Clearing RAR[1-15]\n");
416 for (i = 1; i < IXGB_RAR_ENTRIES; i++) { 411 for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
417 /* Write high reg first to disable the AV bit first */ 412 /* Write high reg first to disable the AV bit first */
418 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 413 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
419 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 414 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
420 } 415 }
421
422 return;
423} 416}
424 417
425/****************************************************************************** 418/******************************************************************************
@@ -444,65 +437,50 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
444 u32 hash_value; 437 u32 hash_value;
445 u32 i; 438 u32 i;
446 u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */ 439 u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */
440 u8 *mca;
447 441
448 DEBUGFUNC("ixgb_mc_addr_list_update"); 442 ENTER();
449 443
450 /* Set the new number of MC addresses that we are being requested to use. */ 444 /* Set the new number of MC addresses that we are being requested to use. */
451 hw->num_mc_addrs = mc_addr_count; 445 hw->num_mc_addrs = mc_addr_count;
452 446
453 /* Clear RAR[1-15] */ 447 /* Clear RAR[1-15] */
454 DEBUGOUT(" Clearing RAR[1-15]\n"); 448 pr_debug("Clearing RAR[1-15]\n");
455 for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) { 449 for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
456 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); 450 IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
457 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); 451 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
458 } 452 }
459 453
460 /* Clear the MTA */ 454 /* Clear the MTA */
461 DEBUGOUT(" Clearing MTA\n"); 455 pr_debug("Clearing MTA\n");
462 for (i = 0; i < IXGB_MC_TBL_SIZE; i++) 456 for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
463 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0); 457 IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
464 458
465 /* Add the new addresses */ 459 /* Add the new addresses */
460 mca = mc_addr_list;
466 for (i = 0; i < mc_addr_count; i++) { 461 for (i = 0; i < mc_addr_count; i++) {
467 DEBUGOUT(" Adding the multicast addresses:\n"); 462 pr_debug("Adding the multicast addresses:\n");
468 DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i, 463 pr_debug("MC Addr #%d = %pM\n", i, mca);
469 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
470 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
471 1],
472 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
473 2],
474 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
475 3],
476 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
477 4],
478 mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad) +
479 5]);
480 464
481 /* Place this multicast address in the RAR if there is room, * 465 /* Place this multicast address in the RAR if there is room, *
482 * else put it in the MTA 466 * else put it in the MTA
483 */ 467 */
484 if (rar_used_count < IXGB_RAR_ENTRIES) { 468 if (rar_used_count < IXGB_RAR_ENTRIES) {
485 ixgb_rar_set(hw, 469 ixgb_rar_set(hw, mca, rar_used_count);
486 mc_addr_list + 470 pr_debug("Added a multicast address to RAR[%d]\n", i);
487 (i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
488 rar_used_count);
489 DEBUGOUT1("Added a multicast address to RAR[%d]\n", i);
490 rar_used_count++; 471 rar_used_count++;
491 } else { 472 } else {
492 hash_value = ixgb_hash_mc_addr(hw, 473 hash_value = ixgb_hash_mc_addr(hw, mca);
493 mc_addr_list +
494 (i *
495 (IXGB_ETH_LENGTH_OF_ADDRESS
496 + pad)));
497 474
498 DEBUGOUT1(" Hash value = 0x%03X\n", hash_value); 475 pr_debug("Hash value = 0x%03X\n", hash_value);
499 476
500 ixgb_mta_set(hw, hash_value); 477 ixgb_mta_set(hw, hash_value);
501 } 478 }
479
480 mca += IXGB_ETH_LENGTH_OF_ADDRESS + pad;
502 } 481 }
503 482
504 DEBUGOUT("MC Update Complete\n"); 483 pr_debug("MC Update Complete\n");
505 return;
506} 484}
507 485
508/****************************************************************************** 486/******************************************************************************
@@ -520,7 +498,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
520{ 498{
521 u32 hash_value = 0; 499 u32 hash_value = 0;
522 500
523 DEBUGFUNC("ixgb_hash_mc_addr"); 501 ENTER();
524 502
525 /* The portion of the address that is used for the hash table is 503 /* The portion of the address that is used for the hash table is
526 * determined by the mc_filter_type setting. 504 * determined by the mc_filter_type setting.
@@ -547,7 +525,7 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
547 break; 525 break;
548 default: 526 default:
549 /* Invalid mc_filter_type, what should we do? */ 527 /* Invalid mc_filter_type, what should we do? */
550 DEBUGOUT("MC filter type param set incorrectly\n"); 528 pr_debug("MC filter type param set incorrectly\n");
551 ASSERT(0); 529 ASSERT(0);
552 break; 530 break;
553 } 531 }
@@ -585,8 +563,6 @@ ixgb_mta_set(struct ixgb_hw *hw,
585 mta_reg |= (1 << hash_bit); 563 mta_reg |= (1 << hash_bit);
586 564
587 IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg); 565 IXGB_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta_reg);
588
589 return;
590} 566}
591 567
592/****************************************************************************** 568/******************************************************************************
@@ -603,7 +579,7 @@ ixgb_rar_set(struct ixgb_hw *hw,
603{ 579{
604 u32 rar_low, rar_high; 580 u32 rar_low, rar_high;
605 581
606 DEBUGFUNC("ixgb_rar_set"); 582 ENTER();
607 583
608 /* HW expects these in little endian so we reverse the byte order 584 /* HW expects these in little endian so we reverse the byte order
609 * from network order (big endian) to little endian 585 * from network order (big endian) to little endian
@@ -619,7 +595,6 @@ ixgb_rar_set(struct ixgb_hw *hw,
619 595
620 IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); 596 IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
621 IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); 597 IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
622 return;
623} 598}
624 599
625/****************************************************************************** 600/******************************************************************************
@@ -635,7 +610,6 @@ ixgb_write_vfta(struct ixgb_hw *hw,
635 u32 value) 610 u32 value)
636{ 611{
637 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value); 612 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
638 return;
639} 613}
640 614
641/****************************************************************************** 615/******************************************************************************
@@ -650,7 +624,6 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
650 624
651 for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++) 625 for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
652 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0); 626 IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
653 return;
654} 627}
655 628
656/****************************************************************************** 629/******************************************************************************
@@ -666,7 +639,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
666 u32 pap_reg = 0; /* by default, assume no pause time */ 639 u32 pap_reg = 0; /* by default, assume no pause time */
667 bool status = true; 640 bool status = true;
668 641
669 DEBUGFUNC("ixgb_setup_fc"); 642 ENTER();
670 643
671 /* Get the current control reg 0 settings */ 644 /* Get the current control reg 0 settings */
672 ctrl_reg = IXGB_READ_REG(hw, CTRL0); 645 ctrl_reg = IXGB_READ_REG(hw, CTRL0);
@@ -710,7 +683,7 @@ ixgb_setup_fc(struct ixgb_hw *hw)
710 break; 683 break;
711 default: 684 default:
712 /* We should never get here. The value should be 0-3. */ 685 /* We should never get here. The value should be 0-3. */
713 DEBUGOUT("Flow control param set incorrectly\n"); 686 pr_debug("Flow control param set incorrectly\n");
714 ASSERT(0); 687 ASSERT(0);
715 break; 688 break;
716 } 689 }
@@ -940,7 +913,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
940 u32 status_reg; 913 u32 status_reg;
941 u32 xpcss_reg; 914 u32 xpcss_reg;
942 915
943 DEBUGFUNC("ixgb_check_for_link"); 916 ENTER();
944 917
945 xpcss_reg = IXGB_READ_REG(hw, XPCSS); 918 xpcss_reg = IXGB_READ_REG(hw, XPCSS);
946 status_reg = IXGB_READ_REG(hw, STATUS); 919 status_reg = IXGB_READ_REG(hw, STATUS);
@@ -950,7 +923,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
950 hw->link_up = true; 923 hw->link_up = true;
951 } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) && 924 } else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
952 (status_reg & IXGB_STATUS_LU)) { 925 (status_reg & IXGB_STATUS_LU)) {
953 DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n"); 926 pr_debug("XPCSS Not Aligned while Status:LU is set\n");
954 hw->link_up = ixgb_link_reset(hw); 927 hw->link_up = ixgb_link_reset(hw);
955 } else { 928 } else {
956 /* 929 /*
@@ -981,8 +954,7 @@ bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
981 newRFC = IXGB_READ_REG(hw, RFC); 954 newRFC = IXGB_READ_REG(hw, RFC);
982 if ((hw->lastLFC + 250 < newLFC) 955 if ((hw->lastLFC + 250 < newLFC)
983 || (hw->lastRFC + 250 < newRFC)) { 956 || (hw->lastRFC + 250 < newRFC)) {
984 DEBUGOUT 957 pr_debug("BAD LINK! too many LFC/RFC since last check\n");
985 ("BAD LINK! too many LFC/RFC since last check\n");
986 bad_link_returncode = true; 958 bad_link_returncode = true;
987 } 959 }
988 hw->lastLFC = newLFC; 960 hw->lastLFC = newLFC;
@@ -1002,11 +974,11 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
1002{ 974{
1003 volatile u32 temp_reg; 975 volatile u32 temp_reg;
1004 976
1005 DEBUGFUNC("ixgb_clear_hw_cntrs"); 977 ENTER();
1006 978
1007 /* if we are stopped or resetting exit gracefully */ 979 /* if we are stopped or resetting exit gracefully */
1008 if (hw->adapter_stopped) { 980 if (hw->adapter_stopped) {
1009 DEBUGOUT("Exiting because the adapter is stopped!!!\n"); 981 pr_debug("Exiting because the adapter is stopped!!!\n");
1010 return; 982 return;
1011 } 983 }
1012 984
@@ -1070,7 +1042,6 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
1070 temp_reg = IXGB_READ_REG(hw, XOFFRXC); 1042 temp_reg = IXGB_READ_REG(hw, XOFFRXC);
1071 temp_reg = IXGB_READ_REG(hw, XOFFTXC); 1043 temp_reg = IXGB_READ_REG(hw, XOFFTXC);
1072 temp_reg = IXGB_READ_REG(hw, RJC); 1044 temp_reg = IXGB_READ_REG(hw, RJC);
1073 return;
1074} 1045}
1075 1046
1076/****************************************************************************** 1047/******************************************************************************
@@ -1086,7 +1057,6 @@ ixgb_led_on(struct ixgb_hw *hw)
1086 /* To turn on the LED, clear software-definable pin 0 (SDP0). */ 1057 /* To turn on the LED, clear software-definable pin 0 (SDP0). */
1087 ctrl0_reg &= ~IXGB_CTRL0_SDP0; 1058 ctrl0_reg &= ~IXGB_CTRL0_SDP0;
1088 IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg); 1059 IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
1089 return;
1090} 1060}
1091 1061
1092/****************************************************************************** 1062/******************************************************************************
@@ -1102,7 +1072,6 @@ ixgb_led_off(struct ixgb_hw *hw)
1102 /* To turn off the LED, set software-definable pin 0 (SDP0). */ 1072 /* To turn off the LED, set software-definable pin 0 (SDP0). */
1103 ctrl0_reg |= IXGB_CTRL0_SDP0; 1073 ctrl0_reg |= IXGB_CTRL0_SDP0;
1104 IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg); 1074 IXGB_WRITE_REG(hw, CTRL0, ctrl0_reg);
1105 return;
1106} 1075}
1107 1076
1108/****************************************************************************** 1077/******************************************************************************
@@ -1142,8 +1111,6 @@ ixgb_get_bus_info(struct ixgb_hw *hw)
1142 1111
1143 hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ? 1112 hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
1144 ixgb_bus_width_64 : ixgb_bus_width_32; 1113 ixgb_bus_width_64 : ixgb_bus_width_32;
1145
1146 return;
1147} 1114}
1148 1115
1149/****************************************************************************** 1116/******************************************************************************
@@ -1156,26 +1123,21 @@ static bool
1156mac_addr_valid(u8 *mac_addr) 1123mac_addr_valid(u8 *mac_addr)
1157{ 1124{
1158 bool is_valid = true; 1125 bool is_valid = true;
1159 DEBUGFUNC("mac_addr_valid"); 1126 ENTER();
1160 1127
1161 /* Make sure it is not a multicast address */ 1128 /* Make sure it is not a multicast address */
1162 if (IS_MULTICAST(mac_addr)) { 1129 if (is_multicast_ether_addr(mac_addr)) {
1163 DEBUGOUT("MAC address is multicast\n"); 1130 pr_debug("MAC address is multicast\n");
1164 is_valid = false; 1131 is_valid = false;
1165 } 1132 }
1166 /* Not a broadcast address */ 1133 /* Not a broadcast address */
1167 else if (IS_BROADCAST(mac_addr)) { 1134 else if (is_broadcast_ether_addr(mac_addr)) {
1168 DEBUGOUT("MAC address is broadcast\n"); 1135 pr_debug("MAC address is broadcast\n");
1169 is_valid = false; 1136 is_valid = false;
1170 } 1137 }
1171 /* Reject the zero address */ 1138 /* Reject the zero address */
1172 else if (mac_addr[0] == 0 && 1139 else if (is_zero_ether_addr(mac_addr)) {
1173 mac_addr[1] == 0 && 1140 pr_debug("MAC address is all zeros\n");
1174 mac_addr[2] == 0 &&
1175 mac_addr[3] == 0 &&
1176 mac_addr[4] == 0 &&
1177 mac_addr[5] == 0) {
1178 DEBUGOUT("MAC address is all zeros\n");
1179 is_valid = false; 1141 is_valid = false;
1180 } 1142 }
1181 return (is_valid); 1143 return (is_valid);
@@ -1235,8 +1197,6 @@ ixgb_optics_reset(struct ixgb_hw *hw)
1235 IXGB_PHY_ADDRESS, 1197 IXGB_PHY_ADDRESS,
1236 MDIO_MMD_PMAPMD); 1198 MDIO_MMD_PMAPMD);
1237 } 1199 }
1238
1239 return;
1240} 1200}
1241 1201
1242/****************************************************************************** 1202/******************************************************************************
@@ -1297,6 +1257,4 @@ ixgb_optics_reset_bcm(struct ixgb_hw *hw)
1297 1257
1298 /* SerDes needs extra delay */ 1258 /* SerDes needs extra delay */
1299 msleep(IXGB_SUN_PHY_RESET_DELAY); 1259 msleep(IXGB_SUN_PHY_RESET_DELAY);
1300
1301 return;
1302} 1260}
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index af6ca3aab5ad..873d32b89fba 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -636,18 +636,6 @@ struct ixgb_flash_buffer {
636 u8 filler3[0xAAAA]; 636 u8 filler3[0xAAAA];
637}; 637};
638 638
639/*
640 * This is a little-endian specific check.
641 */
642#define IS_MULTICAST(Address) \
643 (bool)(((u8 *)(Address))[0] & ((u8)0x01))
644
645/*
646 * Check whether an address is broadcast.
647 */
648#define IS_BROADCAST(Address) \
649 ((((u8 *)(Address))[0] == ((u8)0xff)) && (((u8 *)(Address))[1] == ((u8)0xff)))
650
651/* Flow control parameters */ 639/* Flow control parameters */
652struct ixgb_fc { 640struct ixgb_fc {
653 u32 high_water; /* Flow Control High-water */ 641 u32 high_water; /* Flow Control High-water */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index c9fef65cb98b..c6b75c83100c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -26,6 +26,8 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include "ixgb.h" 31#include "ixgb.h"
30 32
31char ixgb_driver_name[] = "ixgb"; 33char ixgb_driver_name[] = "ixgb";
@@ -146,10 +148,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
146static int __init 148static int __init
147ixgb_init_module(void) 149ixgb_init_module(void)
148{ 150{
149 printk(KERN_INFO "%s - version %s\n", 151 pr_info("%s - version %s\n", ixgb_driver_string, ixgb_driver_version);
150 ixgb_driver_string, ixgb_driver_version); 152 pr_info("%s\n", ixgb_copyright);
151
152 printk(KERN_INFO "%s\n", ixgb_copyright);
153 153
154 return pci_register_driver(&ixgb_driver); 154 return pci_register_driver(&ixgb_driver);
155} 155}
@@ -368,17 +368,22 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
368 if (err) 368 if (err)
369 return err; 369 return err;
370 370
371 if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) && 371 pci_using_dac = 0;
372 !(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) { 372 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
373 pci_using_dac = 1; 373 if (!err) {
374 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
375 if (!err)
376 pci_using_dac = 1;
374 } else { 377 } else {
375 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) || 378 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
376 (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) { 379 if (err) {
377 printk(KERN_ERR 380 err = dma_set_coherent_mask(&pdev->dev,
378 "ixgb: No usable DMA configuration, aborting\n"); 381 DMA_BIT_MASK(32));
379 goto err_dma_mask; 382 if (err) {
383 pr_err("No usable DMA configuration, aborting\n");
384 goto err_dma_mask;
385 }
380 } 386 }
381 pci_using_dac = 0;
382 } 387 }
383 388
384 err = pci_request_regions(pdev, ixgb_driver_name); 389 err = pci_request_regions(pdev, ixgb_driver_name);
@@ -674,7 +679,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
674 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc); 679 txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
675 txdr->size = ALIGN(txdr->size, 4096); 680 txdr->size = ALIGN(txdr->size, 4096);
676 681
677 txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma); 682 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
683 GFP_KERNEL);
678 if (!txdr->desc) { 684 if (!txdr->desc) {
679 vfree(txdr->buffer_info); 685 vfree(txdr->buffer_info);
680 netif_err(adapter, probe, adapter->netdev, 686 netif_err(adapter, probe, adapter->netdev,
@@ -763,7 +769,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
763 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc); 769 rxdr->size = rxdr->count * sizeof(struct ixgb_rx_desc);
764 rxdr->size = ALIGN(rxdr->size, 4096); 770 rxdr->size = ALIGN(rxdr->size, 4096);
765 771
766 rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma); 772 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
773 GFP_KERNEL);
767 774
768 if (!rxdr->desc) { 775 if (!rxdr->desc) {
769 vfree(rxdr->buffer_info); 776 vfree(rxdr->buffer_info);
@@ -884,8 +891,8 @@ ixgb_free_tx_resources(struct ixgb_adapter *adapter)
884 vfree(adapter->tx_ring.buffer_info); 891 vfree(adapter->tx_ring.buffer_info);
885 adapter->tx_ring.buffer_info = NULL; 892 adapter->tx_ring.buffer_info = NULL;
886 893
887 pci_free_consistent(pdev, adapter->tx_ring.size, 894 dma_free_coherent(&pdev->dev, adapter->tx_ring.size,
888 adapter->tx_ring.desc, adapter->tx_ring.dma); 895 adapter->tx_ring.desc, adapter->tx_ring.dma);
889 896
890 adapter->tx_ring.desc = NULL; 897 adapter->tx_ring.desc = NULL;
891} 898}
@@ -896,12 +903,11 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
896{ 903{
897 if (buffer_info->dma) { 904 if (buffer_info->dma) {
898 if (buffer_info->mapped_as_page) 905 if (buffer_info->mapped_as_page)
899 pci_unmap_page(adapter->pdev, buffer_info->dma, 906 dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
900 buffer_info->length, PCI_DMA_TODEVICE); 907 buffer_info->length, DMA_TO_DEVICE);
901 else 908 else
902 pci_unmap_single(adapter->pdev, buffer_info->dma, 909 dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
903 buffer_info->length, 910 buffer_info->length, DMA_TO_DEVICE);
904 PCI_DMA_TODEVICE);
905 buffer_info->dma = 0; 911 buffer_info->dma = 0;
906 } 912 }
907 913
@@ -967,7 +973,8 @@ ixgb_free_rx_resources(struct ixgb_adapter *adapter)
967 vfree(rx_ring->buffer_info); 973 vfree(rx_ring->buffer_info);
968 rx_ring->buffer_info = NULL; 974 rx_ring->buffer_info = NULL;
969 975
970 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 976 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
977 rx_ring->dma);
971 978
972 rx_ring->desc = NULL; 979 rx_ring->desc = NULL;
973} 980}
@@ -991,10 +998,10 @@ ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
991 for (i = 0; i < rx_ring->count; i++) { 998 for (i = 0; i < rx_ring->count; i++) {
992 buffer_info = &rx_ring->buffer_info[i]; 999 buffer_info = &rx_ring->buffer_info[i];
993 if (buffer_info->dma) { 1000 if (buffer_info->dma) {
994 pci_unmap_single(pdev, 1001 dma_unmap_single(&pdev->dev,
995 buffer_info->dma, 1002 buffer_info->dma,
996 buffer_info->length, 1003 buffer_info->length,
997 PCI_DMA_FROMDEVICE); 1004 DMA_FROM_DEVICE);
998 buffer_info->dma = 0; 1005 buffer_info->dma = 0;
999 buffer_info->length = 0; 1006 buffer_info->length = 0;
1000 } 1007 }
@@ -1058,7 +1065,7 @@ ixgb_set_multi(struct net_device *netdev)
1058{ 1065{
1059 struct ixgb_adapter *adapter = netdev_priv(netdev); 1066 struct ixgb_adapter *adapter = netdev_priv(netdev);
1060 struct ixgb_hw *hw = &adapter->hw; 1067 struct ixgb_hw *hw = &adapter->hw;
1061 struct dev_mc_list *mc_ptr; 1068 struct netdev_hw_addr *ha;
1062 u32 rctl; 1069 u32 rctl;
1063 int i; 1070 int i;
1064 1071
@@ -1089,9 +1096,9 @@ ixgb_set_multi(struct net_device *netdev)
1089 IXGB_WRITE_REG(hw, RCTL, rctl); 1096 IXGB_WRITE_REG(hw, RCTL, rctl);
1090 1097
1091 i = 0; 1098 i = 0;
1092 netdev_for_each_mc_addr(mc_ptr, netdev) 1099 netdev_for_each_mc_addr(ha, netdev)
1093 memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS], 1100 memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
1094 mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS); 1101 ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS);
1095 1102
1096 ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0); 1103 ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
1097 } 1104 }
@@ -1118,15 +1125,14 @@ ixgb_watchdog(unsigned long data)
1118 1125
1119 if (adapter->hw.link_up) { 1126 if (adapter->hw.link_up) {
1120 if (!netif_carrier_ok(netdev)) { 1127 if (!netif_carrier_ok(netdev)) {
1121 printk(KERN_INFO "ixgb: %s NIC Link is Up 10 Gbps " 1128 netdev_info(netdev,
1122 "Full Duplex, Flow Control: %s\n", 1129 "NIC Link is Up 10 Gbps Full Duplex, Flow Control: %s\n",
1123 netdev->name, 1130 (adapter->hw.fc.type == ixgb_fc_full) ?
1124 (adapter->hw.fc.type == ixgb_fc_full) ? 1131 "RX/TX" :
1125 "RX/TX" : 1132 (adapter->hw.fc.type == ixgb_fc_rx_pause) ?
1126 ((adapter->hw.fc.type == ixgb_fc_rx_pause) ? 1133 "RX" :
1127 "RX" : 1134 (adapter->hw.fc.type == ixgb_fc_tx_pause) ?
1128 ((adapter->hw.fc.type == ixgb_fc_tx_pause) ? 1135 "TX" : "None");
1129 "TX" : "None")));
1130 adapter->link_speed = 10000; 1136 adapter->link_speed = 10000;
1131 adapter->link_duplex = FULL_DUPLEX; 1137 adapter->link_duplex = FULL_DUPLEX;
1132 netif_carrier_on(netdev); 1138 netif_carrier_on(netdev);
@@ -1135,8 +1141,7 @@ ixgb_watchdog(unsigned long data)
1135 if (netif_carrier_ok(netdev)) { 1141 if (netif_carrier_ok(netdev)) {
1136 adapter->link_speed = 0; 1142 adapter->link_speed = 0;
1137 adapter->link_duplex = 0; 1143 adapter->link_duplex = 0;
1138 printk(KERN_INFO "ixgb: %s NIC Link is Down\n", 1144 netdev_info(netdev, "NIC Link is Down\n");
1139 netdev->name);
1140 netif_carrier_off(netdev); 1145 netif_carrier_off(netdev);
1141 } 1146 }
1142 } 1147 }
@@ -1303,9 +1308,10 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1303 WARN_ON(buffer_info->dma != 0); 1308 WARN_ON(buffer_info->dma != 0);
1304 buffer_info->time_stamp = jiffies; 1309 buffer_info->time_stamp = jiffies;
1305 buffer_info->mapped_as_page = false; 1310 buffer_info->mapped_as_page = false;
1306 buffer_info->dma = pci_map_single(pdev, skb->data + offset, 1311 buffer_info->dma = dma_map_single(&pdev->dev,
1307 size, PCI_DMA_TODEVICE); 1312 skb->data + offset,
1308 if (pci_dma_mapping_error(pdev, buffer_info->dma)) 1313 size, DMA_TO_DEVICE);
1314 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
1309 goto dma_error; 1315 goto dma_error;
1310 buffer_info->next_to_watch = 0; 1316 buffer_info->next_to_watch = 0;
1311 1317
@@ -1344,10 +1350,9 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1344 buffer_info->time_stamp = jiffies; 1350 buffer_info->time_stamp = jiffies;
1345 buffer_info->mapped_as_page = true; 1351 buffer_info->mapped_as_page = true;
1346 buffer_info->dma = 1352 buffer_info->dma =
1347 pci_map_page(pdev, frag->page, 1353 dma_map_page(&pdev->dev, frag->page,
1348 offset, size, 1354 offset, size, DMA_TO_DEVICE);
1349 PCI_DMA_TODEVICE); 1355 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
1350 if (pci_dma_mapping_error(pdev, buffer_info->dma))
1351 goto dma_error; 1356 goto dma_error;
1352 buffer_info->next_to_watch = 0; 1357 buffer_info->next_to_watch = 0;
1353 1358
@@ -1916,6 +1921,31 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
1916 } 1921 }
1917} 1922}
1918 1923
1924/*
1925 * this should improve performance for small packets with large amounts
1926 * of reassembly being done in the stack
1927 */
1928static void ixgb_check_copybreak(struct net_device *netdev,
1929 struct ixgb_buffer *buffer_info,
1930 u32 length, struct sk_buff **skb)
1931{
1932 struct sk_buff *new_skb;
1933
1934 if (length > copybreak)
1935 return;
1936
1937 new_skb = netdev_alloc_skb_ip_align(netdev, length);
1938 if (!new_skb)
1939 return;
1940
1941 skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
1942 (*skb)->data - NET_IP_ALIGN,
1943 length + NET_IP_ALIGN);
1944 /* save the skb in buffer_info as good */
1945 buffer_info->skb = *skb;
1946 *skb = new_skb;
1947}
1948
1919/** 1949/**
1920 * ixgb_clean_rx_irq - Send received data up the network stack, 1950 * ixgb_clean_rx_irq - Send received data up the network stack,
1921 * @adapter: board private structure 1951 * @adapter: board private structure
@@ -1952,11 +1982,14 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1952 1982
1953 prefetch(skb->data - NET_IP_ALIGN); 1983 prefetch(skb->data - NET_IP_ALIGN);
1954 1984
1955 if (++i == rx_ring->count) i = 0; 1985 if (++i == rx_ring->count)
1986 i = 0;
1956 next_rxd = IXGB_RX_DESC(*rx_ring, i); 1987 next_rxd = IXGB_RX_DESC(*rx_ring, i);
1957 prefetch(next_rxd); 1988 prefetch(next_rxd);
1958 1989
1959 if ((j = i + 1) == rx_ring->count) j = 0; 1990 j = i + 1;
1991 if (j == rx_ring->count)
1992 j = 0;
1960 next2_buffer = &rx_ring->buffer_info[j]; 1993 next2_buffer = &rx_ring->buffer_info[j];
1961 prefetch(next2_buffer); 1994 prefetch(next2_buffer);
1962 1995
@@ -1965,10 +1998,10 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1965 cleaned = true; 1998 cleaned = true;
1966 cleaned_count++; 1999 cleaned_count++;
1967 2000
1968 pci_unmap_single(pdev, 2001 dma_unmap_single(&pdev->dev,
1969 buffer_info->dma, 2002 buffer_info->dma,
1970 buffer_info->length, 2003 buffer_info->length,
1971 PCI_DMA_FROMDEVICE); 2004 DMA_FROM_DEVICE);
1972 buffer_info->dma = 0; 2005 buffer_info->dma = 0;
1973 2006
1974 length = le16_to_cpu(rx_desc->length); 2007 length = le16_to_cpu(rx_desc->length);
@@ -1992,25 +2025,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1992 goto rxdesc_done; 2025 goto rxdesc_done;
1993 } 2026 }
1994 2027
1995 /* code added for copybreak, this should improve 2028 ixgb_check_copybreak(netdev, buffer_info, length, &skb);
1996 * performance for small packets with large amounts
1997 * of reassembly being done in the stack */
1998 if (length < copybreak) {
1999 struct sk_buff *new_skb =
2000 netdev_alloc_skb_ip_align(netdev, length);
2001 if (new_skb) {
2002 skb_copy_to_linear_data_offset(new_skb,
2003 -NET_IP_ALIGN,
2004 (skb->data -
2005 NET_IP_ALIGN),
2006 (length +
2007 NET_IP_ALIGN));
2008 /* save the skb in buffer_info as good */
2009 buffer_info->skb = skb;
2010 skb = new_skb;
2011 }
2012 }
2013 /* end copybreak code */
2014 2029
2015 /* Good Receive */ 2030 /* Good Receive */
2016 skb_put(skb, length); 2031 skb_put(skb, length);
@@ -2091,10 +2106,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2091 buffer_info->skb = skb; 2106 buffer_info->skb = skb;
2092 buffer_info->length = adapter->rx_buffer_len; 2107 buffer_info->length = adapter->rx_buffer_len;
2093map_skb: 2108map_skb:
2094 buffer_info->dma = pci_map_single(pdev, 2109 buffer_info->dma = dma_map_single(&pdev->dev,
2095 skb->data, 2110 skb->data,
2096 adapter->rx_buffer_len, 2111 adapter->rx_buffer_len,
2097 PCI_DMA_FROMDEVICE); 2112 DMA_FROM_DEVICE);
2098 2113
2099 rx_desc = IXGB_RX_DESC(*rx_ring, i); 2114 rx_desc = IXGB_RX_DESC(*rx_ring, i);
2100 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma); 2115 rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2322,7 +2337,7 @@ static void ixgb_io_resume(struct pci_dev *pdev)
2322 2337
2323 if (netif_running(netdev)) { 2338 if (netif_running(netdev)) {
2324 if (ixgb_up(adapter)) { 2339 if (ixgb_up(adapter)) {
2325 printk ("ixgb: can't bring device back up after reset\n"); 2340 pr_err("can't bring device back up after reset\n");
2326 return; 2341 return;
2327 } 2342 }
2328 } 2343 }
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index 371a6be4d965..e361185920ef 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -41,20 +41,8 @@
41 41
42#undef ASSERT 42#undef ASSERT
43#define ASSERT(x) BUG_ON(!(x)) 43#define ASSERT(x) BUG_ON(!(x))
44#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B) 44
45 45#define ENTER() pr_debug("%s\n", __func__);
46#ifdef DBG
47#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
48#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)
49#else
50#define DEBUGOUT(S)
51#define DEBUGOUT1(S, A...)
52#endif
53
54#define DEBUGFUNC(F) DEBUGOUT(F)
55#define DEBUGOUT2 DEBUGOUT1
56#define DEBUGOUT3 DEBUGOUT2
57#define DEBUGOUT7 DEBUGOUT3
58 46
59#define IXGB_WRITE_REG(a, reg, value) ( \ 47#define IXGB_WRITE_REG(a, reg, value) ( \
60 writel((value), ((a)->hw_addr + IXGB_##reg))) 48 writel((value), ((a)->hw_addr + IXGB_##reg)))
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index af35e1ddadd6..88a08f056241 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -26,6 +26,8 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include "ixgb.h" 31#include "ixgb.h"
30 32
31/* This is the only thing that needs to be changed to adjust the 33/* This is the only thing that needs to be changed to adjust the
@@ -209,16 +211,16 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
209 case enable_option: 211 case enable_option:
210 switch (*value) { 212 switch (*value) {
211 case OPTION_ENABLED: 213 case OPTION_ENABLED:
212 printk(KERN_INFO "%s Enabled\n", opt->name); 214 pr_info("%s Enabled\n", opt->name);
213 return 0; 215 return 0;
214 case OPTION_DISABLED: 216 case OPTION_DISABLED:
215 printk(KERN_INFO "%s Disabled\n", opt->name); 217 pr_info("%s Disabled\n", opt->name);
216 return 0; 218 return 0;
217 } 219 }
218 break; 220 break;
219 case range_option: 221 case range_option:
220 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { 222 if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
221 printk(KERN_INFO "%s set to %i\n", opt->name, *value); 223 pr_info("%s set to %i\n", opt->name, *value);
222 return 0; 224 return 0;
223 } 225 }
224 break; 226 break;
@@ -230,7 +232,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
230 ent = &opt->arg.l.p[i]; 232 ent = &opt->arg.l.p[i];
231 if (*value == ent->i) { 233 if (*value == ent->i) {
232 if (ent->str[0] != '\0') 234 if (ent->str[0] != '\0')
233 printk(KERN_INFO "%s\n", ent->str); 235 pr_info("%s\n", ent->str);
234 return 0; 236 return 0;
235 } 237 }
236 } 238 }
@@ -240,8 +242,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
240 BUG(); 242 BUG();
241 } 243 }
242 244
243 printk(KERN_INFO "Invalid %s specified (%i) %s\n", 245 pr_info("Invalid %s specified (%i) %s\n", opt->name, *value, opt->err);
244 opt->name, *value, opt->err);
245 *value = opt->def; 246 *value = opt->def;
246 return -1; 247 return -1;
247} 248}
@@ -261,9 +262,8 @@ ixgb_check_options(struct ixgb_adapter *adapter)
261{ 262{
262 int bd = adapter->bd_number; 263 int bd = adapter->bd_number;
263 if (bd >= IXGB_MAX_NIC) { 264 if (bd >= IXGB_MAX_NIC) {
264 printk(KERN_NOTICE 265 pr_notice("Warning: no configuration for board #%i\n", bd);
265 "Warning: no configuration for board #%i\n", bd); 266 pr_notice("Using defaults for all values\n");
266 printk(KERN_NOTICE "Using defaults for all values\n");
267 } 267 }
268 268
269 { /* Transmit Descriptor Count */ 269 { /* Transmit Descriptor Count */
@@ -363,8 +363,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
363 adapter->hw.fc.high_water = opt.def; 363 adapter->hw.fc.high_water = opt.def;
364 } 364 }
365 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) 365 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
366 printk(KERN_INFO 366 pr_info("Ignoring RxFCHighThresh when no RxFC\n");
367 "Ignoring RxFCHighThresh when no RxFC\n");
368 } 367 }
369 { /* Receive Flow Control Low Threshold */ 368 { /* Receive Flow Control Low Threshold */
370 const struct ixgb_option opt = { 369 const struct ixgb_option opt = {
@@ -383,8 +382,7 @@ ixgb_check_options(struct ixgb_adapter *adapter)
383 adapter->hw.fc.low_water = opt.def; 382 adapter->hw.fc.low_water = opt.def;
384 } 383 }
385 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) 384 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
386 printk(KERN_INFO 385 pr_info("Ignoring RxFCLowThresh when no RxFC\n");
387 "Ignoring RxFCLowThresh when no RxFC\n");
388 } 386 }
389 { /* Flow Control Pause Time Request*/ 387 { /* Flow Control Pause Time Request*/
390 const struct ixgb_option opt = { 388 const struct ixgb_option opt = {
@@ -404,17 +402,14 @@ ixgb_check_options(struct ixgb_adapter *adapter)
404 adapter->hw.fc.pause_time = opt.def; 402 adapter->hw.fc.pause_time = opt.def;
405 } 403 }
406 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) ) 404 if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
407 printk(KERN_INFO 405 pr_info("Ignoring FCReqTimeout when no RxFC\n");
408 "Ignoring FCReqTimeout when no RxFC\n");
409 } 406 }
410 /* high low and spacing check for rx flow control thresholds */ 407 /* high low and spacing check for rx flow control thresholds */
411 if (adapter->hw.fc.type & ixgb_fc_tx_pause) { 408 if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
412 /* high must be greater than low */ 409 /* high must be greater than low */
413 if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) { 410 if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
414 /* set defaults */ 411 /* set defaults */
415 printk(KERN_INFO 412 pr_info("RxFCHighThresh must be >= (RxFCLowThresh + 8), Using Defaults\n");
416 "RxFCHighThresh must be >= (RxFCLowThresh + 8), "
417 "Using Defaults\n");
418 adapter->hw.fc.high_water = DEFAULT_FCRTH; 413 adapter->hw.fc.high_water = DEFAULT_FCRTH;
419 adapter->hw.fc.low_water = DEFAULT_FCRTL; 414 adapter->hw.fc.low_water = DEFAULT_FCRTL;
420 } 415 }
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 79c35ae3718c..d0ea3d6dea95 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -111,7 +111,10 @@ struct vf_data_storage {
111 u16 default_vf_vlan_id; 111 u16 default_vf_vlan_id;
112 u16 vlans_enabled; 112 u16 vlans_enabled;
113 bool clear_to_send; 113 bool clear_to_send;
114 bool pf_set_mac;
114 int rar; 115 int rar;
116 u16 pf_vlan; /* When set, guest VLAN config not allowed. */
117 u16 pf_qos;
115}; 118};
116 119
117/* wrapper around a pointer to a socket buffer, 120/* wrapper around a pointer to a socket buffer,
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 35a06b47587b..f2b7ff44215b 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -42,9 +42,9 @@ static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
42 ixgbe_link_speed *speed, 42 ixgbe_link_speed *speed,
43 bool *autoneg); 43 bool *autoneg);
44static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, 44static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
45 ixgbe_link_speed speed, 45 ixgbe_link_speed speed,
46 bool autoneg, 46 bool autoneg,
47 bool autoneg_wait_to_complete); 47 bool autoneg_wait_to_complete);
48static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, 48static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
49 u8 *eeprom_data); 49 u8 *eeprom_data);
50 50
@@ -1221,7 +1221,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1221 1221
1222static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1222static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
1223 .init_params = &ixgbe_init_eeprom_params_generic, 1223 .init_params = &ixgbe_init_eeprom_params_generic,
1224 .read = &ixgbe_read_eeprom_generic, 1224 .read = &ixgbe_read_eerd_generic,
1225 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 1225 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
1226 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 1226 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
1227}; 1227};
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index b405a00817c6..e9706eb8e4ff 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -39,6 +39,8 @@
39#define IXGBE_82599_MC_TBL_SIZE 128 39#define IXGBE_82599_MC_TBL_SIZE 128
40#define IXGBE_82599_VFT_TBL_SIZE 128 40#define IXGBE_82599_VFT_TBL_SIZE 128
41 41
42void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
43void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
42void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); 44void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw);
43s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, 45s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
44 ixgbe_link_speed speed, 46 ixgbe_link_speed speed,
@@ -69,8 +71,14 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
69 if (hw->phy.multispeed_fiber) { 71 if (hw->phy.multispeed_fiber) {
70 /* Set up dual speed SFP+ support */ 72 /* Set up dual speed SFP+ support */
71 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; 73 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
74 mac->ops.disable_tx_laser =
75 &ixgbe_disable_tx_laser_multispeed_fiber;
76 mac->ops.enable_tx_laser =
77 &ixgbe_enable_tx_laser_multispeed_fiber;
72 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; 78 mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
73 } else { 79 } else {
80 mac->ops.disable_tx_laser = NULL;
81 mac->ops.enable_tx_laser = NULL;
74 mac->ops.flap_tx_laser = NULL; 82 mac->ops.flap_tx_laser = NULL;
75 if ((mac->ops.get_media_type(hw) == 83 if ((mac->ops.get_media_type(hw) ==
76 ixgbe_media_type_backplane) && 84 ixgbe_media_type_backplane) &&
@@ -125,27 +133,6 @@ setup_sfp_out:
125 return ret_val; 133 return ret_val;
126} 134}
127 135
128/**
129 * ixgbe_get_pcie_msix_count_82599 - Gets MSI-X vector count
130 * @hw: pointer to hardware structure
131 *
132 * Read PCIe configuration space, and get the MSI-X vector count from
133 * the capabilities table.
134 **/
135static u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw)
136{
137 struct ixgbe_adapter *adapter = hw->back;
138 u16 msix_count;
139 pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
140 &msix_count);
141 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
142
143 /* MSI-X count is zero-based in HW, so increment to give proper value */
144 msix_count++;
145
146 return msix_count;
147}
148
149static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) 136static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
150{ 137{
151 struct ixgbe_mac_info *mac = &hw->mac; 138 struct ixgbe_mac_info *mac = &hw->mac;
@@ -157,7 +144,7 @@ static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
157 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; 144 mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES;
158 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; 145 mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES;
159 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; 146 mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES;
160 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw); 147 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
161 148
162 return 0; 149 return 0;
163} 150}
@@ -415,6 +402,44 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
415 return status; 402 return status;
416} 403}
417 404
405 /**
406 * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser
407 * @hw: pointer to hardware structure
408 *
409 * The base drivers may require better control over SFP+ module
410 * PHY states. This includes selectively shutting down the Tx
411 * laser on the PHY, effectively halting physical link.
412 **/
413void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
414{
415 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
416
417 /* Disable tx laser; allow 100us to go dark per spec */
418 esdp_reg |= IXGBE_ESDP_SDP3;
419 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
420 IXGBE_WRITE_FLUSH(hw);
421 udelay(100);
422}
423
424/**
425 * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser
426 * @hw: pointer to hardware structure
427 *
428 * The base drivers may require better control over SFP+ module
429 * PHY states. This includes selectively turning on the Tx
430 * laser on the PHY, effectively starting physical link.
431 **/
432void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
433{
434 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
435
436 /* Enable tx laser; allow 100ms to light up */
437 esdp_reg &= ~IXGBE_ESDP_SDP3;
438 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
439 IXGBE_WRITE_FLUSH(hw);
440 msleep(100);
441}
442
418/** 443/**
419 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser 444 * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser
420 * @hw: pointer to hardware structure 445 * @hw: pointer to hardware structure
@@ -429,23 +454,11 @@ s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
429 **/ 454 **/
430void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) 455void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
431{ 456{
432 u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
433
434 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n"); 457 hw_dbg(hw, "ixgbe_flap_tx_laser_multispeed_fiber\n");
435 458
436 if (hw->mac.autotry_restart) { 459 if (hw->mac.autotry_restart) {
437 /* Disable tx laser; allow 100us to go dark per spec */ 460 ixgbe_disable_tx_laser_multispeed_fiber(hw);
438 esdp_reg |= IXGBE_ESDP_SDP3; 461 ixgbe_enable_tx_laser_multispeed_fiber(hw);
439 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
440 IXGBE_WRITE_FLUSH(hw);
441 udelay(100);
442
443 /* Enable tx laser; allow 100ms to light up */
444 esdp_reg &= ~IXGBE_ESDP_SDP3;
445 IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
446 IXGBE_WRITE_FLUSH(hw);
447 msleep(100);
448
449 hw->mac.autotry_restart = false; 462 hw->mac.autotry_restart = false;
450 } 463 }
451} 464}
@@ -608,6 +621,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
608 s32 i, j; 621 s32 i, j;
609 bool link_up = false; 622 bool link_up = false;
610 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 623 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
624 struct ixgbe_adapter *adapter = hw->back;
611 625
612 hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n"); 626 hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
613 627
@@ -692,64 +706,14 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
692 autoneg_wait_to_complete); 706 autoneg_wait_to_complete);
693 707
694out: 708out:
709 if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL))
710 netif_info(adapter, hw, adapter->netdev, "Smartspeed has"
711 " downgraded the link speed from the maximum"
712 " advertised\n");
695 return status; 713 return status;
696} 714}
697 715
698/** 716/**
699 * ixgbe_check_mac_link_82599 - Determine link and speed status
700 * @hw: pointer to hardware structure
701 * @speed: pointer to link speed
702 * @link_up: true when link is up
703 * @link_up_wait_to_complete: bool used to wait for link up or not
704 *
705 * Reads the links register to determine if link is up and the current speed
706 **/
707static s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw,
708 ixgbe_link_speed *speed,
709 bool *link_up,
710 bool link_up_wait_to_complete)
711{
712 u32 links_reg;
713 u32 i;
714
715 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
716 if (link_up_wait_to_complete) {
717 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
718 if (links_reg & IXGBE_LINKS_UP) {
719 *link_up = true;
720 break;
721 } else {
722 *link_up = false;
723 }
724 msleep(100);
725 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
726 }
727 } else {
728 if (links_reg & IXGBE_LINKS_UP)
729 *link_up = true;
730 else
731 *link_up = false;
732 }
733
734 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
735 IXGBE_LINKS_SPEED_10G_82599)
736 *speed = IXGBE_LINK_SPEED_10GB_FULL;
737 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
738 IXGBE_LINKS_SPEED_1G_82599)
739 *speed = IXGBE_LINK_SPEED_1GB_FULL;
740 else
741 *speed = IXGBE_LINK_SPEED_100_FULL;
742
743 /* if link is down, zero out the current_mode */
744 if (*link_up == false) {
745 hw->fc.current_mode = ixgbe_fc_none;
746 hw->fc.fc_was_autonegged = false;
747 }
748
749 return 0;
750}
751
752/**
753 * ixgbe_setup_mac_link_82599 - Set MAC link speed 717 * ixgbe_setup_mac_link_82599 - Set MAC link speed
754 * @hw: pointer to hardware structure 718 * @hw: pointer to hardware structure
755 * @speed: new link speed 719 * @speed: new link speed
@@ -1011,243 +975,6 @@ reset_hw_out:
1011} 975}
1012 976
1013/** 977/**
1014 * ixgbe_clear_vmdq_82599 - Disassociate a VMDq pool index from a rx address
1015 * @hw: pointer to hardware struct
1016 * @rar: receive address register index to disassociate
1017 * @vmdq: VMDq pool index to remove from the rar
1018 **/
1019static s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
1020{
1021 u32 mpsar_lo, mpsar_hi;
1022 u32 rar_entries = hw->mac.num_rar_entries;
1023
1024 if (rar < rar_entries) {
1025 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
1026 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
1027
1028 if (!mpsar_lo && !mpsar_hi)
1029 goto done;
1030
1031 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
1032 if (mpsar_lo) {
1033 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
1034 mpsar_lo = 0;
1035 }
1036 if (mpsar_hi) {
1037 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
1038 mpsar_hi = 0;
1039 }
1040 } else if (vmdq < 32) {
1041 mpsar_lo &= ~(1 << vmdq);
1042 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
1043 } else {
1044 mpsar_hi &= ~(1 << (vmdq - 32));
1045 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
1046 }
1047
1048 /* was that the last pool using this rar? */
1049 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
1050 hw->mac.ops.clear_rar(hw, rar);
1051 } else {
1052 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
1053 }
1054
1055done:
1056 return 0;
1057}
1058
1059/**
1060 * ixgbe_set_vmdq_82599 - Associate a VMDq pool index with a rx address
1061 * @hw: pointer to hardware struct
1062 * @rar: receive address register index to associate with a VMDq index
1063 * @vmdq: VMDq pool index
1064 **/
1065static s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
1066{
1067 u32 mpsar;
1068 u32 rar_entries = hw->mac.num_rar_entries;
1069
1070 if (rar < rar_entries) {
1071 if (vmdq < 32) {
1072 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
1073 mpsar |= 1 << vmdq;
1074 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
1075 } else {
1076 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
1077 mpsar |= 1 << (vmdq - 32);
1078 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
1079 }
1080 } else {
1081 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
1082 }
1083 return 0;
1084}
1085
1086/**
1087 * ixgbe_set_vfta_82599 - Set VLAN filter table
1088 * @hw: pointer to hardware structure
1089 * @vlan: VLAN id to write to VLAN filter
1090 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
1091 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
1092 *
1093 * Turn on/off specified VLAN in the VLAN filter table.
1094 **/
1095static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
1096 bool vlan_on)
1097{
1098 u32 regindex;
1099 u32 vlvf_index;
1100 u32 bitindex;
1101 u32 bits;
1102 u32 first_empty_slot;
1103 u32 vt_ctl;
1104
1105 if (vlan > 4095)
1106 return IXGBE_ERR_PARAM;
1107
1108 /*
1109 * this is a 2 part operation - first the VFTA, then the
1110 * VLVF and VLVFB if vind is set
1111 */
1112
1113 /* Part 1
1114 * The VFTA is a bitstring made up of 128 32-bit registers
1115 * that enable the particular VLAN id, much like the MTA:
1116 * bits[11-5]: which register
1117 * bits[4-0]: which bit in the register
1118 */
1119 regindex = (vlan >> 5) & 0x7F;
1120 bitindex = vlan & 0x1F;
1121 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1122 if (vlan_on)
1123 bits |= (1 << bitindex);
1124 else
1125 bits &= ~(1 << bitindex);
1126 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1127
1128
1129 /* Part 2
1130 * If VT mode is set
1131 * Either vlan_on
1132 * make sure the vlan is in VLVF
1133 * set the vind bit in the matching VLVFB
1134 * Or !vlan_on
1135 * clear the pool bit and possibly the vind
1136 */
1137 vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
1138 if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
1139 goto out;
1140
1141 /* find the vlanid or the first empty slot */
1142 first_empty_slot = 0;
1143
1144 for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
1145 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
1146 if (!bits && !first_empty_slot)
1147 first_empty_slot = vlvf_index;
1148 else if ((bits & 0x0FFF) == vlan)
1149 break;
1150 }
1151
1152 if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
1153 if (first_empty_slot)
1154 vlvf_index = first_empty_slot;
1155 else {
1156 hw_dbg(hw, "No space in VLVF.\n");
1157 goto out;
1158 }
1159 }
1160
1161 if (vlan_on) {
1162 /* set the pool bit */
1163 if (vind < 32) {
1164 bits = IXGBE_READ_REG(hw,
1165 IXGBE_VLVFB(vlvf_index * 2));
1166 bits |= (1 << vind);
1167 IXGBE_WRITE_REG(hw,
1168 IXGBE_VLVFB(vlvf_index * 2), bits);
1169 } else {
1170 bits = IXGBE_READ_REG(hw,
1171 IXGBE_VLVFB((vlvf_index * 2) + 1));
1172 bits |= (1 << (vind - 32));
1173 IXGBE_WRITE_REG(hw,
1174 IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
1175 }
1176 } else {
1177 /* clear the pool bit */
1178 if (vind < 32) {
1179 bits = IXGBE_READ_REG(hw,
1180 IXGBE_VLVFB(vlvf_index * 2));
1181 bits &= ~(1 << vind);
1182 IXGBE_WRITE_REG(hw,
1183 IXGBE_VLVFB(vlvf_index * 2), bits);
1184 bits |= IXGBE_READ_REG(hw,
1185 IXGBE_VLVFB((vlvf_index * 2) + 1));
1186 } else {
1187 bits = IXGBE_READ_REG(hw,
1188 IXGBE_VLVFB((vlvf_index * 2) + 1));
1189 bits &= ~(1 << (vind - 32));
1190 IXGBE_WRITE_REG(hw,
1191 IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
1192 bits |= IXGBE_READ_REG(hw,
1193 IXGBE_VLVFB(vlvf_index * 2));
1194 }
1195 }
1196
1197 if (bits) {
1198 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
1199 (IXGBE_VLVF_VIEN | vlan));
1200 /* if bits is non-zero then some pools/VFs are still
1201 * using this VLAN ID. Force the VFTA entry to on */
1202 bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
1203 bits |= (1 << bitindex);
1204 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
1205 }
1206 else
1207 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
1208
1209out:
1210 return 0;
1211}
1212
1213/**
1214 * ixgbe_clear_vfta_82599 - Clear VLAN filter table
1215 * @hw: pointer to hardware structure
1216 *
1217 * Clears the VLAN filer table, and the VMDq index associated with the filter
1218 **/
1219static s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
1220{
1221 u32 offset;
1222
1223 for (offset = 0; offset < hw->mac.vft_size; offset++)
1224 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
1225
1226 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
1227 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
1228 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0);
1229 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0);
1230 }
1231
1232 return 0;
1233}
1234
1235/**
1236 * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
1237 * @hw: pointer to hardware structure
1238 **/
1239static s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw)
1240{
1241 int i;
1242 hw_dbg(hw, " Clearing UTA\n");
1243
1244 for (i = 0; i < 128; i++)
1245 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
1246
1247 return 0;
1248}
1249
1250/**
1251 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. 978 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
1252 * @hw: pointer to hardware structure 979 * @hw: pointer to hardware structure
1253 **/ 980 **/
@@ -1269,7 +996,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1269 } 996 }
1270 if (i >= IXGBE_FDIRCMD_CMD_POLL) { 997 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1271 hw_dbg(hw ,"Flow Director previous command isn't complete, " 998 hw_dbg(hw ,"Flow Director previous command isn't complete, "
1272 "aborting table re-initialization. \n"); 999 "aborting table re-initialization.\n");
1273 return IXGBE_ERR_FDIR_REINIT_FAILED; 1000 return IXGBE_ERR_FDIR_REINIT_FAILED;
1274 } 1001 }
1275 1002
@@ -2428,10 +2155,14 @@ sfp_check:
2428 goto out; 2155 goto out;
2429 2156
2430 switch (hw->phy.type) { 2157 switch (hw->phy.type) {
2431 case ixgbe_phy_tw_tyco: 2158 case ixgbe_phy_sfp_passive_tyco:
2432 case ixgbe_phy_tw_unknown: 2159 case ixgbe_phy_sfp_passive_unknown:
2433 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; 2160 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
2434 break; 2161 break;
2162 case ixgbe_phy_sfp_ftl_active:
2163 case ixgbe_phy_sfp_active_unknown:
2164 physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
2165 break;
2435 case ixgbe_phy_sfp_avago: 2166 case ixgbe_phy_sfp_avago:
2436 case ixgbe_phy_sfp_ftl: 2167 case ixgbe_phy_sfp_ftl:
2437 case ixgbe_phy_sfp_intel: 2168 case ixgbe_phy_sfp_intel:
@@ -2511,75 +2242,6 @@ static s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
2511} 2242}
2512 2243
2513/** 2244/**
2514 * ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599
2515 * @hw: pointer to hardware structure
2516 * @san_mac_offset: SAN MAC address offset
2517 *
2518 * This function will read the EEPROM location for the SAN MAC address
2519 * pointer, and returns the value at that location. This is used in both
2520 * get and set mac_addr routines.
2521 **/
2522static s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
2523 u16 *san_mac_offset)
2524{
2525 /*
2526 * First read the EEPROM pointer to see if the MAC addresses are
2527 * available.
2528 */
2529 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
2530
2531 return 0;
2532}
2533
2534/**
2535 * ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599
2536 * @hw: pointer to hardware structure
2537 * @san_mac_addr: SAN MAC address
2538 *
2539 * Reads the SAN MAC address from the EEPROM, if it's available. This is
2540 * per-port, so set_lan_id() must be called before reading the addresses.
2541 * set_lan_id() is called by identify_sfp(), but this cannot be relied
2542 * upon for non-SFP connections, so we must call it here.
2543 **/
2544static s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
2545{
2546 u16 san_mac_data, san_mac_offset;
2547 u8 i;
2548
2549 /*
2550 * First read the EEPROM pointer to see if the MAC addresses are
2551 * available. If they're not, no point in calling set_lan_id() here.
2552 */
2553 ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);
2554
2555 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2556 /*
2557 * No addresses available in this EEPROM. It's not an
2558 * error though, so just wipe the local address and return.
2559 */
2560 for (i = 0; i < 6; i++)
2561 san_mac_addr[i] = 0xFF;
2562
2563 goto san_mac_addr_out;
2564 }
2565
2566 /* make sure we know which port we need to program */
2567 hw->mac.ops.set_lan_id(hw);
2568 /* apply the port offset to the address offset */
2569 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2570 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2571 for (i = 0; i < 3; i++) {
2572 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
2573 san_mac_addr[i * 2] = (u8)(san_mac_data);
2574 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2575 san_mac_offset++;
2576 }
2577
2578san_mac_addr_out:
2579 return 0;
2580}
2581
2582/**
2583 * ixgbe_verify_fw_version_82599 - verify fw version for 82599 2245 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
2584 * @hw: pointer to hardware structure 2246 * @hw: pointer to hardware structure
2585 * 2247 *
@@ -2681,7 +2343,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2681 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599, 2343 .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
2682 .enable_rx_dma = &ixgbe_enable_rx_dma_82599, 2344 .enable_rx_dma = &ixgbe_enable_rx_dma_82599,
2683 .get_mac_addr = &ixgbe_get_mac_addr_generic, 2345 .get_mac_addr = &ixgbe_get_mac_addr_generic,
2684 .get_san_mac_addr = &ixgbe_get_san_mac_addr_82599, 2346 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
2685 .get_device_caps = &ixgbe_get_device_caps_82599, 2347 .get_device_caps = &ixgbe_get_device_caps_82599,
2686 .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599, 2348 .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
2687 .stop_adapter = &ixgbe_stop_adapter_generic, 2349 .stop_adapter = &ixgbe_stop_adapter_generic,
@@ -2690,7 +2352,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2690 .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, 2352 .read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
2691 .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, 2353 .write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
2692 .setup_link = &ixgbe_setup_mac_link_82599, 2354 .setup_link = &ixgbe_setup_mac_link_82599,
2693 .check_link = &ixgbe_check_mac_link_82599, 2355 .check_link = &ixgbe_check_mac_link_generic,
2694 .get_link_capabilities = &ixgbe_get_link_capabilities_82599, 2356 .get_link_capabilities = &ixgbe_get_link_capabilities_82599,
2695 .led_on = &ixgbe_led_on_generic, 2357 .led_on = &ixgbe_led_on_generic,
2696 .led_off = &ixgbe_led_off_generic, 2358 .led_off = &ixgbe_led_off_generic,
@@ -2698,23 +2360,23 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2698 .blink_led_stop = &ixgbe_blink_led_stop_generic, 2360 .blink_led_stop = &ixgbe_blink_led_stop_generic,
2699 .set_rar = &ixgbe_set_rar_generic, 2361 .set_rar = &ixgbe_set_rar_generic,
2700 .clear_rar = &ixgbe_clear_rar_generic, 2362 .clear_rar = &ixgbe_clear_rar_generic,
2701 .set_vmdq = &ixgbe_set_vmdq_82599, 2363 .set_vmdq = &ixgbe_set_vmdq_generic,
2702 .clear_vmdq = &ixgbe_clear_vmdq_82599, 2364 .clear_vmdq = &ixgbe_clear_vmdq_generic,
2703 .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 2365 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
2704 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic, 2366 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
2705 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 2367 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
2706 .enable_mc = &ixgbe_enable_mc_generic, 2368 .enable_mc = &ixgbe_enable_mc_generic,
2707 .disable_mc = &ixgbe_disable_mc_generic, 2369 .disable_mc = &ixgbe_disable_mc_generic,
2708 .clear_vfta = &ixgbe_clear_vfta_82599, 2370 .clear_vfta = &ixgbe_clear_vfta_generic,
2709 .set_vfta = &ixgbe_set_vfta_82599, 2371 .set_vfta = &ixgbe_set_vfta_generic,
2710 .fc_enable = &ixgbe_fc_enable_generic, 2372 .fc_enable = &ixgbe_fc_enable_generic,
2711 .init_uta_tables = &ixgbe_init_uta_tables_82599, 2373 .init_uta_tables = &ixgbe_init_uta_tables_generic,
2712 .setup_sfp = &ixgbe_setup_sfp_modules_82599, 2374 .setup_sfp = &ixgbe_setup_sfp_modules_82599,
2713}; 2375};
2714 2376
2715static struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2377static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2716 .init_params = &ixgbe_init_eeprom_params_generic, 2378 .init_params = &ixgbe_init_eeprom_params_generic,
2717 .read = &ixgbe_read_eeprom_generic, 2379 .read = &ixgbe_read_eerd_generic,
2718 .write = &ixgbe_write_eeprom_generic, 2380 .write = &ixgbe_write_eeprom_generic,
2719 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 2381 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
2720 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 2382 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
@@ -2723,7 +2385,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2723static struct ixgbe_phy_operations phy_ops_82599 = { 2385static struct ixgbe_phy_operations phy_ops_82599 = {
2724 .identify = &ixgbe_identify_phy_82599, 2386 .identify = &ixgbe_identify_phy_82599,
2725 .identify_sfp = &ixgbe_identify_sfp_module_generic, 2387 .identify_sfp = &ixgbe_identify_sfp_module_generic,
2726 .init = &ixgbe_init_phy_ops_82599, 2388 .init = &ixgbe_init_phy_ops_82599,
2727 .reset = &ixgbe_reset_phy_generic, 2389 .reset = &ixgbe_reset_phy_generic,
2728 .read_reg = &ixgbe_read_phy_reg_generic, 2390 .read_reg = &ixgbe_read_phy_reg_generic,
2729 .write_reg = &ixgbe_write_phy_reg_generic, 2391 .write_reg = &ixgbe_write_phy_reg_generic,
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index eb49020903c1..1159d9138f05 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -34,7 +34,6 @@
34#include "ixgbe_common.h" 34#include "ixgbe_common.h"
35#include "ixgbe_phy.h" 35#include "ixgbe_phy.h"
36 36
37static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw);
38static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); 37static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw);
39static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 38static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw);
40static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 39static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
@@ -595,14 +594,14 @@ out:
595} 594}
596 595
597/** 596/**
598 * ixgbe_read_eeprom_generic - Read EEPROM word using EERD 597 * ixgbe_read_eerd_generic - Read EEPROM word using EERD
599 * @hw: pointer to hardware structure 598 * @hw: pointer to hardware structure
600 * @offset: offset of word in the EEPROM to read 599 * @offset: offset of word in the EEPROM to read
601 * @data: word read from the EEPROM 600 * @data: word read from the EEPROM
602 * 601 *
603 * Reads a 16 bit word from the EEPROM using the EERD register. 602 * Reads a 16 bit word from the EEPROM using the EERD register.
604 **/ 603 **/
605s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) 604s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
606{ 605{
607 u32 eerd; 606 u32 eerd;
608 s32 status; 607 s32 status;
@@ -614,15 +613,15 @@ s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data)
614 goto out; 613 goto out;
615 } 614 }
616 615
617 eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) + 616 eerd = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) +
618 IXGBE_EEPROM_READ_REG_START; 617 IXGBE_EEPROM_RW_REG_START;
619 618
620 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); 619 IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd);
621 status = ixgbe_poll_eeprom_eerd_done(hw); 620 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ);
622 621
623 if (status == 0) 622 if (status == 0)
624 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> 623 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >>
625 IXGBE_EEPROM_READ_REG_DATA); 624 IXGBE_EEPROM_RW_REG_DATA);
626 else 625 else
627 hw_dbg(hw, "Eeprom read timed out\n"); 626 hw_dbg(hw, "Eeprom read timed out\n");
628 627
@@ -631,20 +630,26 @@ out:
631} 630}
632 631
633/** 632/**
634 * ixgbe_poll_eeprom_eerd_done - Poll EERD status 633 * ixgbe_poll_eerd_eewr_done - Poll EERD read or EEWR write status
635 * @hw: pointer to hardware structure 634 * @hw: pointer to hardware structure
635 * @ee_reg: EEPROM flag for polling
636 * 636 *
637 * Polls the status bit (bit 1) of the EERD to determine when the read is done. 637 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
638 * read or write is done respectively.
638 **/ 639 **/
639static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw) 640s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
640{ 641{
641 u32 i; 642 u32 i;
642 u32 reg; 643 u32 reg;
643 s32 status = IXGBE_ERR_EEPROM; 644 s32 status = IXGBE_ERR_EEPROM;
644 645
645 for (i = 0; i < IXGBE_EERD_ATTEMPTS; i++) { 646 for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
646 reg = IXGBE_READ_REG(hw, IXGBE_EERD); 647 if (ee_reg == IXGBE_NVM_POLL_READ)
647 if (reg & IXGBE_EEPROM_READ_REG_DONE) { 648 reg = IXGBE_READ_REG(hw, IXGBE_EERD);
649 else
650 reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
651
652 if (reg & IXGBE_EEPROM_RW_REG_DONE) {
648 status = 0; 653 status = 0;
649 break; 654 break;
650 } 655 }
@@ -1392,14 +1397,17 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
1392 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1397 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1393 fctrl |= IXGBE_FCTRL_UPE; 1398 fctrl |= IXGBE_FCTRL_UPE;
1394 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 1399 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1400 hw->addr_ctrl.uc_set_promisc = true;
1395 } 1401 }
1396 } else { 1402 } else {
1397 /* only disable if set by overflow, not by user */ 1403 /* only disable if set by overflow, not by user */
1398 if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { 1404 if ((old_promisc_setting && hw->addr_ctrl.uc_set_promisc) &&
1405 !(hw->addr_ctrl.user_set_promisc)) {
1399 hw_dbg(hw, " Leaving address overflow promisc mode\n"); 1406 hw_dbg(hw, " Leaving address overflow promisc mode\n");
1400 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1407 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
1401 fctrl &= ~IXGBE_FCTRL_UPE; 1408 fctrl &= ~IXGBE_FCTRL_UPE;
1402 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 1409 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
1410 hw->addr_ctrl.uc_set_promisc = false;
1403 } 1411 }
1404 } 1412 }
1405 1413
@@ -1484,26 +1492,24 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1484/** 1492/**
1485 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses 1493 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
1486 * @hw: pointer to hardware structure 1494 * @hw: pointer to hardware structure
1487 * @mc_addr_list: the list of new multicast addresses 1495 * @netdev: pointer to net device structure
1488 * @mc_addr_count: number of addresses
1489 * @next: iterator function to walk the multicast address list
1490 * 1496 *
1491 * The given list replaces any existing list. Clears the MC addrs from receive 1497 * The given list replaces any existing list. Clears the MC addrs from receive
1492 * address registers and the multicast table. Uses unused receive address 1498 * address registers and the multicast table. Uses unused receive address
1493 * registers for the first multicast addresses, and hashes the rest into the 1499 * registers for the first multicast addresses, and hashes the rest into the
1494 * multicast table. 1500 * multicast table.
1495 **/ 1501 **/
1496s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, 1502s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1497 u32 mc_addr_count, ixgbe_mc_addr_itr next) 1503 struct net_device *netdev)
1498{ 1504{
1505 struct netdev_hw_addr *ha;
1499 u32 i; 1506 u32 i;
1500 u32 vmdq;
1501 1507
1502 /* 1508 /*
1503 * Set the new number of MC addresses that we are being requested to 1509 * Set the new number of MC addresses that we are being requested to
1504 * use. 1510 * use.
1505 */ 1511 */
1506 hw->addr_ctrl.num_mc_addrs = mc_addr_count; 1512 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
1507 hw->addr_ctrl.mta_in_use = 0; 1513 hw->addr_ctrl.mta_in_use = 0;
1508 1514
1509 /* Clear the MTA */ 1515 /* Clear the MTA */
@@ -1512,9 +1518,9 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
1512 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); 1518 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1513 1519
1514 /* Add the new addresses */ 1520 /* Add the new addresses */
1515 for (i = 0; i < mc_addr_count; i++) { 1521 netdev_for_each_mc_addr(ha, netdev) {
1516 hw_dbg(hw, " Adding the multicast addresses:\n"); 1522 hw_dbg(hw, " Adding the multicast addresses:\n");
1517 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); 1523 ixgbe_set_mta(hw, ha->addr);
1518 } 1524 }
1519 1525
1520 /* Enable mta */ 1526 /* Enable mta */
@@ -2254,3 +2260,490 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
2254 2260
2255 return 0; 2261 return 0;
2256} 2262}
2263
2264/**
2265 * ixgbe_get_san_mac_addr_offset - Get SAN MAC address offset from the EEPROM
2266 * @hw: pointer to hardware structure
2267 * @san_mac_offset: SAN MAC address offset
2268 *
2269 * This function will read the EEPROM location for the SAN MAC address
2270 * pointer, and returns the value at that location. This is used in both
2271 * get and set mac_addr routines.
2272 **/
2273static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
2274 u16 *san_mac_offset)
2275{
2276 /*
2277 * First read the EEPROM pointer to see if the MAC addresses are
2278 * available.
2279 */
2280 hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
2281
2282 return 0;
2283}
2284
2285/**
2286 * ixgbe_get_san_mac_addr_generic - SAN MAC address retrieval from the EEPROM
2287 * @hw: pointer to hardware structure
2288 * @san_mac_addr: SAN MAC address
2289 *
2290 * Reads the SAN MAC address from the EEPROM, if it's available. This is
2291 * per-port, so set_lan_id() must be called before reading the addresses.
2292 * set_lan_id() is called by identify_sfp(), but this cannot be relied
2293 * upon for non-SFP connections, so we must call it here.
2294 **/
2295s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
2296{
2297 u16 san_mac_data, san_mac_offset;
2298 u8 i;
2299
2300 /*
2301 * First read the EEPROM pointer to see if the MAC addresses are
2302 * available. If they're not, no point in calling set_lan_id() here.
2303 */
2304 ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
2305
2306 if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
2307 /*
2308 * No addresses available in this EEPROM. It's not an
2309 * error though, so just wipe the local address and return.
2310 */
2311 for (i = 0; i < 6; i++)
2312 san_mac_addr[i] = 0xFF;
2313
2314 goto san_mac_addr_out;
2315 }
2316
2317 /* make sure we know which port we need to program */
2318 hw->mac.ops.set_lan_id(hw);
2319 /* apply the port offset to the address offset */
2320 (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
2321 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
2322 for (i = 0; i < 3; i++) {
2323 hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
2324 san_mac_addr[i * 2] = (u8)(san_mac_data);
2325 san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
2326 san_mac_offset++;
2327 }
2328
2329san_mac_addr_out:
2330 return 0;
2331}
2332
2333/**
2334 * ixgbe_get_pcie_msix_count_generic - Gets MSI-X vector count
2335 * @hw: pointer to hardware structure
2336 *
2337 * Read PCIe configuration space, and get the MSI-X vector count from
2338 * the capabilities table.
2339 **/
2340u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
2341{
2342 struct ixgbe_adapter *adapter = hw->back;
2343 u16 msix_count;
2344 pci_read_config_word(adapter->pdev, IXGBE_PCIE_MSIX_82599_CAPS,
2345 &msix_count);
2346 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
2347
2348 /* MSI-X count is zero-based in HW, so increment to give proper value */
2349 msix_count++;
2350
2351 return msix_count;
2352}
2353
2354/**
2355 * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address
2356 * @hw: pointer to hardware struct
2357 * @rar: receive address register index to disassociate
2358 * @vmdq: VMDq pool index to remove from the rar
2359 **/
2360s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2361{
2362 u32 mpsar_lo, mpsar_hi;
2363 u32 rar_entries = hw->mac.num_rar_entries;
2364
2365 if (rar < rar_entries) {
2366 mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2367 mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2368
2369 if (!mpsar_lo && !mpsar_hi)
2370 goto done;
2371
2372 if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
2373 if (mpsar_lo) {
2374 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
2375 mpsar_lo = 0;
2376 }
2377 if (mpsar_hi) {
2378 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
2379 mpsar_hi = 0;
2380 }
2381 } else if (vmdq < 32) {
2382 mpsar_lo &= ~(1 << vmdq);
2383 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
2384 } else {
2385 mpsar_hi &= ~(1 << (vmdq - 32));
2386 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
2387 }
2388
2389 /* was that the last pool using this rar? */
2390 if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
2391 hw->mac.ops.clear_rar(hw, rar);
2392 } else {
2393 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2394 }
2395
2396done:
2397 return 0;
2398}
2399
2400/**
2401 * ixgbe_set_vmdq_generic - Associate a VMDq pool index with a rx address
2402 * @hw: pointer to hardware struct
2403 * @rar: receive address register index to associate with a VMDq index
2404 * @vmdq: VMDq pool index
2405 **/
2406s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
2407{
2408 u32 mpsar;
2409 u32 rar_entries = hw->mac.num_rar_entries;
2410
2411 if (rar < rar_entries) {
2412 if (vmdq < 32) {
2413 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
2414 mpsar |= 1 << vmdq;
2415 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
2416 } else {
2417 mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
2418 mpsar |= 1 << (vmdq - 32);
2419 IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
2420 }
2421 } else {
2422 hw_dbg(hw, "RAR index %d is out of range.\n", rar);
2423 }
2424 return 0;
2425}
2426
2427/**
2428 * ixgbe_init_uta_tables_generic - Initialize the Unicast Table Array
2429 * @hw: pointer to hardware structure
2430 **/
2431s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw)
2432{
2433 int i;
2434
2435
2436 for (i = 0; i < 128; i++)
2437 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
2438
2439 return 0;
2440}
2441
2442/**
2443 * ixgbe_find_vlvf_slot - find the vlanid or the first empty slot
2444 * @hw: pointer to hardware structure
2445 * @vlan: VLAN id to write to VLAN filter
2446 *
2447 * return the VLVF index where this VLAN id should be placed
2448 *
2449 **/
2450s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
2451{
2452 u32 bits = 0;
2453 u32 first_empty_slot = 0;
2454 s32 regindex;
2455
2456 /* short cut the special case */
2457 if (vlan == 0)
2458 return 0;
2459
2460 /*
2461 * Search for the vlan id in the VLVF entries. Save off the first empty
2462 * slot found along the way
2463 */
2464 for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
2465 bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
2466 if (!bits && !(first_empty_slot))
2467 first_empty_slot = regindex;
2468 else if ((bits & 0x0FFF) == vlan)
2469 break;
2470 }
2471
2472 /*
2473 * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan
2474 * in the VLVF. Else use the first empty VLVF register for this
2475 * vlan id.
2476 */
2477 if (regindex >= IXGBE_VLVF_ENTRIES) {
2478 if (first_empty_slot)
2479 regindex = first_empty_slot;
2480 else {
2481 hw_dbg(hw, "No space in VLVF.\n");
2482 regindex = IXGBE_ERR_NO_SPACE;
2483 }
2484 }
2485
2486 return regindex;
2487}
2488
2489/**
2490 * ixgbe_set_vfta_generic - Set VLAN filter table
2491 * @hw: pointer to hardware structure
2492 * @vlan: VLAN id to write to VLAN filter
2493 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
2494 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
2495 *
2496 * Turn on/off specified VLAN in the VLAN filter table.
2497 **/
2498s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
2499 bool vlan_on)
2500{
2501 s32 regindex;
2502 u32 bitindex;
2503 u32 vfta;
2504 u32 bits;
2505 u32 vt;
2506 u32 targetbit;
2507 bool vfta_changed = false;
2508
2509 if (vlan > 4095)
2510 return IXGBE_ERR_PARAM;
2511
2512 /*
2513 * this is a 2 part operation - first the VFTA, then the
2514 * VLVF and VLVFB if VT Mode is set
2515 * We don't write the VFTA until we know the VLVF part succeeded.
2516 */
2517
2518 /* Part 1
2519 * The VFTA is a bitstring made up of 128 32-bit registers
2520 * that enable the particular VLAN id, much like the MTA:
2521 * bits[11-5]: which register
2522 * bits[4-0]: which bit in the register
2523 */
2524 regindex = (vlan >> 5) & 0x7F;
2525 bitindex = vlan & 0x1F;
2526 targetbit = (1 << bitindex);
2527 vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
2528
2529 if (vlan_on) {
2530 if (!(vfta & targetbit)) {
2531 vfta |= targetbit;
2532 vfta_changed = true;
2533 }
2534 } else {
2535 if ((vfta & targetbit)) {
2536 vfta &= ~targetbit;
2537 vfta_changed = true;
2538 }
2539 }
2540
2541 /* Part 2
2542 * If VT Mode is set
2543 * Either vlan_on
2544 * make sure the vlan is in VLVF
2545 * set the vind bit in the matching VLVFB
2546 * Or !vlan_on
2547 * clear the pool bit and possibly the vind
2548 */
2549 vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
2550 if (vt & IXGBE_VT_CTL_VT_ENABLE) {
2551 s32 vlvf_index;
2552
2553 vlvf_index = ixgbe_find_vlvf_slot(hw, vlan);
2554 if (vlvf_index < 0)
2555 return vlvf_index;
2556
2557 if (vlan_on) {
2558 /* set the pool bit */
2559 if (vind < 32) {
2560 bits = IXGBE_READ_REG(hw,
2561 IXGBE_VLVFB(vlvf_index*2));
2562 bits |= (1 << vind);
2563 IXGBE_WRITE_REG(hw,
2564 IXGBE_VLVFB(vlvf_index*2),
2565 bits);
2566 } else {
2567 bits = IXGBE_READ_REG(hw,
2568 IXGBE_VLVFB((vlvf_index*2)+1));
2569 bits |= (1 << (vind-32));
2570 IXGBE_WRITE_REG(hw,
2571 IXGBE_VLVFB((vlvf_index*2)+1),
2572 bits);
2573 }
2574 } else {
2575 /* clear the pool bit */
2576 if (vind < 32) {
2577 bits = IXGBE_READ_REG(hw,
2578 IXGBE_VLVFB(vlvf_index*2));
2579 bits &= ~(1 << vind);
2580 IXGBE_WRITE_REG(hw,
2581 IXGBE_VLVFB(vlvf_index*2),
2582 bits);
2583 bits |= IXGBE_READ_REG(hw,
2584 IXGBE_VLVFB((vlvf_index*2)+1));
2585 } else {
2586 bits = IXGBE_READ_REG(hw,
2587 IXGBE_VLVFB((vlvf_index*2)+1));
2588 bits &= ~(1 << (vind-32));
2589 IXGBE_WRITE_REG(hw,
2590 IXGBE_VLVFB((vlvf_index*2)+1),
2591 bits);
2592 bits |= IXGBE_READ_REG(hw,
2593 IXGBE_VLVFB(vlvf_index*2));
2594 }
2595 }
2596
2597 /*
2598 * If there are still bits set in the VLVFB registers
2599 * for the VLAN ID indicated we need to see if the
2600 * caller is requesting that we clear the VFTA entry bit.
2601 * If the caller has requested that we clear the VFTA
2602 * entry bit but there are still pools/VFs using this VLAN
2603 * ID entry then ignore the request. We're not worried
2604 * about the case where we're turning the VFTA VLAN ID
2605 * entry bit on, only when requested to turn it off as
2606 * there may be multiple pools and/or VFs using the
2607 * VLAN ID entry. In that case we cannot clear the
2608 * VFTA bit until all pools/VFs using that VLAN ID have also
2609 * been cleared. This will be indicated by "bits" being
2610 * zero.
2611 */
2612 if (bits) {
2613 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
2614 (IXGBE_VLVF_VIEN | vlan));
2615 if (!vlan_on) {
2616 /* someone wants to clear the vfta entry
2617 * but some pools/VFs are still using it.
2618 * Ignore it. */
2619 vfta_changed = false;
2620 }
2621 }
2622 else
2623 IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
2624 }
2625
2626 if (vfta_changed)
2627 IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta);
2628
2629 return 0;
2630}
2631
2632/**
2633 * ixgbe_clear_vfta_generic - Clear VLAN filter table
2634 * @hw: pointer to hardware structure
2635 *
2636 * Clears the VLAN filer table, and the VMDq index associated with the filter
2637 **/
2638s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
2639{
2640 u32 offset;
2641
2642 for (offset = 0; offset < hw->mac.vft_size; offset++)
2643 IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);
2644
2645 for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
2646 IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
2647 IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
2648 IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
2649 }
2650
2651 return 0;
2652}
2653
2654/**
2655 * ixgbe_check_mac_link_generic - Determine link and speed status
2656 * @hw: pointer to hardware structure
2657 * @speed: pointer to link speed
2658 * @link_up: true when link is up
2659 * @link_up_wait_to_complete: bool used to wait for link up or not
2660 *
2661 * Reads the links register to determine if link is up and the current speed
2662 **/
2663s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2664 bool *link_up, bool link_up_wait_to_complete)
2665{
2666 u32 links_reg;
2667 u32 i;
2668
2669 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
2670 if (link_up_wait_to_complete) {
2671 for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
2672 if (links_reg & IXGBE_LINKS_UP) {
2673 *link_up = true;
2674 break;
2675 } else {
2676 *link_up = false;
2677 }
2678 msleep(100);
2679 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
2680 }
2681 } else {
2682 if (links_reg & IXGBE_LINKS_UP)
2683 *link_up = true;
2684 else
2685 *link_up = false;
2686 }
2687
2688 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2689 IXGBE_LINKS_SPEED_10G_82599)
2690 *speed = IXGBE_LINK_SPEED_10GB_FULL;
2691 else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
2692 IXGBE_LINKS_SPEED_1G_82599)
2693 *speed = IXGBE_LINK_SPEED_1GB_FULL;
2694 else
2695 *speed = IXGBE_LINK_SPEED_100_FULL;
2696
2697 /* if link is down, zero out the current_mode */
2698 if (*link_up == false) {
2699 hw->fc.current_mode = ixgbe_fc_none;
2700 hw->fc.fc_was_autonegged = false;
2701 }
2702
2703 return 0;
2704}
2705
2706/**
2707 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
2708 * the EEPROM
2709 * @hw: pointer to hardware structure
2710 * @wwnn_prefix: the alternative WWNN prefix
2711 * @wwpn_prefix: the alternative WWPN prefix
2712 *
2713 * This function will read the EEPROM from the alternative SAN MAC address
2714 * block to check the support for the alternative WWNN/WWPN prefix support.
2715 **/
2716s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2717 u16 *wwpn_prefix)
2718{
2719 u16 offset, caps;
2720 u16 alt_san_mac_blk_offset;
2721
2722 /* clear output first */
2723 *wwnn_prefix = 0xFFFF;
2724 *wwpn_prefix = 0xFFFF;
2725
2726 /* check if alternative SAN MAC is supported */
2727 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
2728 &alt_san_mac_blk_offset);
2729
2730 if ((alt_san_mac_blk_offset == 0) ||
2731 (alt_san_mac_blk_offset == 0xFFFF))
2732 goto wwn_prefix_out;
2733
2734 /* check capability in alternative san mac address block */
2735 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
2736 hw->eeprom.ops.read(hw, offset, &caps);
2737 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
2738 goto wwn_prefix_out;
2739
2740 /* get the corresponding prefix for WWNN/WWPN */
2741 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
2742 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
2743
2744 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
2745 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
2746
2747wwn_prefix_out:
2748 return 0;
2749}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 13606d4809c9..3080afb12bdf 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -30,6 +30,7 @@
30 30
31#include "ixgbe_type.h" 31#include "ixgbe_type.h"
32 32
33u32 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw);
33s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); 34s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
34s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); 35s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
35s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); 36s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
@@ -45,20 +46,20 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index);
45 46
46s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); 47s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw);
47s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); 48s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
48s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); 49s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
49s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 50s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
50 u16 *data); 51 u16 *data);
51s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 52s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
52 u16 *checksum_val); 53 u16 *checksum_val);
53s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); 54s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
55s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
54 56
55s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 57s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
56 u32 enable_addr); 58 u32 enable_addr);
57s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); 59s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
58s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); 60s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
59s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, 61s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
60 u32 mc_addr_count, 62 struct net_device *netdev);
61 ixgbe_mc_addr_itr func);
62s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, 63s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
63 struct net_device *netdev); 64 struct net_device *netdev);
64s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 65s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
@@ -71,9 +72,16 @@ s32 ixgbe_validate_mac_addr(u8 *mac_addr);
71s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); 72s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
72void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); 73void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask);
73s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); 74s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
74 75s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr);
75s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val); 76s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
76s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val); 77s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
78s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
79s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
80 u32 vind, bool vlan_on);
81s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
82s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
83 ixgbe_link_speed *speed,
84 bool *link_up, bool link_up_wait_to_complete);
77 85
78s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); 86s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
79s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); 87s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index dd4883f642be..71da325dfa80 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -488,7 +488,6 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
488 if (adapter->temp_dcb_cfg.pfc_mode_enable != 488 if (adapter->temp_dcb_cfg.pfc_mode_enable !=
489 adapter->dcb_cfg.pfc_mode_enable) 489 adapter->dcb_cfg.pfc_mode_enable)
490 adapter->dcb_set_bitmap |= BIT_PFC; 490 adapter->dcb_set_bitmap |= BIT_PFC;
491 return;
492} 491}
493 492
494/** 493/**
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 8f461d5cee77..c50a7541ffec 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -212,8 +212,8 @@ static int ixgbe_get_settings(struct net_device *netdev,
212 ecmd->port = PORT_FIBRE; 212 ecmd->port = PORT_FIBRE;
213 break; 213 break;
214 case ixgbe_phy_nl: 214 case ixgbe_phy_nl:
215 case ixgbe_phy_tw_tyco: 215 case ixgbe_phy_sfp_passive_tyco:
216 case ixgbe_phy_tw_unknown: 216 case ixgbe_phy_sfp_passive_unknown:
217 case ixgbe_phy_sfp_ftl: 217 case ixgbe_phy_sfp_ftl:
218 case ixgbe_phy_sfp_avago: 218 case ixgbe_phy_sfp_avago:
219 case ixgbe_phy_sfp_intel: 219 case ixgbe_phy_sfp_intel:
@@ -365,7 +365,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
365 else 365 else
366 fc.disable_fc_autoneg = false; 366 fc.disable_fc_autoneg = false;
367 367
368 if (pause->rx_pause && pause->tx_pause) 368 if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
369 fc.requested_mode = ixgbe_fc_full; 369 fc.requested_mode = ixgbe_fc_full;
370 else if (pause->rx_pause && !pause->tx_pause) 370 else if (pause->rx_pause && !pause->tx_pause)
371 fc.requested_mode = ixgbe_fc_rx_pause; 371 fc.requested_mode = ixgbe_fc_rx_pause;
@@ -1458,8 +1458,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1458 struct ixgbe_tx_buffer *buf = 1458 struct ixgbe_tx_buffer *buf =
1459 &(tx_ring->tx_buffer_info[i]); 1459 &(tx_ring->tx_buffer_info[i]);
1460 if (buf->dma) 1460 if (buf->dma)
1461 pci_unmap_single(pdev, buf->dma, buf->length, 1461 dma_unmap_single(&pdev->dev, buf->dma,
1462 PCI_DMA_TODEVICE); 1462 buf->length, DMA_TO_DEVICE);
1463 if (buf->skb) 1463 if (buf->skb)
1464 dev_kfree_skb(buf->skb); 1464 dev_kfree_skb(buf->skb);
1465 } 1465 }
@@ -1470,22 +1470,22 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1470 struct ixgbe_rx_buffer *buf = 1470 struct ixgbe_rx_buffer *buf =
1471 &(rx_ring->rx_buffer_info[i]); 1471 &(rx_ring->rx_buffer_info[i]);
1472 if (buf->dma) 1472 if (buf->dma)
1473 pci_unmap_single(pdev, buf->dma, 1473 dma_unmap_single(&pdev->dev, buf->dma,
1474 IXGBE_RXBUFFER_2048, 1474 IXGBE_RXBUFFER_2048,
1475 PCI_DMA_FROMDEVICE); 1475 DMA_FROM_DEVICE);
1476 if (buf->skb) 1476 if (buf->skb)
1477 dev_kfree_skb(buf->skb); 1477 dev_kfree_skb(buf->skb);
1478 } 1478 }
1479 } 1479 }
1480 1480
1481 if (tx_ring->desc) { 1481 if (tx_ring->desc) {
1482 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, 1482 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1483 tx_ring->dma); 1483 tx_ring->dma);
1484 tx_ring->desc = NULL; 1484 tx_ring->desc = NULL;
1485 } 1485 }
1486 if (rx_ring->desc) { 1486 if (rx_ring->desc) {
1487 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, 1487 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
1488 rx_ring->dma); 1488 rx_ring->dma);
1489 rx_ring->desc = NULL; 1489 rx_ring->desc = NULL;
1490 } 1490 }
1491 1491
@@ -1493,8 +1493,6 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1493 tx_ring->tx_buffer_info = NULL; 1493 tx_ring->tx_buffer_info = NULL;
1494 kfree(rx_ring->rx_buffer_info); 1494 kfree(rx_ring->rx_buffer_info);
1495 rx_ring->rx_buffer_info = NULL; 1495 rx_ring->rx_buffer_info = NULL;
1496
1497 return;
1498} 1496}
1499 1497
1500static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1498static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1520,8 +1518,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1520 1518
1521 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 1519 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
1522 tx_ring->size = ALIGN(tx_ring->size, 4096); 1520 tx_ring->size = ALIGN(tx_ring->size, 4096);
1523 if (!(tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 1521 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
1524 &tx_ring->dma))) { 1522 &tx_ring->dma, GFP_KERNEL);
1523 if (!(tx_ring->desc)) {
1525 ret_val = 2; 1524 ret_val = 2;
1526 goto err_nomem; 1525 goto err_nomem;
1527 } 1526 }
@@ -1563,8 +1562,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1563 tx_ring->tx_buffer_info[i].skb = skb; 1562 tx_ring->tx_buffer_info[i].skb = skb;
1564 tx_ring->tx_buffer_info[i].length = skb->len; 1563 tx_ring->tx_buffer_info[i].length = skb->len;
1565 tx_ring->tx_buffer_info[i].dma = 1564 tx_ring->tx_buffer_info[i].dma =
1566 pci_map_single(pdev, skb->data, skb->len, 1565 dma_map_single(&pdev->dev, skb->data, skb->len,
1567 PCI_DMA_TODEVICE); 1566 DMA_TO_DEVICE);
1568 desc->read.buffer_addr = 1567 desc->read.buffer_addr =
1569 cpu_to_le64(tx_ring->tx_buffer_info[i].dma); 1568 cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
1570 desc->read.cmd_type_len = cpu_to_le32(skb->len); 1569 desc->read.cmd_type_len = cpu_to_le32(skb->len);
@@ -1593,8 +1592,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1593 1592
1594 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 1593 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
1595 rx_ring->size = ALIGN(rx_ring->size, 4096); 1594 rx_ring->size = ALIGN(rx_ring->size, 4096);
1596 if (!(rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 1595 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1597 &rx_ring->dma))) { 1596 &rx_ring->dma, GFP_KERNEL);
1597 if (!(rx_ring->desc)) {
1598 ret_val = 5; 1598 ret_val = 5;
1599 goto err_nomem; 1599 goto err_nomem;
1600 } 1600 }
@@ -1661,8 +1661,8 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1661 skb_reserve(skb, NET_IP_ALIGN); 1661 skb_reserve(skb, NET_IP_ALIGN);
1662 rx_ring->rx_buffer_info[i].skb = skb; 1662 rx_ring->rx_buffer_info[i].skb = skb;
1663 rx_ring->rx_buffer_info[i].dma = 1663 rx_ring->rx_buffer_info[i].dma =
1664 pci_map_single(pdev, skb->data, IXGBE_RXBUFFER_2048, 1664 dma_map_single(&pdev->dev, skb->data,
1665 PCI_DMA_FROMDEVICE); 1665 IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
1666 rx_desc->read.pkt_addr = 1666 rx_desc->read.pkt_addr =
1667 cpu_to_le64(rx_ring->rx_buffer_info[i].dma); 1667 cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
1668 memset(skb->data, 0x00, skb->len); 1668 memset(skb->data, 0x00, skb->len);
@@ -1775,10 +1775,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1775 ixgbe_create_lbtest_frame( 1775 ixgbe_create_lbtest_frame(
1776 tx_ring->tx_buffer_info[k].skb, 1776 tx_ring->tx_buffer_info[k].skb,
1777 1024); 1777 1024);
1778 pci_dma_sync_single_for_device(pdev, 1778 dma_sync_single_for_device(&pdev->dev,
1779 tx_ring->tx_buffer_info[k].dma, 1779 tx_ring->tx_buffer_info[k].dma,
1780 tx_ring->tx_buffer_info[k].length, 1780 tx_ring->tx_buffer_info[k].length,
1781 PCI_DMA_TODEVICE); 1781 DMA_TO_DEVICE);
1782 if (unlikely(++k == tx_ring->count)) 1782 if (unlikely(++k == tx_ring->count))
1783 k = 0; 1783 k = 0;
1784 } 1784 }
@@ -1789,10 +1789,10 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1789 good_cnt = 0; 1789 good_cnt = 0;
1790 do { 1790 do {
1791 /* receive the sent packets */ 1791 /* receive the sent packets */
1792 pci_dma_sync_single_for_cpu(pdev, 1792 dma_sync_single_for_cpu(&pdev->dev,
1793 rx_ring->rx_buffer_info[l].dma, 1793 rx_ring->rx_buffer_info[l].dma,
1794 IXGBE_RXBUFFER_2048, 1794 IXGBE_RXBUFFER_2048,
1795 PCI_DMA_FROMDEVICE); 1795 DMA_FROM_DEVICE);
1796 ret_val = ixgbe_check_lbtest_frame( 1796 ret_val = ixgbe_check_lbtest_frame(
1797 rx_ring->rx_buffer_info[l].skb, 1024); 1797 rx_ring->rx_buffer_info[l].skb, 1024);
1798 if (!ret_val) 1798 if (!ret_val)
@@ -1971,8 +1971,6 @@ static void ixgbe_get_wol(struct net_device *netdev,
1971 wol->wolopts |= WAKE_BCAST; 1971 wol->wolopts |= WAKE_BCAST;
1972 if (adapter->wol & IXGBE_WUFC_MAG) 1972 if (adapter->wol & IXGBE_WUFC_MAG)
1973 wol->wolopts |= WAKE_MAGIC; 1973 wol->wolopts |= WAKE_MAGIC;
1974
1975 return;
1976} 1974}
1977 1975
1978static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) 1976static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2079,12 +2077,32 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
2079 return 0; 2077 return 0;
2080} 2078}
2081 2079
2080/*
2081 * this function must be called before setting the new value of
2082 * rx_itr_setting
2083 */
2084static bool ixgbe_reenable_rsc(struct ixgbe_adapter *adapter,
2085 struct ethtool_coalesce *ec)
2086{
2087 /* check the old value and enable RSC if necessary */
2088 if ((adapter->rx_itr_setting == 0) &&
2089 (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) {
2090 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2091 adapter->netdev->features |= NETIF_F_LRO;
2092 DPRINTK(PROBE, INFO, "rx-usecs set to %d, re-enabling RSC\n",
2093 ec->rx_coalesce_usecs);
2094 return true;
2095 }
2096 return false;
2097}
2098
2082static int ixgbe_set_coalesce(struct net_device *netdev, 2099static int ixgbe_set_coalesce(struct net_device *netdev,
2083 struct ethtool_coalesce *ec) 2100 struct ethtool_coalesce *ec)
2084{ 2101{
2085 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2102 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2086 struct ixgbe_q_vector *q_vector; 2103 struct ixgbe_q_vector *q_vector;
2087 int i; 2104 int i;
2105 bool need_reset = false;
2088 2106
2089 /* don't accept tx specific changes if we've got mixed RxTx vectors */ 2107 /* don't accept tx specific changes if we've got mixed RxTx vectors */
2090 if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count 2108 if (adapter->q_vector[0]->txr_count && adapter->q_vector[0]->rxr_count
@@ -2095,11 +2113,20 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2095 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; 2113 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
2096 2114
2097 if (ec->rx_coalesce_usecs > 1) { 2115 if (ec->rx_coalesce_usecs > 1) {
2116 u32 max_int;
2117 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
2118 max_int = IXGBE_MAX_RSC_INT_RATE;
2119 else
2120 max_int = IXGBE_MAX_INT_RATE;
2121
2098 /* check the limits */ 2122 /* check the limits */
2099 if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) || 2123 if ((1000000/ec->rx_coalesce_usecs > max_int) ||
2100 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) 2124 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
2101 return -EINVAL; 2125 return -EINVAL;
2102 2126
2127 /* check the old value and enable RSC if necessary */
2128 need_reset = ixgbe_reenable_rsc(adapter, ec);
2129
2103 /* store the value in ints/second */ 2130 /* store the value in ints/second */
2104 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; 2131 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
2105 2132
@@ -2108,6 +2135,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2108 /* clear the lower bit as its used for dynamic state */ 2135 /* clear the lower bit as its used for dynamic state */
2109 adapter->rx_itr_setting &= ~1; 2136 adapter->rx_itr_setting &= ~1;
2110 } else if (ec->rx_coalesce_usecs == 1) { 2137 } else if (ec->rx_coalesce_usecs == 1) {
2138 /* check the old value and enable RSC if necessary */
2139 need_reset = ixgbe_reenable_rsc(adapter, ec);
2140
2111 /* 1 means dynamic mode */ 2141 /* 1 means dynamic mode */
2112 adapter->rx_eitr_param = 20000; 2142 adapter->rx_eitr_param = 20000;
2113 adapter->rx_itr_setting = 1; 2143 adapter->rx_itr_setting = 1;
@@ -2116,14 +2146,30 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2116 * any other value means disable eitr, which is best 2146 * any other value means disable eitr, which is best
2117 * served by setting the interrupt rate very high 2147 * served by setting the interrupt rate very high
2118 */ 2148 */
2119 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) 2149 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
2120 adapter->rx_eitr_param = IXGBE_MAX_RSC_INT_RATE;
2121 else
2122 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
2123 adapter->rx_itr_setting = 0; 2150 adapter->rx_itr_setting = 0;
2151
2152 /*
2153 * if hardware RSC is enabled, disable it when
2154 * setting low latency mode, to avoid errata, assuming
2155 * that when the user set low latency mode they want
2156 * it at the cost of anything else
2157 */
2158 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2159 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2160 netdev->features &= ~NETIF_F_LRO;
2161 DPRINTK(PROBE, INFO,
2162 "rx-usecs set to 0, disabling RSC\n");
2163
2164 need_reset = true;
2165 }
2124 } 2166 }
2125 2167
2126 if (ec->tx_coalesce_usecs > 1) { 2168 if (ec->tx_coalesce_usecs > 1) {
2169 /*
2170 * don't have to worry about max_int as above because
2171 * tx vectors don't do hardware RSC (an rx function)
2172 */
2127 /* check the limits */ 2173 /* check the limits */
2128 if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) || 2174 if ((1000000/ec->tx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
2129 (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE)) 2175 (1000000/ec->tx_coalesce_usecs < IXGBE_MIN_INT_RATE))
@@ -2167,6 +2213,18 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2167 ixgbe_write_eitr(q_vector); 2213 ixgbe_write_eitr(q_vector);
2168 } 2214 }
2169 2215
2216 /*
2217 * do reset here at the end to make sure EITR==0 case is handled
2218 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
2219 * also locks in RSC enable/disable which requires reset
2220 */
2221 if (need_reset) {
2222 if (netif_running(netdev))
2223 ixgbe_reinit_locked(adapter);
2224 else
2225 ixgbe_reset(adapter);
2226 }
2227
2170 return 0; 2228 return 0;
2171} 2229}
2172 2230
@@ -2178,10 +2236,26 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2178 ethtool_op_set_flags(netdev, data); 2236 ethtool_op_set_flags(netdev, data);
2179 2237
2180 /* if state changes we need to update adapter->flags and reset */ 2238 /* if state changes we need to update adapter->flags and reset */
2181 if ((!!(data & ETH_FLAG_LRO)) != 2239 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
2182 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) { 2240 /*
2183 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; 2241 * cast both to bool and verify if they are set the same
2184 need_reset = true; 2242 * but only enable RSC if itr is non-zero, as
2243 * itr=0 and RSC are mutually exclusive
2244 */
2245 if (((!!(data & ETH_FLAG_LRO)) !=
2246 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
2247 adapter->rx_itr_setting) {
2248 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
2249 switch (adapter->hw.mac.type) {
2250 case ixgbe_mac_82599EB:
2251 need_reset = true;
2252 break;
2253 default:
2254 break;
2255 }
2256 } else if (!adapter->rx_itr_setting) {
2257 netdev->features &= ~ETH_FLAG_LRO;
2258 }
2185 } 2259 }
2186 2260
2187 /* 2261 /*
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 6493049b663d..45182ab41d6b 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -32,6 +32,7 @@
32#endif /* CONFIG_IXGBE_DCB */ 32#endif /* CONFIG_IXGBE_DCB */
33#include <linux/if_ether.h> 33#include <linux/if_ether.h>
34#include <linux/gfp.h> 34#include <linux/gfp.h>
35#include <linux/if_vlan.h>
35#include <scsi/scsi_cmnd.h> 36#include <scsi/scsi_cmnd.h>
36#include <scsi/scsi_device.h> 37#include <scsi/scsi_device.h>
37#include <scsi/fc/fc_fs.h> 38#include <scsi/fc/fc_fs.h>
@@ -312,10 +313,12 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
312 if (fcerr == IXGBE_FCERR_BADCRC) 313 if (fcerr == IXGBE_FCERR_BADCRC)
313 skb->ip_summed = CHECKSUM_NONE; 314 skb->ip_summed = CHECKSUM_NONE;
314 315
315 skb_reset_network_header(skb); 316 if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
316 skb_set_transport_header(skb, skb_network_offset(skb) + 317 fh = (struct fc_frame_header *)(skb->data +
317 sizeof(struct fcoe_hdr)); 318 sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
318 fh = (struct fc_frame_header *)skb_transport_header(skb); 319 else
320 fh = (struct fc_frame_header *)(skb->data +
321 sizeof(struct fcoe_hdr));
319 fctl = ntoh24(fh->fh_f_ctl); 322 fctl = ntoh24(fh->fh_f_ctl);
320 if (fctl & FC_FC_EX_CTX) 323 if (fctl & FC_FC_EX_CTX)
321 xid = be16_to_cpu(fh->fh_ox_id); 324 xid = be16_to_cpu(fh->fh_ox_id);
@@ -536,12 +539,6 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
536 } 539 }
537 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); 540 IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
538 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); 541 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
539 fcoe_i = f->mask;
540 fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
541 fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
542 IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
543 IXGBE_ETQS_QUEUE_EN |
544 (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
545 } else { 542 } else {
546 /* Use single rx queue for FCoE */ 543 /* Use single rx queue for FCoE */
547 fcoe_i = f->mask; 544 fcoe_i = f->mask;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 8f677cb86290..9551cbb7bf01 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -175,6 +175,345 @@ static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
175 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; 175 adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
176} 176}
177 177
178struct ixgbe_reg_info {
179 u32 ofs;
180 char *name;
181};
182
183static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = {
184
185 /* General Registers */
186 {IXGBE_CTRL, "CTRL"},
187 {IXGBE_STATUS, "STATUS"},
188 {IXGBE_CTRL_EXT, "CTRL_EXT"},
189
190 /* Interrupt Registers */
191 {IXGBE_EICR, "EICR"},
192
193 /* RX Registers */
194 {IXGBE_SRRCTL(0), "SRRCTL"},
195 {IXGBE_DCA_RXCTRL(0), "DRXCTL"},
196 {IXGBE_RDLEN(0), "RDLEN"},
197 {IXGBE_RDH(0), "RDH"},
198 {IXGBE_RDT(0), "RDT"},
199 {IXGBE_RXDCTL(0), "RXDCTL"},
200 {IXGBE_RDBAL(0), "RDBAL"},
201 {IXGBE_RDBAH(0), "RDBAH"},
202
203 /* TX Registers */
204 {IXGBE_TDBAL(0), "TDBAL"},
205 {IXGBE_TDBAH(0), "TDBAH"},
206 {IXGBE_TDLEN(0), "TDLEN"},
207 {IXGBE_TDH(0), "TDH"},
208 {IXGBE_TDT(0), "TDT"},
209 {IXGBE_TXDCTL(0), "TXDCTL"},
210
211 /* List Terminator */
212 {}
213};
214
215
216/*
217 * ixgbe_regdump - register printout routine
218 */
219static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo)
220{
221 int i = 0, j = 0;
222 char rname[16];
223 u32 regs[64];
224
225 switch (reginfo->ofs) {
226 case IXGBE_SRRCTL(0):
227 for (i = 0; i < 64; i++)
228 regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
229 break;
230 case IXGBE_DCA_RXCTRL(0):
231 for (i = 0; i < 64; i++)
232 regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
233 break;
234 case IXGBE_RDLEN(0):
235 for (i = 0; i < 64; i++)
236 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
237 break;
238 case IXGBE_RDH(0):
239 for (i = 0; i < 64; i++)
240 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
241 break;
242 case IXGBE_RDT(0):
243 for (i = 0; i < 64; i++)
244 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
245 break;
246 case IXGBE_RXDCTL(0):
247 for (i = 0; i < 64; i++)
248 regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
249 break;
250 case IXGBE_RDBAL(0):
251 for (i = 0; i < 64; i++)
252 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
253 break;
254 case IXGBE_RDBAH(0):
255 for (i = 0; i < 64; i++)
256 regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
257 break;
258 case IXGBE_TDBAL(0):
259 for (i = 0; i < 64; i++)
260 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
261 break;
262 case IXGBE_TDBAH(0):
263 for (i = 0; i < 64; i++)
264 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
265 break;
266 case IXGBE_TDLEN(0):
267 for (i = 0; i < 64; i++)
268 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
269 break;
270 case IXGBE_TDH(0):
271 for (i = 0; i < 64; i++)
272 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
273 break;
274 case IXGBE_TDT(0):
275 for (i = 0; i < 64; i++)
276 regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
277 break;
278 case IXGBE_TXDCTL(0):
279 for (i = 0; i < 64; i++)
280 regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
281 break;
282 default:
283 printk(KERN_INFO "%-15s %08x\n", reginfo->name,
284 IXGBE_READ_REG(hw, reginfo->ofs));
285 return;
286 }
287
288 for (i = 0; i < 8; i++) {
289 snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7);
290 printk(KERN_ERR "%-15s ", rname);
291 for (j = 0; j < 8; j++)
292 printk(KERN_CONT "%08x ", regs[i*8+j]);
293 printk(KERN_CONT "\n");
294 }
295
296}
297
298/*
299 * ixgbe_dump - Print registers, tx-rings and rx-rings
300 */
301static void ixgbe_dump(struct ixgbe_adapter *adapter)
302{
303 struct net_device *netdev = adapter->netdev;
304 struct ixgbe_hw *hw = &adapter->hw;
305 struct ixgbe_reg_info *reginfo;
306 int n = 0;
307 struct ixgbe_ring *tx_ring;
308 struct ixgbe_tx_buffer *tx_buffer_info;
309 union ixgbe_adv_tx_desc *tx_desc;
310 struct my_u0 { u64 a; u64 b; } *u0;
311 struct ixgbe_ring *rx_ring;
312 union ixgbe_adv_rx_desc *rx_desc;
313 struct ixgbe_rx_buffer *rx_buffer_info;
314 u32 staterr;
315 int i = 0;
316
317 if (!netif_msg_hw(adapter))
318 return;
319
320 /* Print netdevice Info */
321 if (netdev) {
322 dev_info(&adapter->pdev->dev, "Net device Info\n");
323 printk(KERN_INFO "Device Name state "
324 "trans_start last_rx\n");
325 printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
326 netdev->name,
327 netdev->state,
328 netdev->trans_start,
329 netdev->last_rx);
330 }
331
332 /* Print Registers */
333 dev_info(&adapter->pdev->dev, "Register Dump\n");
334 printk(KERN_INFO " Register Name Value\n");
335 for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl;
336 reginfo->name; reginfo++) {
337 ixgbe_regdump(hw, reginfo);
338 }
339
340 /* Print TX Ring Summary */
341 if (!netdev || !netif_running(netdev))
342 goto exit;
343
344 dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
345 printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ] "
346 "leng ntw timestamp\n");
347 for (n = 0; n < adapter->num_tx_queues; n++) {
348 tx_ring = adapter->tx_ring[n];
349 tx_buffer_info =
350 &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
351 printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
352 n, tx_ring->next_to_use, tx_ring->next_to_clean,
353 (u64)tx_buffer_info->dma,
354 tx_buffer_info->length,
355 tx_buffer_info->next_to_watch,
356 (u64)tx_buffer_info->time_stamp);
357 }
358
359 /* Print TX Rings */
360 if (!netif_msg_tx_done(adapter))
361 goto rx_ring_summary;
362
363 dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
364
365 /* Transmit Descriptor Formats
366 *
367 * Advanced Transmit Descriptor
368 * +--------------------------------------------------------------+
369 * 0 | Buffer Address [63:0] |
370 * +--------------------------------------------------------------+
371 * 8 | PAYLEN | PORTS | IDX | STA | DCMD |DTYP | RSV | DTALEN |
372 * +--------------------------------------------------------------+
373 * 63 46 45 40 39 36 35 32 31 24 23 20 19 0
374 */
375
376 for (n = 0; n < adapter->num_tx_queues; n++) {
377 tx_ring = adapter->tx_ring[n];
378 printk(KERN_INFO "------------------------------------\n");
379 printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
380 printk(KERN_INFO "------------------------------------\n");
381 printk(KERN_INFO "T [desc] [address 63:0 ] "
382 "[PlPOIdStDDt Ln] [bi->dma ] "
383 "leng ntw timestamp bi->skb\n");
384
385 for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
386 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
387 tx_buffer_info = &tx_ring->tx_buffer_info[i];
388 u0 = (struct my_u0 *)tx_desc;
389 printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
390 " %04X %3X %016llX %p", i,
391 le64_to_cpu(u0->a),
392 le64_to_cpu(u0->b),
393 (u64)tx_buffer_info->dma,
394 tx_buffer_info->length,
395 tx_buffer_info->next_to_watch,
396 (u64)tx_buffer_info->time_stamp,
397 tx_buffer_info->skb);
398 if (i == tx_ring->next_to_use &&
399 i == tx_ring->next_to_clean)
400 printk(KERN_CONT " NTC/U\n");
401 else if (i == tx_ring->next_to_use)
402 printk(KERN_CONT " NTU\n");
403 else if (i == tx_ring->next_to_clean)
404 printk(KERN_CONT " NTC\n");
405 else
406 printk(KERN_CONT "\n");
407
408 if (netif_msg_pktdata(adapter) &&
409 tx_buffer_info->dma != 0)
410 print_hex_dump(KERN_INFO, "",
411 DUMP_PREFIX_ADDRESS, 16, 1,
412 phys_to_virt(tx_buffer_info->dma),
413 tx_buffer_info->length, true);
414 }
415 }
416
417 /* Print RX Rings Summary */
418rx_ring_summary:
419 dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
420 printk(KERN_INFO "Queue [NTU] [NTC]\n");
421 for (n = 0; n < adapter->num_rx_queues; n++) {
422 rx_ring = adapter->rx_ring[n];
423 printk(KERN_INFO "%5d %5X %5X\n", n,
424 rx_ring->next_to_use, rx_ring->next_to_clean);
425 }
426
427 /* Print RX Rings */
428 if (!netif_msg_rx_status(adapter))
429 goto exit;
430
431 dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
432
433 /* Advanced Receive Descriptor (Read) Format
434 * 63 1 0
435 * +-----------------------------------------------------+
436 * 0 | Packet Buffer Address [63:1] |A0/NSE|
437 * +----------------------------------------------+------+
438 * 8 | Header Buffer Address [63:1] | DD |
439 * +-----------------------------------------------------+
440 *
441 *
442 * Advanced Receive Descriptor (Write-Back) Format
443 *
444 * 63 48 47 32 31 30 21 20 16 15 4 3 0
445 * +------------------------------------------------------+
446 * 0 | Packet IP |SPH| HDR_LEN | RSV|Packet| RSS |
447 * | Checksum Ident | | | | Type | Type |
448 * +------------------------------------------------------+
449 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
450 * +------------------------------------------------------+
451 * 63 48 47 32 31 20 19 0
452 */
453 for (n = 0; n < adapter->num_rx_queues; n++) {
454 rx_ring = adapter->rx_ring[n];
455 printk(KERN_INFO "------------------------------------\n");
456 printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
457 printk(KERN_INFO "------------------------------------\n");
458 printk(KERN_INFO "R [desc] [ PktBuf A0] "
459 "[ HeadBuf DD] [bi->dma ] [bi->skb] "
460 "<-- Adv Rx Read format\n");
461 printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
462 "[vl er S cks ln] ---------------- [bi->skb] "
463 "<-- Adv Rx Write-Back format\n");
464
465 for (i = 0; i < rx_ring->count; i++) {
466 rx_buffer_info = &rx_ring->rx_buffer_info[i];
467 rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
468 u0 = (struct my_u0 *)rx_desc;
469 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
470 if (staterr & IXGBE_RXD_STAT_DD) {
471 /* Descriptor Done */
472 printk(KERN_INFO "RWB[0x%03X] %016llX "
473 "%016llX ---------------- %p", i,
474 le64_to_cpu(u0->a),
475 le64_to_cpu(u0->b),
476 rx_buffer_info->skb);
477 } else {
478 printk(KERN_INFO "R [0x%03X] %016llX "
479 "%016llX %016llX %p", i,
480 le64_to_cpu(u0->a),
481 le64_to_cpu(u0->b),
482 (u64)rx_buffer_info->dma,
483 rx_buffer_info->skb);
484
485 if (netif_msg_pktdata(adapter)) {
486 print_hex_dump(KERN_INFO, "",
487 DUMP_PREFIX_ADDRESS, 16, 1,
488 phys_to_virt(rx_buffer_info->dma),
489 rx_ring->rx_buf_len, true);
490
491 if (rx_ring->rx_buf_len
492 < IXGBE_RXBUFFER_2048)
493 print_hex_dump(KERN_INFO, "",
494 DUMP_PREFIX_ADDRESS, 16, 1,
495 phys_to_virt(
496 rx_buffer_info->page_dma +
497 rx_buffer_info->page_offset
498 ),
499 PAGE_SIZE/2, true);
500 }
501 }
502
503 if (i == rx_ring->next_to_use)
504 printk(KERN_CONT " NTU\n");
505 else if (i == rx_ring->next_to_clean)
506 printk(KERN_CONT " NTC\n");
507 else
508 printk(KERN_CONT "\n");
509
510 }
511 }
512
513exit:
514 return;
515}
516
178static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) 517static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
179{ 518{
180 u32 ctrl_ext; 519 u32 ctrl_ext;
@@ -266,15 +605,15 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
266{ 605{
267 if (tx_buffer_info->dma) { 606 if (tx_buffer_info->dma) {
268 if (tx_buffer_info->mapped_as_page) 607 if (tx_buffer_info->mapped_as_page)
269 pci_unmap_page(adapter->pdev, 608 dma_unmap_page(&adapter->pdev->dev,
270 tx_buffer_info->dma, 609 tx_buffer_info->dma,
271 tx_buffer_info->length, 610 tx_buffer_info->length,
272 PCI_DMA_TODEVICE); 611 DMA_TO_DEVICE);
273 else 612 else
274 pci_unmap_single(adapter->pdev, 613 dma_unmap_single(&adapter->pdev->dev,
275 tx_buffer_info->dma, 614 tx_buffer_info->dma,
276 tx_buffer_info->length, 615 tx_buffer_info->length,
277 PCI_DMA_TODEVICE); 616 DMA_TO_DEVICE);
278 tx_buffer_info->dma = 0; 617 tx_buffer_info->dma = 0;
279 } 618 }
280 if (tx_buffer_info->skb) { 619 if (tx_buffer_info->skb) {
@@ -286,16 +625,16 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
286} 625}
287 626
288/** 627/**
289 * ixgbe_tx_is_paused - check if the tx ring is paused 628 * ixgbe_tx_xon_state - check the tx ring xon state
290 * @adapter: the ixgbe adapter 629 * @adapter: the ixgbe adapter
291 * @tx_ring: the corresponding tx_ring 630 * @tx_ring: the corresponding tx_ring
292 * 631 *
293 * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the 632 * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
294 * corresponding TC of this tx_ring when checking TFCS. 633 * corresponding TC of this tx_ring when checking TFCS.
295 * 634 *
296 * Returns : true if paused 635 * Returns : true if in xon state (currently not paused)
297 */ 636 */
298static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter, 637static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
299 struct ixgbe_ring *tx_ring) 638 struct ixgbe_ring *tx_ring)
300{ 639{
301 u32 txoff = IXGBE_TFCS_TXOFF; 640 u32 txoff = IXGBE_TFCS_TXOFF;
@@ -351,7 +690,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
351 adapter->detect_tx_hung = false; 690 adapter->detect_tx_hung = false;
352 if (tx_ring->tx_buffer_info[eop].time_stamp && 691 if (tx_ring->tx_buffer_info[eop].time_stamp &&
353 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) && 692 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
354 !ixgbe_tx_is_paused(adapter, tx_ring)) { 693 ixgbe_tx_xon_state(adapter, tx_ring)) {
355 /* detected Tx unit hang */ 694 /* detected Tx unit hang */
356 union ixgbe_adv_tx_desc *tx_desc; 695 union ixgbe_adv_tx_desc *tx_desc;
357 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop); 696 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
@@ -721,10 +1060,10 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
721 bi->page_offset ^= (PAGE_SIZE / 2); 1060 bi->page_offset ^= (PAGE_SIZE / 2);
722 } 1061 }
723 1062
724 bi->page_dma = pci_map_page(pdev, bi->page, 1063 bi->page_dma = dma_map_page(&pdev->dev, bi->page,
725 bi->page_offset, 1064 bi->page_offset,
726 (PAGE_SIZE / 2), 1065 (PAGE_SIZE / 2),
727 PCI_DMA_FROMDEVICE); 1066 DMA_FROM_DEVICE);
728 } 1067 }
729 1068
730 if (!bi->skb) { 1069 if (!bi->skb) {
@@ -743,9 +1082,9 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
743 - skb->data)); 1082 - skb->data));
744 1083
745 bi->skb = skb; 1084 bi->skb = skb;
746 bi->dma = pci_map_single(pdev, skb->data, 1085 bi->dma = dma_map_single(&pdev->dev, skb->data,
747 rx_ring->rx_buf_len, 1086 rx_ring->rx_buf_len,
748 PCI_DMA_FROMDEVICE); 1087 DMA_FROM_DEVICE);
749 } 1088 }
750 /* Refresh the desc even if buffer_addrs didn't change because 1089 /* Refresh the desc even if buffer_addrs didn't change because
751 * each write-back erases this info. */ 1090 * each write-back erases this info. */
@@ -821,6 +1160,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
821 1160
822struct ixgbe_rsc_cb { 1161struct ixgbe_rsc_cb {
823 dma_addr_t dma; 1162 dma_addr_t dma;
1163 bool delay_unmap;
824}; 1164};
825 1165
826#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb) 1166#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
@@ -861,9 +1201,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
861 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); 1201 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
862 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> 1202 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
863 IXGBE_RXDADV_HDRBUFLEN_SHIFT; 1203 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
864 if (len > IXGBE_RX_HDR_SIZE)
865 len = IXGBE_RX_HDR_SIZE;
866 upper_len = le16_to_cpu(rx_desc->wb.upper.length); 1204 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1205 if ((len > IXGBE_RX_HDR_SIZE) ||
1206 (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
1207 len = IXGBE_RX_HDR_SIZE;
867 } else { 1208 } else {
868 len = le16_to_cpu(rx_desc->wb.upper.length); 1209 len = le16_to_cpu(rx_desc->wb.upper.length);
869 } 1210 }
@@ -876,7 +1217,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
876 if (rx_buffer_info->dma) { 1217 if (rx_buffer_info->dma) {
877 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 1218 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
878 (!(staterr & IXGBE_RXD_STAT_EOP)) && 1219 (!(staterr & IXGBE_RXD_STAT_EOP)) &&
879 (!(skb->prev))) 1220 (!(skb->prev))) {
880 /* 1221 /*
881 * When HWRSC is enabled, delay unmapping 1222 * When HWRSC is enabled, delay unmapping
882 * of the first packet. It carries the 1223 * of the first packet. It carries the
@@ -884,18 +1225,21 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
884 * access the header after the writeback. 1225 * access the header after the writeback.
885 * Only unmap it when EOP is reached 1226 * Only unmap it when EOP is reached
886 */ 1227 */
1228 IXGBE_RSC_CB(skb)->delay_unmap = true;
887 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; 1229 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
888 else 1230 } else {
889 pci_unmap_single(pdev, rx_buffer_info->dma, 1231 dma_unmap_single(&pdev->dev,
1232 rx_buffer_info->dma,
890 rx_ring->rx_buf_len, 1233 rx_ring->rx_buf_len,
891 PCI_DMA_FROMDEVICE); 1234 DMA_FROM_DEVICE);
1235 }
892 rx_buffer_info->dma = 0; 1236 rx_buffer_info->dma = 0;
893 skb_put(skb, len); 1237 skb_put(skb, len);
894 } 1238 }
895 1239
896 if (upper_len) { 1240 if (upper_len) {
897 pci_unmap_page(pdev, rx_buffer_info->page_dma, 1241 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
898 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 1242 PAGE_SIZE / 2, DMA_FROM_DEVICE);
899 rx_buffer_info->page_dma = 0; 1243 rx_buffer_info->page_dma = 0;
900 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 1244 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
901 rx_buffer_info->page, 1245 rx_buffer_info->page,
@@ -936,11 +1280,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
936 if (skb->prev) 1280 if (skb->prev)
937 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count)); 1281 skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
938 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { 1282 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
939 if (IXGBE_RSC_CB(skb)->dma) { 1283 if (IXGBE_RSC_CB(skb)->delay_unmap) {
940 pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma, 1284 dma_unmap_single(&pdev->dev,
1285 IXGBE_RSC_CB(skb)->dma,
941 rx_ring->rx_buf_len, 1286 rx_ring->rx_buf_len,
942 PCI_DMA_FROMDEVICE); 1287 DMA_FROM_DEVICE);
943 IXGBE_RSC_CB(skb)->dma = 0; 1288 IXGBE_RSC_CB(skb)->dma = 0;
1289 IXGBE_RSC_CB(skb)->delay_unmap = false;
944 } 1290 }
945 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) 1291 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
946 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags; 1292 rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
@@ -1190,6 +1536,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1190 itr_reg |= (itr_reg << 16); 1536 itr_reg |= (itr_reg << 16);
1191 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1537 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
1192 /* 1538 /*
1539 * 82599 can support a value of zero, so allow it for
1540 * max interrupt rate, but there is an errata where it can
1541 * not be zero with RSC
1542 */
1543 if (itr_reg == 8 &&
1544 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
1545 itr_reg = 0;
1546
1547 /*
1193 * set the WDIS bit to not clear the timer bits and cause an 1548 * set the WDIS bit to not clear the timer bits and cause an
1194 * immediate assertion of the interrupt 1549 * immediate assertion of the interrupt
1195 */ 1550 */
@@ -1261,8 +1616,6 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1261 1616
1262 ixgbe_write_eitr(q_vector); 1617 ixgbe_write_eitr(q_vector);
1263 } 1618 }
1264
1265 return;
1266} 1619}
1267 1620
1268static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) 1621static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr)
@@ -1826,8 +2179,6 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
1826 2179
1827 ixgbe_write_eitr(q_vector); 2180 ixgbe_write_eitr(q_vector);
1828 } 2181 }
1829
1830 return;
1831} 2182}
1832 2183
1833/** 2184/**
@@ -2372,7 +2723,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
2372 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift)); 2723 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
2373 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift)); 2724 IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
2374 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); 2725 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
2375 ixgbe_set_vmolr(hw, adapter->num_vfs); 2726 ixgbe_set_vmolr(hw, adapter->num_vfs, true);
2376 } 2727 }
2377 2728
2378 /* Program MRQC for the distribution of queues */ 2729 /* Program MRQC for the distribution of queues */
@@ -2482,12 +2833,82 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2482 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); 2833 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
2483} 2834}
2484 2835
2836/**
2837 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
2838 * @adapter: driver data
2839 */
2840static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
2841{
2842 struct ixgbe_hw *hw = &adapter->hw;
2843 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2844 int i, j;
2845
2846 switch (hw->mac.type) {
2847 case ixgbe_mac_82598EB:
2848 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
2849#ifdef CONFIG_IXGBE_DCB
2850 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
2851 vlnctrl &= ~IXGBE_VLNCTRL_VME;
2852#endif
2853 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2854 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2855 break;
2856 case ixgbe_mac_82599EB:
2857 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
2858 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2859 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2860#ifdef CONFIG_IXGBE_DCB
2861 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
2862 break;
2863#endif
2864 for (i = 0; i < adapter->num_rx_queues; i++) {
2865 j = adapter->rx_ring[i]->reg_idx;
2866 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2867 vlnctrl &= ~IXGBE_RXDCTL_VME;
2868 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2869 }
2870 break;
2871 default:
2872 break;
2873 }
2874}
2875
2876/**
2877 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
2878 * @adapter: driver data
2879 */
2880static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
2881{
2882 struct ixgbe_hw *hw = &adapter->hw;
2883 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2884 int i, j;
2885
2886 switch (hw->mac.type) {
2887 case ixgbe_mac_82598EB:
2888 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2889 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2890 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2891 break;
2892 case ixgbe_mac_82599EB:
2893 vlnctrl |= IXGBE_VLNCTRL_VFE;
2894 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2895 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2896 for (i = 0; i < adapter->num_rx_queues; i++) {
2897 j = adapter->rx_ring[i]->reg_idx;
2898 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2899 vlnctrl |= IXGBE_RXDCTL_VME;
2900 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2901 }
2902 break;
2903 default:
2904 break;
2905 }
2906}
2907
2485static void ixgbe_vlan_rx_register(struct net_device *netdev, 2908static void ixgbe_vlan_rx_register(struct net_device *netdev,
2486 struct vlan_group *grp) 2909 struct vlan_group *grp)
2487{ 2910{
2488 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2911 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2489 u32 ctrl;
2490 int i, j;
2491 2912
2492 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2913 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2493 ixgbe_irq_disable(adapter); 2914 ixgbe_irq_disable(adapter);
@@ -2498,25 +2919,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
2498 * still receive traffic from a DCB-enabled host even if we're 2919 * still receive traffic from a DCB-enabled host even if we're
2499 * not in DCB mode. 2920 * not in DCB mode.
2500 */ 2921 */
2501 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 2922 ixgbe_vlan_filter_enable(adapter);
2502
2503 /* Disable CFI check */
2504 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2505
2506 /* enable VLAN tag stripping */
2507 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2508 ctrl |= IXGBE_VLNCTRL_VME;
2509 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2510 for (i = 0; i < adapter->num_rx_queues; i++) {
2511 u32 ctrl;
2512 j = adapter->rx_ring[i]->reg_idx;
2513 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
2514 ctrl |= IXGBE_RXDCTL_VME;
2515 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
2516 }
2517 }
2518
2519 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
2520 2923
2521 ixgbe_vlan_rx_add_vid(netdev, 0); 2924 ixgbe_vlan_rx_add_vid(netdev, 0);
2522 2925
@@ -2538,21 +2941,6 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
2538 } 2941 }
2539} 2942}
2540 2943
2541static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
2542{
2543 struct dev_mc_list *mc_ptr;
2544 u8 *addr = *mc_addr_ptr;
2545 *vmdq = 0;
2546
2547 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
2548 if (mc_ptr->next)
2549 *mc_addr_ptr = mc_ptr->next->dmi_addr;
2550 else
2551 *mc_addr_ptr = NULL;
2552
2553 return addr;
2554}
2555
2556/** 2944/**
2557 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set 2945 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
2558 * @netdev: network interface device structure 2946 * @netdev: network interface device structure
@@ -2566,42 +2954,36 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
2566{ 2954{
2567 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2955 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2568 struct ixgbe_hw *hw = &adapter->hw; 2956 struct ixgbe_hw *hw = &adapter->hw;
2569 u32 fctrl, vlnctrl; 2957 u32 fctrl;
2570 u8 *addr_list = NULL;
2571 int addr_count = 0;
2572 2958
2573 /* Check for Promiscuous and All Multicast modes */ 2959 /* Check for Promiscuous and All Multicast modes */
2574 2960
2575 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2961 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2576 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2577 2962
2578 if (netdev->flags & IFF_PROMISC) { 2963 if (netdev->flags & IFF_PROMISC) {
2579 hw->addr_ctrl.user_set_promisc = 1; 2964 hw->addr_ctrl.user_set_promisc = true;
2580 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2965 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2581 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 2966 /* don't hardware filter vlans in promisc mode */
2967 ixgbe_vlan_filter_disable(adapter);
2582 } else { 2968 } else {
2583 if (netdev->flags & IFF_ALLMULTI) { 2969 if (netdev->flags & IFF_ALLMULTI) {
2584 fctrl |= IXGBE_FCTRL_MPE; 2970 fctrl |= IXGBE_FCTRL_MPE;
2585 fctrl &= ~IXGBE_FCTRL_UPE; 2971 fctrl &= ~IXGBE_FCTRL_UPE;
2586 } else { 2972 } else if (!hw->addr_ctrl.uc_set_promisc) {
2587 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2973 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2588 } 2974 }
2589 vlnctrl |= IXGBE_VLNCTRL_VFE; 2975 ixgbe_vlan_filter_enable(adapter);
2590 hw->addr_ctrl.user_set_promisc = 0; 2976 hw->addr_ctrl.user_set_promisc = false;
2591 } 2977 }
2592 2978
2593 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 2979 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2594 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2595 2980
2596 /* reprogram secondary unicast list */ 2981 /* reprogram secondary unicast list */
2597 hw->mac.ops.update_uc_addr_list(hw, netdev); 2982 hw->mac.ops.update_uc_addr_list(hw, netdev);
2598 2983
2599 /* reprogram multicast list */ 2984 /* reprogram multicast list */
2600 addr_count = netdev_mc_count(netdev); 2985 hw->mac.ops.update_mc_addr_list(hw, netdev);
2601 if (addr_count) 2986
2602 addr_list = netdev->mc_list->dmi_addr;
2603 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
2604 ixgbe_addr_list_itr);
2605 if (adapter->num_vfs) 2987 if (adapter->num_vfs)
2606 ixgbe_restore_vf_multicasts(adapter); 2988 ixgbe_restore_vf_multicasts(adapter);
2607} 2989}
@@ -2661,7 +3043,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
2661static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) 3043static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2662{ 3044{
2663 struct ixgbe_hw *hw = &adapter->hw; 3045 struct ixgbe_hw *hw = &adapter->hw;
2664 u32 txdctl, vlnctrl; 3046 u32 txdctl;
2665 int i, j; 3047 int i, j;
2666 3048
2667 ixgbe_dcb_check_config(&adapter->dcb_cfg); 3049 ixgbe_dcb_check_config(&adapter->dcb_cfg);
@@ -2679,22 +3061,8 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2679 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 3061 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2680 } 3062 }
2681 /* Enable VLAN tag insert/strip */ 3063 /* Enable VLAN tag insert/strip */
2682 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 3064 ixgbe_vlan_filter_enable(adapter);
2683 if (hw->mac.type == ixgbe_mac_82598EB) { 3065
2684 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2685 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2686 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2687 } else if (hw->mac.type == ixgbe_mac_82599EB) {
2688 vlnctrl |= IXGBE_VLNCTRL_VFE;
2689 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2690 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2691 for (i = 0; i < adapter->num_rx_queues; i++) {
2692 j = adapter->rx_ring[i]->reg_idx;
2693 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2694 vlnctrl |= IXGBE_RXDCTL_VME;
2695 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2696 }
2697 }
2698 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3066 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
2699} 3067}
2700 3068
@@ -2750,8 +3118,10 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
2750 case ixgbe_phy_sfp_ftl: 3118 case ixgbe_phy_sfp_ftl:
2751 case ixgbe_phy_sfp_intel: 3119 case ixgbe_phy_sfp_intel:
2752 case ixgbe_phy_sfp_unknown: 3120 case ixgbe_phy_sfp_unknown:
2753 case ixgbe_phy_tw_tyco: 3121 case ixgbe_phy_sfp_passive_tyco:
2754 case ixgbe_phy_tw_unknown: 3122 case ixgbe_phy_sfp_passive_unknown:
3123 case ixgbe_phy_sfp_active_unknown:
3124 case ixgbe_phy_sfp_ftl_active:
2755 return true; 3125 return true;
2756 default: 3126 default:
2757 return false; 3127 return false;
@@ -2927,8 +3297,13 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2927 for (i = 0; i < adapter->num_tx_queues; i++) { 3297 for (i = 0; i < adapter->num_tx_queues; i++) {
2928 j = adapter->tx_ring[i]->reg_idx; 3298 j = adapter->tx_ring[i]->reg_idx;
2929 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 3299 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
2930 /* enable WTHRESH=8 descriptors, to encourage burst writeback */ 3300 if (adapter->rx_itr_setting == 0) {
2931 txdctl |= (8 << 16); 3301 /* cannot set wthresh when itr==0 */
3302 txdctl &= ~0x007F0000;
3303 } else {
3304 /* enable WTHRESH=8 descriptors, to encourage burst writeback */
3305 txdctl |= (8 << 16);
3306 }
2932 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 3307 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2933 } 3308 }
2934 3309
@@ -2982,6 +3357,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
2982 else 3357 else
2983 ixgbe_configure_msi_and_legacy(adapter); 3358 ixgbe_configure_msi_and_legacy(adapter);
2984 3359
3360 /* enable the optics */
3361 if (hw->phy.multispeed_fiber)
3362 hw->mac.ops.enable_tx_laser(hw);
3363
2985 clear_bit(__IXGBE_DOWN, &adapter->state); 3364 clear_bit(__IXGBE_DOWN, &adapter->state);
2986 ixgbe_napi_enable_all(adapter); 3365 ixgbe_napi_enable_all(adapter);
2987 3366
@@ -3127,9 +3506,9 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3127 3506
3128 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 3507 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3129 if (rx_buffer_info->dma) { 3508 if (rx_buffer_info->dma) {
3130 pci_unmap_single(pdev, rx_buffer_info->dma, 3509 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
3131 rx_ring->rx_buf_len, 3510 rx_ring->rx_buf_len,
3132 PCI_DMA_FROMDEVICE); 3511 DMA_FROM_DEVICE);
3133 rx_buffer_info->dma = 0; 3512 rx_buffer_info->dma = 0;
3134 } 3513 }
3135 if (rx_buffer_info->skb) { 3514 if (rx_buffer_info->skb) {
@@ -3137,11 +3516,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3137 rx_buffer_info->skb = NULL; 3516 rx_buffer_info->skb = NULL;
3138 do { 3517 do {
3139 struct sk_buff *this = skb; 3518 struct sk_buff *this = skb;
3140 if (IXGBE_RSC_CB(this)->dma) { 3519 if (IXGBE_RSC_CB(this)->delay_unmap) {
3141 pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma, 3520 dma_unmap_single(&pdev->dev,
3521 IXGBE_RSC_CB(this)->dma,
3142 rx_ring->rx_buf_len, 3522 rx_ring->rx_buf_len,
3143 PCI_DMA_FROMDEVICE); 3523 DMA_FROM_DEVICE);
3144 IXGBE_RSC_CB(this)->dma = 0; 3524 IXGBE_RSC_CB(this)->dma = 0;
3525 IXGBE_RSC_CB(skb)->delay_unmap = false;
3145 } 3526 }
3146 skb = skb->prev; 3527 skb = skb->prev;
3147 dev_kfree_skb(this); 3528 dev_kfree_skb(this);
@@ -3150,8 +3531,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3150 if (!rx_buffer_info->page) 3531 if (!rx_buffer_info->page)
3151 continue; 3532 continue;
3152 if (rx_buffer_info->page_dma) { 3533 if (rx_buffer_info->page_dma) {
3153 pci_unmap_page(pdev, rx_buffer_info->page_dma, 3534 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
3154 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 3535 PAGE_SIZE / 2, DMA_FROM_DEVICE);
3155 rx_buffer_info->page_dma = 0; 3536 rx_buffer_info->page_dma = 0;
3156 } 3537 }
3157 put_page(rx_buffer_info->page); 3538 put_page(rx_buffer_info->page);
@@ -3243,6 +3624,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3243 /* signal that we are down to the interrupt handler */ 3624 /* signal that we are down to the interrupt handler */
3244 set_bit(__IXGBE_DOWN, &adapter->state); 3625 set_bit(__IXGBE_DOWN, &adapter->state);
3245 3626
3627 /* power down the optics */
3628 if (hw->phy.multispeed_fiber)
3629 hw->mac.ops.disable_tx_laser(hw);
3630
3246 /* disable receive for all VFs and wait one second */ 3631 /* disable receive for all VFs and wait one second */
3247 if (adapter->num_vfs) { 3632 if (adapter->num_vfs) {
3248 /* ping all the active vfs to let them know we are going down */ 3633 /* ping all the active vfs to let them know we are going down */
@@ -3260,22 +3645,23 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3260 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 3645 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3261 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 3646 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3262 3647
3263 netif_tx_disable(netdev);
3264
3265 IXGBE_WRITE_FLUSH(hw); 3648 IXGBE_WRITE_FLUSH(hw);
3266 msleep(10); 3649 msleep(10);
3267 3650
3268 netif_tx_stop_all_queues(netdev); 3651 netif_tx_stop_all_queues(netdev);
3269 3652
3270 ixgbe_irq_disable(adapter);
3271
3272 ixgbe_napi_disable_all(adapter);
3273
3274 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 3653 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
3275 del_timer_sync(&adapter->sfp_timer); 3654 del_timer_sync(&adapter->sfp_timer);
3276 del_timer_sync(&adapter->watchdog_timer); 3655 del_timer_sync(&adapter->watchdog_timer);
3277 cancel_work_sync(&adapter->watchdog_task); 3656 cancel_work_sync(&adapter->watchdog_task);
3278 3657
3658 netif_carrier_off(netdev);
3659 netif_tx_disable(netdev);
3660
3661 ixgbe_irq_disable(adapter);
3662
3663 ixgbe_napi_disable_all(adapter);
3664
3279 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 3665 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
3280 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 3666 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
3281 cancel_work_sync(&adapter->fdir_reinit_task); 3667 cancel_work_sync(&adapter->fdir_reinit_task);
@@ -3293,8 +3679,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3293 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 3679 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3294 ~IXGBE_DMATXCTL_TE)); 3680 ~IXGBE_DMATXCTL_TE));
3295 3681
3296 netif_carrier_off(netdev);
3297
3298 /* clear n-tuple filters that are cached */ 3682 /* clear n-tuple filters that are cached */
3299 ethtool_ntuple_flush(netdev); 3683 ethtool_ntuple_flush(netdev);
3300 3684
@@ -3371,6 +3755,8 @@ static void ixgbe_reset_task(struct work_struct *work)
3371 3755
3372 adapter->tx_timeout_count++; 3756 adapter->tx_timeout_count++;
3373 3757
3758 ixgbe_dump(adapter);
3759 netdev_err(adapter->netdev, "Reset adapter\n");
3374 ixgbe_reinit_locked(adapter); 3760 ixgbe_reinit_locked(adapter);
3375} 3761}
3376 3762
@@ -3471,12 +3857,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3471 adapter->num_tx_queues = 1; 3857 adapter->num_tx_queues = 1;
3472#ifdef CONFIG_IXGBE_DCB 3858#ifdef CONFIG_IXGBE_DCB
3473 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 3859 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3474 DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n"); 3860 DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
3475 ixgbe_set_dcb_queues(adapter); 3861 ixgbe_set_dcb_queues(adapter);
3476 } 3862 }
3477#endif 3863#endif
3478 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3864 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3479 DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n"); 3865 DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
3480 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 3866 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3481 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 3867 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3482 ixgbe_set_fdir_queues(adapter); 3868 ixgbe_set_fdir_queues(adapter);
@@ -4087,7 +4473,6 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
4087 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED; 4473 adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
4088 pci_disable_msi(adapter->pdev); 4474 pci_disable_msi(adapter->pdev);
4089 } 4475 }
4090 return;
4091} 4476}
4092 4477
4093/** 4478/**
@@ -4373,8 +4758,8 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4373 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 4758 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4374 tx_ring->size = ALIGN(tx_ring->size, 4096); 4759 tx_ring->size = ALIGN(tx_ring->size, 4096);
4375 4760
4376 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 4761 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
4377 &tx_ring->dma); 4762 &tx_ring->dma, GFP_KERNEL);
4378 if (!tx_ring->desc) 4763 if (!tx_ring->desc)
4379 goto err; 4764 goto err;
4380 4765
@@ -4444,7 +4829,8 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
4444 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 4829 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
4445 rx_ring->size = ALIGN(rx_ring->size, 4096); 4830 rx_ring->size = ALIGN(rx_ring->size, 4096);
4446 4831
4447 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, &rx_ring->dma); 4832 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
4833 &rx_ring->dma, GFP_KERNEL);
4448 4834
4449 if (!rx_ring->desc) { 4835 if (!rx_ring->desc) {
4450 DPRINTK(PROBE, ERR, 4836 DPRINTK(PROBE, ERR,
@@ -4505,7 +4891,8 @@ void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
4505 vfree(tx_ring->tx_buffer_info); 4891 vfree(tx_ring->tx_buffer_info);
4506 tx_ring->tx_buffer_info = NULL; 4892 tx_ring->tx_buffer_info = NULL;
4507 4893
4508 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 4894 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
4895 tx_ring->dma);
4509 4896
4510 tx_ring->desc = NULL; 4897 tx_ring->desc = NULL;
4511} 4898}
@@ -4542,7 +4929,8 @@ void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
4542 vfree(rx_ring->rx_buffer_info); 4929 vfree(rx_ring->rx_buffer_info);
4543 rx_ring->rx_buffer_info = NULL; 4930 rx_ring->rx_buffer_info = NULL;
4544 4931
4545 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 4932 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
4933 rx_ring->dma);
4546 4934
4547 rx_ring->desc = NULL; 4935 rx_ring->desc = NULL;
4548} 4936}
@@ -5092,7 +5480,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
5092 &(adapter->tx_ring[i]->reinit_state)); 5480 &(adapter->tx_ring[i]->reinit_state));
5093 } else { 5481 } else {
5094 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " 5482 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
5095 "ignored adding FDIR ATR filters \n"); 5483 "ignored adding FDIR ATR filters\n");
5096 } 5484 }
5097 /* Done FDIR Re-initialization, enable transmits */ 5485 /* Done FDIR Re-initialization, enable transmits */
5098 netif_tx_start_all_queues(adapter->netdev); 5486 netif_tx_start_all_queues(adapter->netdev);
@@ -5412,10 +5800,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5412 5800
5413 tx_buffer_info->length = size; 5801 tx_buffer_info->length = size;
5414 tx_buffer_info->mapped_as_page = false; 5802 tx_buffer_info->mapped_as_page = false;
5415 tx_buffer_info->dma = pci_map_single(pdev, 5803 tx_buffer_info->dma = dma_map_single(&pdev->dev,
5416 skb->data + offset, 5804 skb->data + offset,
5417 size, PCI_DMA_TODEVICE); 5805 size, DMA_TO_DEVICE);
5418 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 5806 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
5419 goto dma_error; 5807 goto dma_error;
5420 tx_buffer_info->time_stamp = jiffies; 5808 tx_buffer_info->time_stamp = jiffies;
5421 tx_buffer_info->next_to_watch = i; 5809 tx_buffer_info->next_to_watch = i;
@@ -5448,12 +5836,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
5448 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 5836 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
5449 5837
5450 tx_buffer_info->length = size; 5838 tx_buffer_info->length = size;
5451 tx_buffer_info->dma = pci_map_page(adapter->pdev, 5839 tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
5452 frag->page, 5840 frag->page,
5453 offset, size, 5841 offset, size,
5454 PCI_DMA_TODEVICE); 5842 DMA_TO_DEVICE);
5455 tx_buffer_info->mapped_as_page = true; 5843 tx_buffer_info->mapped_as_page = true;
5456 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 5844 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
5457 goto dma_error; 5845 goto dma_error;
5458 tx_buffer_info->time_stamp = jiffies; 5846 tx_buffer_info->time_stamp = jiffies;
5459 tx_buffer_info->next_to_watch = i; 5847 tx_buffer_info->next_to_watch = i;
@@ -5689,7 +6077,8 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
5689 } 6077 }
5690 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 6078 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5691 tx_flags |= IXGBE_TX_FLAGS_VLAN; 6079 tx_flags |= IXGBE_TX_FLAGS_VLAN;
5692 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 6080 } else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED &&
6081 skb->priority != TC_PRIO_CONTROL) {
5693 tx_flags |= ((skb->queue_mapping & 0x7) << 13); 6082 tx_flags |= ((skb->queue_mapping & 0x7) << 13);
5694 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 6083 tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
5695 tx_flags |= IXGBE_TX_FLAGS_VLAN; 6084 tx_flags |= IXGBE_TX_FLAGS_VLAN;
@@ -5934,6 +6323,10 @@ static const struct net_device_ops ixgbe_netdev_ops = {
5934 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, 6323 .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
5935 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, 6324 .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
5936 .ndo_do_ioctl = ixgbe_ioctl, 6325 .ndo_do_ioctl = ixgbe_ioctl,
6326 .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
6327 .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
6328 .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
6329 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
5937#ifdef CONFIG_NET_POLL_CONTROLLER 6330#ifdef CONFIG_NET_POLL_CONTROLLER
5938 .ndo_poll_controller = ixgbe_netpoll, 6331 .ndo_poll_controller = ixgbe_netpoll,
5939#endif 6332#endif
@@ -6031,13 +6424,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6031 if (err) 6424 if (err)
6032 return err; 6425 return err;
6033 6426
6034 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 6427 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
6035 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 6428 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
6036 pci_using_dac = 1; 6429 pci_using_dac = 1;
6037 } else { 6430 } else {
6038 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 6431 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
6039 if (err) { 6432 if (err) {
6040 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 6433 err = dma_set_coherent_mask(&pdev->dev,
6434 DMA_BIT_MASK(32));
6041 if (err) { 6435 if (err) {
6042 dev_err(&pdev->dev, "No usable DMA " 6436 dev_err(&pdev->dev, "No usable DMA "
6043 "configuration, aborting\n"); 6437 "configuration, aborting\n");
@@ -6253,6 +6647,10 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6253 goto err_eeprom; 6647 goto err_eeprom;
6254 } 6648 }
6255 6649
6650 /* power down the optics */
6651 if (hw->phy.multispeed_fiber)
6652 hw->mac.ops.disable_tx_laser(hw);
6653
6256 init_timer(&adapter->watchdog_timer); 6654 init_timer(&adapter->watchdog_timer);
6257 adapter->watchdog_timer.function = &ixgbe_watchdog; 6655 adapter->watchdog_timer.function = &ixgbe_watchdog;
6258 adapter->watchdog_timer.data = (unsigned long)adapter; 6656 adapter->watchdog_timer.data = (unsigned long)adapter;
@@ -6400,16 +6798,6 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
6400 del_timer_sync(&adapter->sfp_timer); 6798 del_timer_sync(&adapter->sfp_timer);
6401 cancel_work_sync(&adapter->watchdog_task); 6799 cancel_work_sync(&adapter->watchdog_task);
6402 cancel_work_sync(&adapter->sfp_task); 6800 cancel_work_sync(&adapter->sfp_task);
6403 if (adapter->hw.phy.multispeed_fiber) {
6404 struct ixgbe_hw *hw = &adapter->hw;
6405 /*
6406 * Restart clause 37 autoneg, disable and re-enable
6407 * the tx laser, to clear & alert the link partner
6408 * that it needs to restart autotry
6409 */
6410 hw->mac.autotry_restart = true;
6411 hw->mac.ops.flap_tx_laser(hw);
6412 }
6413 cancel_work_sync(&adapter->multispeed_fiber_task); 6801 cancel_work_sync(&adapter->multispeed_fiber_task);
6414 cancel_work_sync(&adapter->sfp_config_module_task); 6802 cancel_work_sync(&adapter->sfp_config_module_task);
6415 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 6803 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 1c1efd386956..22d21af14783 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -475,7 +475,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
475 msleep(edata); 475 msleep(edata);
476 break; 476 break;
477 case IXGBE_DATA_NL: 477 case IXGBE_DATA_NL:
478 hw_dbg(hw, "DATA: \n"); 478 hw_dbg(hw, "DATA:\n");
479 data_offset++; 479 data_offset++;
480 hw->eeprom.ops.read(hw, data_offset++, 480 hw->eeprom.ops.read(hw, data_offset++,
481 &phy_offset); 481 &phy_offset);
@@ -491,7 +491,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
491 break; 491 break;
492 case IXGBE_CONTROL_NL: 492 case IXGBE_CONTROL_NL:
493 data_offset++; 493 data_offset++;
494 hw_dbg(hw, "CONTROL: \n"); 494 hw_dbg(hw, "CONTROL:\n");
495 if (edata == IXGBE_CONTROL_EOL_NL) { 495 if (edata == IXGBE_CONTROL_EOL_NL) {
496 hw_dbg(hw, "EOL\n"); 496 hw_dbg(hw, "EOL\n");
497 end_data = true; 497 end_data = true;
@@ -531,6 +531,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
531 u8 comp_codes_10g = 0; 531 u8 comp_codes_10g = 0;
532 u8 oui_bytes[3] = {0, 0, 0}; 532 u8 oui_bytes[3] = {0, 0, 0};
533 u8 cable_tech = 0; 533 u8 cable_tech = 0;
534 u8 cable_spec = 0;
534 u16 enforce_sfp = 0; 535 u16 enforce_sfp = 0;
535 536
536 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { 537 if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
@@ -580,14 +581,30 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
580 else 581 else
581 hw->phy.sfp_type = ixgbe_sfp_type_unknown; 582 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
582 } else if (hw->mac.type == ixgbe_mac_82599EB) { 583 } else if (hw->mac.type == ixgbe_mac_82599EB) {
583 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 584 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
584 if (hw->bus.lan_id == 0) 585 if (hw->bus.lan_id == 0)
585 hw->phy.sfp_type = 586 hw->phy.sfp_type =
586 ixgbe_sfp_type_da_cu_core0; 587 ixgbe_sfp_type_da_cu_core0;
587 else 588 else
588 hw->phy.sfp_type = 589 hw->phy.sfp_type =
589 ixgbe_sfp_type_da_cu_core1; 590 ixgbe_sfp_type_da_cu_core1;
590 else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) 591 } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
592 hw->phy.ops.read_i2c_eeprom(
593 hw, IXGBE_SFF_CABLE_SPEC_COMP,
594 &cable_spec);
595 if (cable_spec &
596 IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
597 if (hw->bus.lan_id == 0)
598 hw->phy.sfp_type =
599 ixgbe_sfp_type_da_act_lmt_core0;
600 else
601 hw->phy.sfp_type =
602 ixgbe_sfp_type_da_act_lmt_core1;
603 } else {
604 hw->phy.sfp_type =
605 ixgbe_sfp_type_unknown;
606 }
607 } else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
591 if (hw->bus.lan_id == 0) 608 if (hw->bus.lan_id == 0)
592 hw->phy.sfp_type = 609 hw->phy.sfp_type =
593 ixgbe_sfp_type_srlr_core0; 610 ixgbe_sfp_type_srlr_core0;
@@ -637,10 +654,14 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
637 switch (vendor_oui) { 654 switch (vendor_oui) {
638 case IXGBE_SFF_VENDOR_OUI_TYCO: 655 case IXGBE_SFF_VENDOR_OUI_TYCO:
639 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 656 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
640 hw->phy.type = ixgbe_phy_tw_tyco; 657 hw->phy.type =
658 ixgbe_phy_sfp_passive_tyco;
641 break; 659 break;
642 case IXGBE_SFF_VENDOR_OUI_FTL: 660 case IXGBE_SFF_VENDOR_OUI_FTL:
643 hw->phy.type = ixgbe_phy_sfp_ftl; 661 if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
662 hw->phy.type = ixgbe_phy_sfp_ftl_active;
663 else
664 hw->phy.type = ixgbe_phy_sfp_ftl;
644 break; 665 break;
645 case IXGBE_SFF_VENDOR_OUI_AVAGO: 666 case IXGBE_SFF_VENDOR_OUI_AVAGO:
646 hw->phy.type = ixgbe_phy_sfp_avago; 667 hw->phy.type = ixgbe_phy_sfp_avago;
@@ -650,7 +671,11 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
650 break; 671 break;
651 default: 672 default:
652 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) 673 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
653 hw->phy.type = ixgbe_phy_tw_unknown; 674 hw->phy.type =
675 ixgbe_phy_sfp_passive_unknown;
676 else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
677 hw->phy.type =
678 ixgbe_phy_sfp_active_unknown;
654 else 679 else
655 hw->phy.type = ixgbe_phy_sfp_unknown; 680 hw->phy.type = ixgbe_phy_sfp_unknown;
656 break; 681 break;
@@ -658,7 +683,8 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
658 } 683 }
659 684
660 /* All passive DA cables are supported */ 685 /* All passive DA cables are supported */
661 if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { 686 if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
687 IXGBE_SFF_DA_ACTIVE_CABLE)) {
662 status = 0; 688 status = 0;
663 goto out; 689 goto out;
664 } 690 }
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index 9cf5f3b4cc5d..c9c545941407 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -40,9 +40,12 @@
40#define IXGBE_SFF_1GBE_COMP_CODES 0x6 40#define IXGBE_SFF_1GBE_COMP_CODES 0x6
41#define IXGBE_SFF_10GBE_COMP_CODES 0x3 41#define IXGBE_SFF_10GBE_COMP_CODES 0x3
42#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 42#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
43#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
43 44
44/* Bitmasks */ 45/* Bitmasks */
45#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 46#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
47#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8
48#define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4
46#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 49#define IXGBE_SFF_1GBASESX_CAPABLE 0x1
47#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 50#define IXGBE_SFF_1GBASELX_CAPABLE 0x2
48#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 51#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index d4cd20f30199..f6cee94ec8e8 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -48,7 +48,11 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
48 int entries, u16 *hash_list, u32 vf) 48 int entries, u16 *hash_list, u32 vf)
49{ 49{
50 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; 50 struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
51 struct ixgbe_hw *hw = &adapter->hw;
51 int i; 52 int i;
53 u32 vector_bit;
54 u32 vector_reg;
55 u32 mta_reg;
52 56
53 /* only so many hash values supported */ 57 /* only so many hash values supported */
54 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); 58 entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -68,8 +72,13 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
68 vfinfo->vf_mc_hashes[i] = hash_list[i];; 72 vfinfo->vf_mc_hashes[i] = hash_list[i];;
69 } 73 }
70 74
71 /* Flush and reset the mta with the new values */ 75 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
72 ixgbe_set_rx_mode(adapter->netdev); 76 vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
77 vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
78 mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
79 mta_reg |= (1 << vector_bit);
80 IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
81 }
73 82
74 return 0; 83 return 0;
75} 84}
@@ -98,38 +107,51 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
98 107
99int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) 108int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
100{ 109{
101 u32 ctrl;
102
103 /* Check if global VLAN already set, if not set it */
104 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
105 if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
106 /* enable VLAN tag insert/strip */
107 ctrl |= IXGBE_VLNCTRL_VFE;
108 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
109 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
110 }
111
112 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); 110 return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
113} 111}
114 112
115 113
116void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf) 114void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
117{ 115{
118 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); 116 u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
119 vmolr |= (IXGBE_VMOLR_AUPE | 117 vmolr |= (IXGBE_VMOLR_ROMPE |
120 IXGBE_VMOLR_ROMPE |
121 IXGBE_VMOLR_ROPE | 118 IXGBE_VMOLR_ROPE |
122 IXGBE_VMOLR_BAM); 119 IXGBE_VMOLR_BAM);
120 if (aupe)
121 vmolr |= IXGBE_VMOLR_AUPE;
122 else
123 vmolr &= ~IXGBE_VMOLR_AUPE;
123 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); 124 IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
124} 125}
125 126
127static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, u32 vid, u32 vf)
128{
129 struct ixgbe_hw *hw = &adapter->hw;
130
131 if (vid)
132 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf),
133 (vid | IXGBE_VMVIR_VLANA_DEFAULT));
134 else
135 IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
136}
137
126inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) 138inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
127{ 139{
128 struct ixgbe_hw *hw = &adapter->hw; 140 struct ixgbe_hw *hw = &adapter->hw;
129 141
130 /* reset offloads to defaults */ 142 /* reset offloads to defaults */
131 ixgbe_set_vmolr(hw, vf); 143 if (adapter->vfinfo[vf].pf_vlan) {
132 144 ixgbe_set_vf_vlan(adapter, true,
145 adapter->vfinfo[vf].pf_vlan, vf);
146 ixgbe_set_vmvir(adapter,
147 (adapter->vfinfo[vf].pf_vlan |
148 (adapter->vfinfo[vf].pf_qos <<
149 VLAN_PRIO_SHIFT)), vf);
150 ixgbe_set_vmolr(hw, vf, false);
151 } else {
152 ixgbe_set_vmvir(adapter, 0, vf);
153 ixgbe_set_vmolr(hw, vf, true);
154 }
133 155
134 /* reset multicast table array for vf */ 156 /* reset multicast table array for vf */
135 adapter->vfinfo[vf].num_vf_mc_hashes = 0; 157 adapter->vfinfo[vf].num_vf_mc_hashes = 0;
@@ -263,10 +285,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
263 case IXGBE_VF_SET_MAC_ADDR: 285 case IXGBE_VF_SET_MAC_ADDR:
264 { 286 {
265 u8 *new_mac = ((u8 *)(&msgbuf[1])); 287 u8 *new_mac = ((u8 *)(&msgbuf[1]));
266 if (is_valid_ether_addr(new_mac)) 288 if (is_valid_ether_addr(new_mac) &&
289 !adapter->vfinfo[vf].pf_set_mac)
267 ixgbe_set_vf_mac(adapter, vf, new_mac); 290 ixgbe_set_vf_mac(adapter, vf, new_mac);
268 else 291 else
269 retval = -1; 292 ixgbe_set_vf_mac(adapter,
293 vf, adapter->vfinfo[vf].vf_mac_addresses);
270 } 294 }
271 break; 295 break;
272 case IXGBE_VF_SET_MULTICAST: 296 case IXGBE_VF_SET_MULTICAST:
@@ -360,3 +384,76 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
360 } 384 }
361} 385}
362 386
387int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
388{
389 struct ixgbe_adapter *adapter = netdev_priv(netdev);
390 if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs))
391 return -EINVAL;
392 adapter->vfinfo[vf].pf_set_mac = true;
393 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
394 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
395 " change effective.");
396 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
397 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
398 " but the PF device is not up.\n");
399 dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
400 " attempting to use the VF device.\n");
401 }
402 return ixgbe_set_vf_mac(adapter, vf, mac);
403}
404
405int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
406{
407 int err = 0;
408 struct ixgbe_adapter *adapter = netdev_priv(netdev);
409
410 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
411 return -EINVAL;
412 if (vlan || qos) {
413 err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
414 if (err)
415 goto out;
416 ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
417 ixgbe_set_vmolr(&adapter->hw, vf, false);
418 adapter->vfinfo[vf].pf_vlan = vlan;
419 adapter->vfinfo[vf].pf_qos = qos;
420 dev_info(&adapter->pdev->dev,
421 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
422 if (test_bit(__IXGBE_DOWN, &adapter->state)) {
423 dev_warn(&adapter->pdev->dev,
424 "The VF VLAN has been set,"
425 " but the PF device is not up.\n");
426 dev_warn(&adapter->pdev->dev,
427 "Bring the PF device up before"
428 " attempting to use the VF device.\n");
429 }
430 } else {
431 err = ixgbe_set_vf_vlan(adapter, false,
432 adapter->vfinfo[vf].pf_vlan, vf);
433 ixgbe_set_vmvir(adapter, vlan, vf);
434 ixgbe_set_vmolr(&adapter->hw, vf, true);
435 adapter->vfinfo[vf].pf_vlan = 0;
436 adapter->vfinfo[vf].pf_qos = 0;
437 }
438out:
439 return err;
440}
441
442int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
443{
444 return -EOPNOTSUPP;
445}
446
447int ixgbe_ndo_get_vf_config(struct net_device *netdev,
448 int vf, struct ifla_vf_info *ivi)
449{
450 struct ixgbe_adapter *adapter = netdev_priv(netdev);
451 if (vf >= adapter->num_vfs)
452 return -EINVAL;
453 ivi->vf = vf;
454 memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
455 ivi->tx_rate = 0;
456 ivi->vlan = adapter->vfinfo[vf].pf_vlan;
457 ivi->qos = adapter->vfinfo[vf].pf_qos;
458 return 0;
459}
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
index 51d1106c45a1..184730ecdfb6 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -32,7 +32,7 @@ int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
32 int entries, u16 *hash_list, u32 vf); 32 int entries, u16 *hash_list, u32 vf);
33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); 33void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf); 34int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf); 35void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe);
36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf); 36void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf); 37void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
38void ixgbe_msg_task(struct ixgbe_adapter *adapter); 38void ixgbe_msg_task(struct ixgbe_adapter *adapter);
@@ -42,6 +42,12 @@ int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); 42void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); 43void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
44void ixgbe_dump_registers(struct ixgbe_adapter *adapter); 44void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
45int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
46int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
47 u8 qos);
48int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
49int ixgbe_ndo_get_vf_config(struct net_device *netdev,
50 int vf, struct ifla_vf_info *ivi);
45 51
46#endif /* _IXGBE_SRIOV_H_ */ 52#endif /* _IXGBE_SRIOV_H_ */
47 53
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 4ec6dc1a5b75..39b9be897439 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -73,6 +73,7 @@
73/* NVM Registers */ 73/* NVM Registers */
74#define IXGBE_EEC 0x10010 74#define IXGBE_EEC 0x10010
75#define IXGBE_EERD 0x10014 75#define IXGBE_EERD 0x10014
76#define IXGBE_EEWR 0x10018
76#define IXGBE_FLA 0x1001C 77#define IXGBE_FLA 0x1001C
77#define IXGBE_EEMNGCTL 0x10110 78#define IXGBE_EEMNGCTL 0x10110
78#define IXGBE_EEMNGDATA 0x10114 79#define IXGBE_EEMNGDATA 0x10114
@@ -219,6 +220,7 @@
219#define IXGBE_MTQC 0x08120 220#define IXGBE_MTQC 0x08120
220#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ 221#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */
221#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ 222#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */
223#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */
222#define IXGBE_VT_CTL 0x051B0 224#define IXGBE_VT_CTL 0x051B0
223#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) 225#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
224#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) 226#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
@@ -698,6 +700,7 @@
698#define IXGBE_MREVID 0x11064 700#define IXGBE_MREVID 0x11064
699#define IXGBE_DCA_ID 0x11070 701#define IXGBE_DCA_ID 0x11070
700#define IXGBE_DCA_CTRL 0x11074 702#define IXGBE_DCA_CTRL 0x11074
703#define IXGBE_SWFW_SYNC IXGBE_GSSR
701 704
702/* PCIe registers 82599-specific */ 705/* PCIe registers 82599-specific */
703#define IXGBE_GCR_EXT 0x11050 706#define IXGBE_GCR_EXT 0x11050
@@ -1311,6 +1314,10 @@
1311#define IXGBE_VLVF_ENTRIES 64 1314#define IXGBE_VLVF_ENTRIES 64
1312#define IXGBE_VLVF_VLANID_MASK 0x00000FFF 1315#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
1313 1316
1317/* Per VF Port VLAN insertion rules */
1318#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
1319#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
1320
1314#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ 1321#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
1315 1322
1316/* STATUS Bit Masks */ 1323/* STATUS Bit Masks */
@@ -1458,8 +1465,9 @@
1458#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1465#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
1459#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ 1466#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
1460#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ 1467#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */
1468#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */
1461 1469
1462/* GSSR definitions */ 1470/* SW_FW_SYNC/GSSR definitions */
1463#define IXGBE_GSSR_EEP_SM 0x0001 1471#define IXGBE_GSSR_EEP_SM 0x0001
1464#define IXGBE_GSSR_PHY0_SM 0x0002 1472#define IXGBE_GSSR_PHY0_SM 0x0002
1465#define IXGBE_GSSR_PHY1_SM 0x0004 1473#define IXGBE_GSSR_PHY1_SM 0x0004
@@ -1479,6 +1487,8 @@
1479#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ 1487#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */
1480#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ 1488#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
1481#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ 1489#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
1490#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
1491#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
1482/* EEPROM Addressing bits based on type (0-small, 1-large) */ 1492/* EEPROM Addressing bits based on type (0-small, 1-large) */
1483#define IXGBE_EEC_ADDR_SIZE 0x00000400 1493#define IXGBE_EEC_ADDR_SIZE 0x00000400
1484#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ 1494#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */
@@ -1534,10 +1544,12 @@
1534#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ 1544#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */
1535 1545
1536/* EEPROM Read Register */ 1546/* EEPROM Read Register */
1537#define IXGBE_EEPROM_READ_REG_DATA 16 /* data offset in EEPROM read reg */ 1547#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */
1538#define IXGBE_EEPROM_READ_REG_DONE 2 /* Offset to READ done bit */ 1548#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */
1539#define IXGBE_EEPROM_READ_REG_START 1 /* First bit to start operation */ 1549#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */
1540#define IXGBE_EEPROM_READ_ADDR_SHIFT 2 /* Shift to the address bits */ 1550#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */
1551#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
1552#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
1541 1553
1542#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 1554#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
1543 1555
@@ -1545,9 +1557,15 @@
1545#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ 1557#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
1546#endif 1558#endif
1547 1559
1548#ifndef IXGBE_EERD_ATTEMPTS 1560#ifndef IXGBE_EERD_EEWR_ATTEMPTS
1549/* Number of 5 microseconds we wait for EERD read to complete */ 1561/* Number of 5 microseconds we wait for EERD read and
1550#define IXGBE_EERD_ATTEMPTS 100000 1562 * EERW write to complete */
1563#define IXGBE_EERD_EEWR_ATTEMPTS 100000
1564#endif
1565
1566#ifndef IXGBE_FLUDONE_ATTEMPTS
1567/* # attempts we wait for flush update to complete */
1568#define IXGBE_FLUDONE_ATTEMPTS 20000
1551#endif 1569#endif
1552 1570
1553#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 1571#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0
@@ -2090,6 +2108,7 @@ typedef u32 ixgbe_physical_layer;
2090#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 2108#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400
2091#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 2109#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800
2092#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 2110#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
2111#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
2093 2112
2094/* Software ATR hash keys */ 2113/* Software ATR hash keys */
2095#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D 2114#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D
@@ -2159,10 +2178,12 @@ enum ixgbe_phy_type {
2159 ixgbe_phy_qt, 2178 ixgbe_phy_qt,
2160 ixgbe_phy_xaui, 2179 ixgbe_phy_xaui,
2161 ixgbe_phy_nl, 2180 ixgbe_phy_nl,
2162 ixgbe_phy_tw_tyco, 2181 ixgbe_phy_sfp_passive_tyco,
2163 ixgbe_phy_tw_unknown, 2182 ixgbe_phy_sfp_passive_unknown,
2183 ixgbe_phy_sfp_active_unknown,
2164 ixgbe_phy_sfp_avago, 2184 ixgbe_phy_sfp_avago,
2165 ixgbe_phy_sfp_ftl, 2185 ixgbe_phy_sfp_ftl,
2186 ixgbe_phy_sfp_ftl_active,
2166 ixgbe_phy_sfp_unknown, 2187 ixgbe_phy_sfp_unknown,
2167 ixgbe_phy_sfp_intel, 2188 ixgbe_phy_sfp_intel,
2168 ixgbe_phy_sfp_unsupported, 2189 ixgbe_phy_sfp_unsupported,
@@ -2190,6 +2211,8 @@ enum ixgbe_sfp_type {
2190 ixgbe_sfp_type_da_cu_core1 = 4, 2211 ixgbe_sfp_type_da_cu_core1 = 4,
2191 ixgbe_sfp_type_srlr_core0 = 5, 2212 ixgbe_sfp_type_srlr_core0 = 5,
2192 ixgbe_sfp_type_srlr_core1 = 6, 2213 ixgbe_sfp_type_srlr_core1 = 6,
2214 ixgbe_sfp_type_da_act_lmt_core0 = 7,
2215 ixgbe_sfp_type_da_act_lmt_core1 = 8,
2193 ixgbe_sfp_type_not_present = 0xFFFE, 2216 ixgbe_sfp_type_not_present = 0xFFFE,
2194 ixgbe_sfp_type_unknown = 0xFFFF 2217 ixgbe_sfp_type_unknown = 0xFFFF
2195}; 2218};
@@ -2263,6 +2286,7 @@ struct ixgbe_addr_filter_info {
2263 u32 mc_addr_in_rar_count; 2286 u32 mc_addr_in_rar_count;
2264 u32 mta_in_use; 2287 u32 mta_in_use;
2265 u32 overflow_promisc; 2288 u32 overflow_promisc;
2289 bool uc_set_promisc;
2266 bool user_set_promisc; 2290 bool user_set_promisc;
2267}; 2291};
2268 2292
@@ -2398,6 +2422,8 @@ struct ixgbe_mac_operations {
2398 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32); 2422 s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
2399 2423
2400 /* Link */ 2424 /* Link */
2425 void (*disable_tx_laser)(struct ixgbe_hw *);
2426 void (*enable_tx_laser)(struct ixgbe_hw *);
2401 void (*flap_tx_laser)(struct ixgbe_hw *); 2427 void (*flap_tx_laser)(struct ixgbe_hw *);
2402 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool); 2428 s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
2403 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); 2429 s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
@@ -2417,8 +2443,7 @@ struct ixgbe_mac_operations {
2417 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2443 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2418 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2444 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2419 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *); 2445 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
2420 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, 2446 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
2421 ixgbe_mc_addr_itr);
2422 s32 (*enable_mc)(struct ixgbe_hw *); 2447 s32 (*enable_mc)(struct ixgbe_hw *);
2423 s32 (*disable_mc)(struct ixgbe_hw *); 2448 s32 (*disable_mc)(struct ixgbe_hw *);
2424 s32 (*clear_vfta)(struct ixgbe_hw *); 2449 s32 (*clear_vfta)(struct ixgbe_hw *);
@@ -2469,6 +2494,7 @@ struct ixgbe_mac_info {
2469 u32 mcft_size; 2494 u32 mcft_size;
2470 u32 vft_size; 2495 u32 vft_size;
2471 u32 num_rar_entries; 2496 u32 num_rar_entries;
2497 u32 rar_highwater;
2472 u32 max_tx_queues; 2498 u32 max_tx_queues;
2473 u32 max_rx_queues; 2499 u32 max_rx_queues;
2474 u32 max_msix_vectors; 2500 u32 max_msix_vectors;
@@ -2575,8 +2601,10 @@ struct ixgbe_info {
2575#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 2601#define IXGBE_ERR_SFP_NOT_SUPPORTED -19
2576#define IXGBE_ERR_SFP_NOT_PRESENT -20 2602#define IXGBE_ERR_SFP_NOT_PRESENT -20
2577#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 2603#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21
2604#define IXGBE_ERR_NO_SAN_ADDR_PTR -22
2578#define IXGBE_ERR_FDIR_REINIT_FAILED -23 2605#define IXGBE_ERR_FDIR_REINIT_FAILED -23
2579#define IXGBE_ERR_EEPROM_VERSION -24 2606#define IXGBE_ERR_EEPROM_VERSION -24
2607#define IXGBE_ERR_NO_SPACE -25
2580#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2608#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
2581 2609
2582#endif /* _IXGBE_TYPE_H_ */ 2610#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index c44fdb05447a..ca2c81f49a05 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -41,11 +41,13 @@ typedef u32 ixgbe_link_speed;
41#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 41#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
42#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 42#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
43 43
44#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ 44#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
45#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ 45#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
46#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ 46#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
47#define IXGBE_LINKS_UP 0x40000000 47#define IXGBE_LINKS_UP 0x40000000
48#define IXGBE_LINKS_SPEED 0x20000000 48#define IXGBE_LINKS_SPEED_82599 0x30000000
49#define IXGBE_LINKS_SPEED_10G_82599 0x30000000
50#define IXGBE_LINKS_SPEED_1G_82599 0x20000000
49 51
50/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ 52/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
51#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 53#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 0cd6202dfacc..a16cff7e54a3 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -139,15 +139,15 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
139{ 139{
140 if (tx_buffer_info->dma) { 140 if (tx_buffer_info->dma) {
141 if (tx_buffer_info->mapped_as_page) 141 if (tx_buffer_info->mapped_as_page)
142 pci_unmap_page(adapter->pdev, 142 dma_unmap_page(&adapter->pdev->dev,
143 tx_buffer_info->dma, 143 tx_buffer_info->dma,
144 tx_buffer_info->length, 144 tx_buffer_info->length,
145 PCI_DMA_TODEVICE); 145 DMA_TO_DEVICE);
146 else 146 else
147 pci_unmap_single(adapter->pdev, 147 dma_unmap_single(&adapter->pdev->dev,
148 tx_buffer_info->dma, 148 tx_buffer_info->dma,
149 tx_buffer_info->length, 149 tx_buffer_info->length,
150 PCI_DMA_TODEVICE); 150 DMA_TO_DEVICE);
151 tx_buffer_info->dma = 0; 151 tx_buffer_info->dma = 0;
152 } 152 }
153 if (tx_buffer_info->skb) { 153 if (tx_buffer_info->skb) {
@@ -416,10 +416,10 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
416 bi->page_offset ^= (PAGE_SIZE / 2); 416 bi->page_offset ^= (PAGE_SIZE / 2);
417 } 417 }
418 418
419 bi->page_dma = pci_map_page(pdev, bi->page, 419 bi->page_dma = dma_map_page(&pdev->dev, bi->page,
420 bi->page_offset, 420 bi->page_offset,
421 (PAGE_SIZE / 2), 421 (PAGE_SIZE / 2),
422 PCI_DMA_FROMDEVICE); 422 DMA_FROM_DEVICE);
423 } 423 }
424 424
425 skb = bi->skb; 425 skb = bi->skb;
@@ -442,9 +442,9 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
442 bi->skb = skb; 442 bi->skb = skb;
443 } 443 }
444 if (!bi->dma) { 444 if (!bi->dma) {
445 bi->dma = pci_map_single(pdev, skb->data, 445 bi->dma = dma_map_single(&pdev->dev, skb->data,
446 rx_ring->rx_buf_len, 446 rx_ring->rx_buf_len,
447 PCI_DMA_FROMDEVICE); 447 DMA_FROM_DEVICE);
448 } 448 }
449 /* Refresh the desc even if buffer_addrs didn't change because 449 /* Refresh the desc even if buffer_addrs didn't change because
450 * each write-back erases this info. */ 450 * each write-back erases this info. */
@@ -536,16 +536,16 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
536 rx_buffer_info->skb = NULL; 536 rx_buffer_info->skb = NULL;
537 537
538 if (rx_buffer_info->dma) { 538 if (rx_buffer_info->dma) {
539 pci_unmap_single(pdev, rx_buffer_info->dma, 539 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
540 rx_ring->rx_buf_len, 540 rx_ring->rx_buf_len,
541 PCI_DMA_FROMDEVICE); 541 DMA_FROM_DEVICE);
542 rx_buffer_info->dma = 0; 542 rx_buffer_info->dma = 0;
543 skb_put(skb, len); 543 skb_put(skb, len);
544 } 544 }
545 545
546 if (upper_len) { 546 if (upper_len) {
547 pci_unmap_page(pdev, rx_buffer_info->page_dma, 547 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
548 PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 548 PAGE_SIZE / 2, DMA_FROM_DEVICE);
549 rx_buffer_info->page_dma = 0; 549 rx_buffer_info->page_dma = 0;
550 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 550 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
551 rx_buffer_info->page, 551 rx_buffer_info->page,
@@ -604,14 +604,13 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
604 * packets not getting split correctly 604 * packets not getting split correctly
605 */ 605 */
606 if (staterr & IXGBE_RXD_STAT_LB) { 606 if (staterr & IXGBE_RXD_STAT_LB) {
607 u32 header_fixup_len = skb->len - skb->data_len; 607 u32 header_fixup_len = skb_headlen(skb);
608 if (header_fixup_len < 14) 608 if (header_fixup_len < 14)
609 skb_push(skb, header_fixup_len); 609 skb_push(skb, header_fixup_len);
610 } 610 }
611 skb->protocol = eth_type_trans(skb, adapter->netdev); 611 skb->protocol = eth_type_trans(skb, adapter->netdev);
612 612
613 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); 613 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
614 adapter->netdev->last_rx = jiffies;
615 614
616next_desc: 615next_desc:
617 rx_desc->wb.upper.status_error = 0; 616 rx_desc->wb.upper.status_error = 0;
@@ -947,8 +946,6 @@ static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
947 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 946 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr);
948 ixgbevf_write_eitr(adapter, v_idx, itr_reg); 947 ixgbevf_write_eitr(adapter, v_idx, itr_reg);
949 } 948 }
950
951 return;
952} 949}
953 950
954static irqreturn_t ixgbevf_msix_mbx(int irq, void *data) 951static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
@@ -962,12 +959,28 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
962 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS); 959 eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
963 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr); 960 IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
964 961
962 if (!hw->mbx.ops.check_for_ack(hw)) {
963 /*
964 * checking for the ack clears the PFACK bit. Place
965 * it back in the v2p_mailbox cache so that anyone
966 * polling for an ack will not miss it. Also
967 * avoid the read below because the code to read
968 * the mailbox will also clear the ack bit. This was
969 * causing lost acks. Just cache the bit and exit
970 * the IRQ handler.
971 */
972 hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
973 goto out;
974 }
975
976 /* Not an ack interrupt, go ahead and read the message */
965 hw->mbx.ops.read(hw, &msg, 1); 977 hw->mbx.ops.read(hw, &msg, 1);
966 978
967 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG) 979 if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
968 mod_timer(&adapter->watchdog_timer, 980 mod_timer(&adapter->watchdog_timer,
969 round_jiffies(jiffies + 1)); 981 round_jiffies(jiffies + 1));
970 982
983out:
971 return IRQ_HANDLED; 984 return IRQ_HANDLED;
972} 985}
973 986
@@ -1496,22 +1509,6 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1496 } 1509 }
1497} 1510}
1498 1511
1499static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
1500 u32 *vmdq)
1501{
1502 struct dev_mc_list *mc_ptr;
1503 u8 *addr = *mc_addr_ptr;
1504 *vmdq = 0;
1505
1506 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
1507 if (mc_ptr->next)
1508 *mc_addr_ptr = mc_ptr->next->dmi_addr;
1509 else
1510 *mc_addr_ptr = NULL;
1511
1512 return addr;
1513}
1514
1515/** 1512/**
1516 * ixgbevf_set_rx_mode - Multicast set 1513 * ixgbevf_set_rx_mode - Multicast set
1517 * @netdev: network interface device structure 1514 * @netdev: network interface device structure
@@ -1524,16 +1521,10 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
1524{ 1521{
1525 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1522 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1526 struct ixgbe_hw *hw = &adapter->hw; 1523 struct ixgbe_hw *hw = &adapter->hw;
1527 u8 *addr_list = NULL;
1528 int addr_count = 0;
1529 1524
1530 /* reprogram multicast list */ 1525 /* reprogram multicast list */
1531 addr_count = netdev_mc_count(netdev);
1532 if (addr_count)
1533 addr_list = netdev->mc_list->dmi_addr;
1534 if (hw->mac.ops.update_mc_addr_list) 1526 if (hw->mac.ops.update_mc_addr_list)
1535 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, 1527 hw->mac.ops.update_mc_addr_list(hw, netdev);
1536 ixgbevf_addr_list_itr);
1537} 1528}
1538 1529
1539static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1530static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -1744,9 +1735,9 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1744 1735
1745 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 1736 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1746 if (rx_buffer_info->dma) { 1737 if (rx_buffer_info->dma) {
1747 pci_unmap_single(pdev, rx_buffer_info->dma, 1738 dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
1748 rx_ring->rx_buf_len, 1739 rx_ring->rx_buf_len,
1749 PCI_DMA_FROMDEVICE); 1740 DMA_FROM_DEVICE);
1750 rx_buffer_info->dma = 0; 1741 rx_buffer_info->dma = 0;
1751 } 1742 }
1752 if (rx_buffer_info->skb) { 1743 if (rx_buffer_info->skb) {
@@ -1760,8 +1751,8 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
1760 } 1751 }
1761 if (!rx_buffer_info->page) 1752 if (!rx_buffer_info->page)
1762 continue; 1753 continue;
1763 pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2, 1754 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
1764 PCI_DMA_FROMDEVICE); 1755 PAGE_SIZE / 2, DMA_FROM_DEVICE);
1765 rx_buffer_info->page_dma = 0; 1756 rx_buffer_info->page_dma = 0;
1766 put_page(rx_buffer_info->page); 1757 put_page(rx_buffer_info->page);
1767 rx_buffer_info->page = NULL; 1758 rx_buffer_info->page = NULL;
@@ -2158,8 +2149,6 @@ static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
2158 pci_disable_msix(adapter->pdev); 2149 pci_disable_msix(adapter->pdev);
2159 kfree(adapter->msix_entries); 2150 kfree(adapter->msix_entries);
2160 adapter->msix_entries = NULL; 2151 adapter->msix_entries = NULL;
2161
2162 return;
2163} 2152}
2164 2153
2165/** 2154/**
@@ -2418,9 +2407,9 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2418 2407
2419 if (link_up) { 2408 if (link_up) {
2420 if (!netif_carrier_ok(netdev)) { 2409 if (!netif_carrier_ok(netdev)) {
2421 hw_dbg(&adapter->hw, "NIC Link is Up %s, ", 2410 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
2422 ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 2411 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2423 "10 Gbps\n" : "1 Gbps\n")); 2412 10 : 1);
2424 netif_carrier_on(netdev); 2413 netif_carrier_on(netdev);
2425 netif_tx_wake_all_queues(netdev); 2414 netif_tx_wake_all_queues(netdev);
2426 } else { 2415 } else {
@@ -2468,7 +2457,8 @@ void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
2468 vfree(tx_ring->tx_buffer_info); 2457 vfree(tx_ring->tx_buffer_info);
2469 tx_ring->tx_buffer_info = NULL; 2458 tx_ring->tx_buffer_info = NULL;
2470 2459
2471 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 2460 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2461 tx_ring->dma);
2472 2462
2473 tx_ring->desc = NULL; 2463 tx_ring->desc = NULL;
2474} 2464}
@@ -2513,8 +2503,8 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2513 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2503 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
2514 tx_ring->size = ALIGN(tx_ring->size, 4096); 2504 tx_ring->size = ALIGN(tx_ring->size, 4096);
2515 2505
2516 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 2506 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
2517 &tx_ring->dma); 2507 &tx_ring->dma, GFP_KERNEL);
2518 if (!tx_ring->desc) 2508 if (!tx_ring->desc)
2519 goto err; 2509 goto err;
2520 2510
@@ -2584,8 +2574,8 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2584 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2574 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
2585 rx_ring->size = ALIGN(rx_ring->size, 4096); 2575 rx_ring->size = ALIGN(rx_ring->size, 4096);
2586 2576
2587 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 2577 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
2588 &rx_ring->dma); 2578 &rx_ring->dma, GFP_KERNEL);
2589 2579
2590 if (!rx_ring->desc) { 2580 if (!rx_ring->desc) {
2591 hw_dbg(&adapter->hw, 2581 hw_dbg(&adapter->hw,
@@ -2646,7 +2636,8 @@ void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
2646 vfree(rx_ring->rx_buffer_info); 2636 vfree(rx_ring->rx_buffer_info);
2647 rx_ring->rx_buffer_info = NULL; 2637 rx_ring->rx_buffer_info = NULL;
2648 2638
2649 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2639 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2640 rx_ring->dma);
2650 2641
2651 rx_ring->desc = NULL; 2642 rx_ring->desc = NULL;
2652} 2643}
@@ -2958,10 +2949,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2958 2949
2959 tx_buffer_info->length = size; 2950 tx_buffer_info->length = size;
2960 tx_buffer_info->mapped_as_page = false; 2951 tx_buffer_info->mapped_as_page = false;
2961 tx_buffer_info->dma = pci_map_single(adapter->pdev, 2952 tx_buffer_info->dma = dma_map_single(&adapter->pdev->dev,
2962 skb->data + offset, 2953 skb->data + offset,
2963 size, PCI_DMA_TODEVICE); 2954 size, DMA_TO_DEVICE);
2964 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 2955 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
2965 goto dma_error; 2956 goto dma_error;
2966 tx_buffer_info->time_stamp = jiffies; 2957 tx_buffer_info->time_stamp = jiffies;
2967 tx_buffer_info->next_to_watch = i; 2958 tx_buffer_info->next_to_watch = i;
@@ -2987,13 +2978,13 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
2987 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD); 2978 size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
2988 2979
2989 tx_buffer_info->length = size; 2980 tx_buffer_info->length = size;
2990 tx_buffer_info->dma = pci_map_page(adapter->pdev, 2981 tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
2991 frag->page, 2982 frag->page,
2992 offset, 2983 offset,
2993 size, 2984 size,
2994 PCI_DMA_TODEVICE); 2985 DMA_TO_DEVICE);
2995 tx_buffer_info->mapped_as_page = true; 2986 tx_buffer_info->mapped_as_page = true;
2996 if (pci_dma_mapping_error(pdev, tx_buffer_info->dma)) 2987 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
2997 goto dma_error; 2988 goto dma_error;
2998 tx_buffer_info->time_stamp = jiffies; 2989 tx_buffer_info->time_stamp = jiffies;
2999 tx_buffer_info->next_to_watch = i; 2990 tx_buffer_info->next_to_watch = i;
@@ -3189,8 +3180,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
3189 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first), 3180 ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
3190 skb->len, hdr_len); 3181 skb->len, hdr_len);
3191 3182
3192 netdev->trans_start = jiffies;
3193
3194 ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED); 3183 ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
3195 3184
3196 return NETDEV_TX_OK; 3185 return NETDEV_TX_OK;
@@ -3334,14 +3323,14 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3334 if (err) 3323 if (err)
3335 return err; 3324 return err;
3336 3325
3337 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && 3326 if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
3338 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { 3327 !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
3339 pci_using_dac = 1; 3328 pci_using_dac = 1;
3340 } else { 3329 } else {
3341 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3330 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3342 if (err) { 3331 if (err) {
3343 err = pci_set_consistent_dma_mask(pdev, 3332 err = dma_set_coherent_mask(&pdev->dev,
3344 DMA_BIT_MASK(32)); 3333 DMA_BIT_MASK(32));
3345 if (err) { 3334 if (err) {
3346 dev_err(&pdev->dev, "No usable DMA " 3335 dev_err(&pdev->dev, "No usable DMA "
3347 "configuration, aborting\n"); 3336 "configuration, aborting\n");
@@ -3482,7 +3471,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3482 3471
3483 hw_dbg(hw, "MAC: %d\n", hw->mac.type); 3472 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3484 3473
3485 hw_dbg(hw, "LRO is disabled \n"); 3474 hw_dbg(hw, "LRO is disabled\n");
3486 3475
3487 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); 3476 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3488 cards_found++; 3477 cards_found++;
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index 4b5dec0ec140..f6f929958ba0 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -252,22 +252,18 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
252/** 252/**
253 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses 253 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
254 * @hw: pointer to the HW structure 254 * @hw: pointer to the HW structure
255 * @mc_addr_list: array of multicast addresses to program 255 * @netdev: pointer to net device structure
256 * @mc_addr_count: number of multicast addresses to program
257 * @next: caller supplied function to return next address in list
258 * 256 *
259 * Updates the Multicast Table Array. 257 * Updates the Multicast Table Array.
260 **/ 258 **/
261static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, 259static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
262 u32 mc_addr_count, 260 struct net_device *netdev)
263 ixgbe_mc_addr_itr next)
264{ 261{
262 struct netdev_hw_addr *ha;
265 struct ixgbe_mbx_info *mbx = &hw->mbx; 263 struct ixgbe_mbx_info *mbx = &hw->mbx;
266 u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; 264 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
267 u16 *vector_list = (u16 *)&msgbuf[1]; 265 u16 *vector_list = (u16 *)&msgbuf[1];
268 u32 vector;
269 u32 cnt, i; 266 u32 cnt, i;
270 u32 vmdq;
271 267
272 /* Each entry in the list uses 1 16 bit word. We have 30 268 /* Each entry in the list uses 1 16 bit word. We have 30
273 * 16 bit words available in our HW msg buffer (minus 1 for the 269 * 16 bit words available in our HW msg buffer (minus 1 for the
@@ -278,13 +274,17 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
278 * addresses except for in large enterprise network environments. 274 * addresses except for in large enterprise network environments.
279 */ 275 */
280 276
281 cnt = (mc_addr_count > 30) ? 30 : mc_addr_count; 277 cnt = netdev_mc_count(netdev);
278 if (cnt > 30)
279 cnt = 30;
282 msgbuf[0] = IXGBE_VF_SET_MULTICAST; 280 msgbuf[0] = IXGBE_VF_SET_MULTICAST;
283 msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; 281 msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
284 282
285 for (i = 0; i < cnt; i++) { 283 i = 0;
286 vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq)); 284 netdev_for_each_mc_addr(ha, netdev) {
287 vector_list[i] = vector; 285 if (i == cnt)
286 break;
287 vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
288 } 288 }
289 289
290 mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); 290 mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
@@ -359,7 +359,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
359 else 359 else
360 *link_up = false; 360 *link_up = false;
361 361
362 if (links_reg & IXGBE_LINKS_SPEED) 362 if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
363 IXGBE_LINKS_SPEED_10G_82599)
363 *speed = IXGBE_LINK_SPEED_10GB_FULL; 364 *speed = IXGBE_LINK_SPEED_10GB_FULL;
364 else 365 else
365 *speed = IXGBE_LINK_SPEED_1GB_FULL; 366 *speed = IXGBE_LINK_SPEED_1GB_FULL;
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 1f31b052d4b4..94b750b8874f 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -32,6 +32,7 @@
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/if_ether.h> 34#include <linux/if_ether.h>
35#include <linux/netdevice.h>
35 36
36#include "defines.h" 37#include "defines.h"
37#include "regs.h" 38#include "regs.h"
@@ -62,8 +63,7 @@ struct ixgbe_mac_operations {
62 /* RAR, Multicast, VLAN */ 63 /* RAR, Multicast, VLAN */
63 s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32); 64 s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
64 s32 (*init_rx_addrs)(struct ixgbe_hw *); 65 s32 (*init_rx_addrs)(struct ixgbe_hw *);
65 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, 66 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
66 ixgbe_mc_addr_itr);
67 s32 (*enable_mc)(struct ixgbe_hw *); 67 s32 (*enable_mc)(struct ixgbe_hw *);
68 s32 (*disable_mc)(struct ixgbe_hw *); 68 s32 (*disable_mc)(struct ixgbe_hw *);
69 s32 (*clear_vfta)(struct ixgbe_hw *); 69 s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index d5932ca3e27d..78ddd8b79e7e 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -64,8 +64,6 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
64 ixp2000_reg_write(RING_TX_PENDING, 64 ixp2000_reg_write(RING_TX_PENDING,
65 TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc))); 65 TX_BUF_DESC_BASE + (entry * sizeof(struct ixpdev_tx_desc)));
66 66
67 dev->trans_start = jiffies;
68
69 local_irq_save(flags); 67 local_irq_save(flags);
70 ip->tx_queue_entries++; 68 ip->tx_queue_entries++;
71 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN) 69 if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index b705ad3a53a7..99f24f5cac53 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -103,8 +103,6 @@ jme_mdio_write(struct net_device *netdev,
103 103
104 if (i == 0) 104 if (i == 0)
105 jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg); 105 jeprintk(jme->pdev, "phy(%d) write timeout : %d\n", phy, reg);
106
107 return;
108} 106}
109 107
110static inline void 108static inline void
@@ -130,8 +128,6 @@ jme_reset_phy_processor(struct jme_adapter *jme)
130 jme_mdio_write(jme->dev, 128 jme_mdio_write(jme->dev,
131 jme->mii_if.phy_id, 129 jme->mii_if.phy_id,
132 MII_BMCR, val | BMCR_RESET); 130 MII_BMCR, val | BMCR_RESET);
133
134 return;
135} 131}
136 132
137static void 133static void
@@ -2010,12 +2006,12 @@ jme_set_multi(struct net_device *netdev)
2010 } else if (netdev->flags & IFF_ALLMULTI) { 2006 } else if (netdev->flags & IFF_ALLMULTI) {
2011 jme->reg_rxmcs |= RXMCS_ALLMULFRAME; 2007 jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
2012 } else if (netdev->flags & IFF_MULTICAST) { 2008 } else if (netdev->flags & IFF_MULTICAST) {
2013 struct dev_mc_list *mclist; 2009 struct netdev_hw_addr *ha;
2014 int bit_nr; 2010 int bit_nr;
2015 2011
2016 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; 2012 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
2017 netdev_for_each_mc_addr(mclist, netdev) { 2013 netdev_for_each_mc_addr(ha, netdev) {
2018 bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F; 2014 bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
2019 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); 2015 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
2020 } 2016 }
2021 2017
@@ -2839,7 +2835,7 @@ jme_init_one(struct pci_dev *pdev,
2839 default: 2835 default:
2840 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B; 2836 jme->reg_txcs = TXCS_DEFAULT | TXCS_DMASIZE_512B;
2841 break; 2837 break;
2842 }; 2838 }
2843 2839
2844 /* 2840 /*
2845 * Must check before reset_mac_processor 2841 * Must check before reset_mac_processor
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 300c2249812d..26bf1b76b997 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -482,7 +482,7 @@ static void korina_multicast_list(struct net_device *dev)
482{ 482{
483 struct korina_private *lp = netdev_priv(dev); 483 struct korina_private *lp = netdev_priv(dev);
484 unsigned long flags; 484 unsigned long flags;
485 struct dev_mc_list *dmi; 485 struct netdev_hw_addr *ha;
486 u32 recognise = ETH_ARC_AB; /* always accept broadcasts */ 486 u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
487 int i; 487 int i;
488 488
@@ -502,8 +502,8 @@ static void korina_multicast_list(struct net_device *dev)
502 for (i = 0; i < 4; i++) 502 for (i = 0; i < 4; i++)
503 hash_table[i] = 0; 503 hash_table[i] = 0;
504 504
505 netdev_for_each_mc_addr(dmi, dev) { 505 netdev_for_each_mc_addr(ha, dev) {
506 char *addrs = dmi->dmi_addr; 506 char *addrs = ha->addr;
507 507
508 if (!(*addrs & 1)) 508 if (!(*addrs & 1))
509 continue; 509 continue;
@@ -1135,7 +1135,7 @@ static int korina_probe(struct platform_device *pdev)
1135 1135
1136 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs"); 1136 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
1137 dev->base_addr = r->start; 1137 dev->base_addr = r->start;
1138 lp->eth_regs = ioremap_nocache(r->start, r->end - r->start); 1138 lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
1139 if (!lp->eth_regs) { 1139 if (!lp->eth_regs) {
1140 printk(KERN_ERR DRV_NAME ": cannot remap registers\n"); 1140 printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
1141 rc = -ENXIO; 1141 rc = -ENXIO;
@@ -1143,7 +1143,7 @@ static int korina_probe(struct platform_device *pdev)
1143 } 1143 }
1144 1144
1145 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx"); 1145 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
1146 lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start); 1146 lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
1147 if (!lp->rx_dma_regs) { 1147 if (!lp->rx_dma_regs) {
1148 printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n"); 1148 printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
1149 rc = -ENXIO; 1149 rc = -ENXIO;
@@ -1151,7 +1151,7 @@ static int korina_probe(struct platform_device *pdev)
1151 } 1151 }
1152 1152
1153 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx"); 1153 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
1154 lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start); 1154 lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
1155 if (!lp->tx_dma_regs) { 1155 if (!lp->tx_dma_regs) {
1156 printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n"); 1156 printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
1157 rc = -ENXIO; 1157 rc = -ENXIO;
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 5c45cb58d023..f852ab3ae9cf 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * ks8842_main.c timberdale KS8842 ethernet driver 2 * ks8842.c timberdale KS8842 ethernet driver
3 * Copyright (c) 2009 Intel Corporation 3 * Copyright (c) 2009 Intel Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
@@ -20,12 +20,15 @@
20 * The Micrel KS8842 behind the timberdale FPGA 20 * The Micrel KS8842 behind the timberdale FPGA
21 */ 21 */
22 22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
23#include <linux/kernel.h> 25#include <linux/kernel.h>
24#include <linux/module.h> 26#include <linux/module.h>
25#include <linux/platform_device.h> 27#include <linux/platform_device.h>
26#include <linux/netdevice.h> 28#include <linux/netdevice.h>
27#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
28#include <linux/ethtool.h> 30#include <linux/ethtool.h>
31#include <linux/ks8842.h>
29 32
30#define DRV_NAME "ks8842" 33#define DRV_NAME "ks8842"
31 34
@@ -302,6 +305,20 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
302 ks8842_write16(adapter, 39, mac, REG_MACAR3); 305 ks8842_write16(adapter, 39, mac, REG_MACAR3);
303} 306}
304 307
308static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
309{
310 unsigned long flags;
311 unsigned i;
312
313 spin_lock_irqsave(&adapter->lock, flags);
314 for (i = 0; i < ETH_ALEN; i++) {
315 ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
316 ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
317 REG_MACAR1 + i);
318 }
319 spin_unlock_irqrestore(&adapter->lock, flags);
320}
321
305static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter) 322static inline u16 ks8842_tx_fifo_space(struct ks8842_adapter *adapter)
306{ 323{
307 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff; 324 return ks8842_read16(adapter, 16, REG_TXMIR) & 0x1fff;
@@ -520,13 +537,14 @@ static int ks8842_open(struct net_device *netdev)
520 /* reset the HW */ 537 /* reset the HW */
521 ks8842_reset_hw(adapter); 538 ks8842_reset_hw(adapter);
522 539
540 ks8842_write_mac_addr(adapter, netdev->dev_addr);
541
523 ks8842_update_link_status(netdev, adapter); 542 ks8842_update_link_status(netdev, adapter);
524 543
525 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, 544 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
526 adapter); 545 adapter);
527 if (err) { 546 if (err) {
528 printk(KERN_ERR "Failed to request IRQ: %d: %d\n", 547 pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
529 adapter->irq, err);
530 return err; 548 return err;
531 } 549 }
532 550
@@ -567,10 +585,8 @@ static netdev_tx_t ks8842_xmit_frame(struct sk_buff *skb,
567static int ks8842_set_mac(struct net_device *netdev, void *p) 585static int ks8842_set_mac(struct net_device *netdev, void *p)
568{ 586{
569 struct ks8842_adapter *adapter = netdev_priv(netdev); 587 struct ks8842_adapter *adapter = netdev_priv(netdev);
570 unsigned long flags;
571 struct sockaddr *addr = p; 588 struct sockaddr *addr = p;
572 char *mac = (u8 *)addr->sa_data; 589 char *mac = (u8 *)addr->sa_data;
573 int i;
574 590
575 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__); 591 dev_dbg(&adapter->pdev->dev, "%s: entry\n", __func__);
576 592
@@ -579,13 +595,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
579 595
580 memcpy(netdev->dev_addr, mac, netdev->addr_len); 596 memcpy(netdev->dev_addr, mac, netdev->addr_len);
581 597
582 spin_lock_irqsave(&adapter->lock, flags); 598 ks8842_write_mac_addr(adapter, mac);
583 for (i = 0; i < ETH_ALEN; i++) {
584 ks8842_write8(adapter, 2, mac[ETH_ALEN - i - 1], REG_MARL + i);
585 ks8842_write8(adapter, 39, mac[ETH_ALEN - i - 1],
586 REG_MACAR1 + i);
587 }
588 spin_unlock_irqrestore(&adapter->lock, flags);
589 return 0; 599 return 0;
590} 600}
591 601
@@ -604,6 +614,8 @@ static void ks8842_tx_timeout(struct net_device *netdev)
604 614
605 ks8842_reset_hw(adapter); 615 ks8842_reset_hw(adapter);
606 616
617 ks8842_write_mac_addr(adapter, netdev->dev_addr);
618
607 ks8842_update_link_status(netdev, adapter); 619 ks8842_update_link_status(netdev, adapter);
608} 620}
609 621
@@ -626,7 +638,9 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
626 struct resource *iomem; 638 struct resource *iomem;
627 struct net_device *netdev; 639 struct net_device *netdev;
628 struct ks8842_adapter *adapter; 640 struct ks8842_adapter *adapter;
641 struct ks8842_platform_data *pdata = pdev->dev.platform_data;
629 u16 id; 642 u16 id;
643 unsigned i;
630 644
631 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 645 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
632 if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME)) 646 if (!request_mem_region(iomem->start, resource_size(iomem), DRV_NAME))
@@ -657,7 +671,25 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
657 netdev->netdev_ops = &ks8842_netdev_ops; 671 netdev->netdev_ops = &ks8842_netdev_ops;
658 netdev->ethtool_ops = &ks8842_ethtool_ops; 672 netdev->ethtool_ops = &ks8842_ethtool_ops;
659 673
660 ks8842_read_mac_addr(adapter, netdev->dev_addr); 674 /* Check if a mac address was given */
675 i = netdev->addr_len;
676 if (pdata) {
677 for (i = 0; i < netdev->addr_len; i++)
678 if (pdata->macaddr[i] != 0)
679 break;
680
681 if (i < netdev->addr_len)
682 /* an address was passed, use it */
683 memcpy(netdev->dev_addr, pdata->macaddr,
684 netdev->addr_len);
685 }
686
687 if (i == netdev->addr_len) {
688 ks8842_read_mac_addr(adapter, netdev->dev_addr);
689
690 if (!is_valid_ether_addr(netdev->dev_addr))
691 random_ether_addr(netdev->dev_addr);
692 }
661 693
662 id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE); 694 id = ks8842_read16(adapter, 32, REG_SW_ID_AND_ENABLE);
663 695
@@ -668,8 +700,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
668 700
669 platform_set_drvdata(pdev, netdev); 701 platform_set_drvdata(pdev, netdev);
670 702
671 printk(KERN_INFO DRV_NAME 703 pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
672 " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
673 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); 704 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
674 705
675 return 0; 706 return 0;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 13cc1ca261d9..b4fb07a6f13f 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -9,6 +9,8 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#define DEBUG 14#define DEBUG
13 15
14#include <linux/module.h> 16#include <linux/module.h>
@@ -76,7 +78,9 @@ union ks8851_tx_hdr {
76 * @msg_enable: The message flags controlling driver output (see ethtool). 78 * @msg_enable: The message flags controlling driver output (see ethtool).
77 * @fid: Incrementing frame id tag. 79 * @fid: Incrementing frame id tag.
78 * @rc_ier: Cached copy of KS_IER. 80 * @rc_ier: Cached copy of KS_IER.
81 * @rc_ccr: Cached copy of KS_CCR.
79 * @rc_rxqcr: Cached copy of KS_RXQCR. 82 * @rc_rxqcr: Cached copy of KS_RXQCR.
83 * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
80 * 84 *
81 * The @lock ensures that the chip is protected when certain operations are 85 * The @lock ensures that the chip is protected when certain operations are
82 * in progress. When the read or write packet transfer is in progress, most 86 * in progress. When the read or write packet transfer is in progress, most
@@ -107,6 +111,8 @@ struct ks8851_net {
107 111
108 u16 rc_ier; 112 u16 rc_ier;
109 u16 rc_rxqcr; 113 u16 rc_rxqcr;
114 u16 rc_ccr;
115 u16 eeprom_size;
110 116
111 struct mii_if_info mii; 117 struct mii_if_info mii;
112 struct ks8851_rxctrl rxctrl; 118 struct ks8851_rxctrl rxctrl;
@@ -125,11 +131,6 @@ struct ks8851_net {
125 131
126static int msg_enable; 132static int msg_enable;
127 133
128#define ks_info(_ks, _msg...) dev_info(&(_ks)->spidev->dev, _msg)
129#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->spidev->dev, _msg)
130#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->spidev->dev, _msg)
131#define ks_err(_ks, _msg...) dev_err(&(_ks)->spidev->dev, _msg)
132
133/* shift for byte-enable data */ 134/* shift for byte-enable data */
134#define BYTE_EN(_x) ((_x) << 2) 135#define BYTE_EN(_x) ((_x) << 2)
135 136
@@ -167,7 +168,7 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
167 168
168 ret = spi_sync(ks->spidev, msg); 169 ret = spi_sync(ks->spidev, msg);
169 if (ret < 0) 170 if (ret < 0)
170 ks_err(ks, "spi_sync() failed\n"); 171 netdev_err(ks->netdev, "spi_sync() failed\n");
171} 172}
172 173
173/** 174/**
@@ -197,7 +198,7 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
197 198
198 ret = spi_sync(ks->spidev, msg); 199 ret = spi_sync(ks->spidev, msg);
199 if (ret < 0) 200 if (ret < 0)
200 ks_err(ks, "spi_sync() failed\n"); 201 netdev_err(ks->netdev, "spi_sync() failed\n");
201} 202}
202 203
203/** 204/**
@@ -263,7 +264,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
263 264
264 ret = spi_sync(ks->spidev, msg); 265 ret = spi_sync(ks->spidev, msg);
265 if (ret < 0) 266 if (ret < 0)
266 ks_err(ks, "read: spi_sync() failed\n"); 267 netdev_err(ks->netdev, "read: spi_sync() failed\n");
267 else if (ks8851_rx_1msg(ks)) 268 else if (ks8851_rx_1msg(ks))
268 memcpy(rxb, trx + 2, rxl); 269 memcpy(rxb, trx + 2, rxl);
269 else 270 else
@@ -417,8 +418,8 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
417 u8 txb[1]; 418 u8 txb[1];
418 int ret; 419 int ret;
419 420
420 if (netif_msg_rx_status(ks)) 421 netif_dbg(ks, rx_status, ks->netdev,
421 ks_dbg(ks, "%s: %d@%p\n", __func__, len, buff); 422 "%s: %d@%p\n", __func__, len, buff);
422 423
423 /* set the operation we're issuing */ 424 /* set the operation we're issuing */
424 txb[0] = KS_SPIOP_RXFIFO; 425 txb[0] = KS_SPIOP_RXFIFO;
@@ -434,7 +435,7 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
434 435
435 ret = spi_sync(ks->spidev, msg); 436 ret = spi_sync(ks->spidev, msg);
436 if (ret < 0) 437 if (ret < 0)
437 ks_err(ks, "%s: spi_sync() failed\n", __func__); 438 netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
438} 439}
439 440
440/** 441/**
@@ -446,10 +447,11 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
446*/ 447*/
447static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) 448static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
448{ 449{
449 ks_dbg(ks, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", 450 netdev_dbg(ks->netdev,
450 rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7], 451 "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
451 rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11], 452 rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
452 rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]); 453 rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
454 rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
453} 455}
454 456
455/** 457/**
@@ -471,8 +473,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
471 473
472 rxfc = ks8851_rdreg8(ks, KS_RXFC); 474 rxfc = ks8851_rdreg8(ks, KS_RXFC);
473 475
474 if (netif_msg_rx_status(ks)) 476 netif_dbg(ks, rx_status, ks->netdev,
475 ks_dbg(ks, "%s: %d packets\n", __func__, rxfc); 477 "%s: %d packets\n", __func__, rxfc);
476 478
477 /* Currently we're issuing a read per packet, but we could possibly 479 /* Currently we're issuing a read per packet, but we could possibly
478 * improve the code by issuing a single read, getting the receive 480 * improve the code by issuing a single read, getting the receive
@@ -489,9 +491,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
489 rxstat = rxh & 0xffff; 491 rxstat = rxh & 0xffff;
490 rxlen = rxh >> 16; 492 rxlen = rxh >> 16;
491 493
492 if (netif_msg_rx_status(ks)) 494 netif_dbg(ks, rx_status, ks->netdev,
493 ks_dbg(ks, "rx: stat 0x%04x, len 0x%04x\n", 495 "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
494 rxstat, rxlen);
495 496
496 /* the length of the packet includes the 32bit CRC */ 497 /* the length of the packet includes the 32bit CRC */
497 498
@@ -553,9 +554,8 @@ static void ks8851_irq_work(struct work_struct *work)
553 554
554 status = ks8851_rdreg16(ks, KS_ISR); 555 status = ks8851_rdreg16(ks, KS_ISR);
555 556
556 if (netif_msg_intr(ks)) 557 netif_dbg(ks, intr, ks->netdev,
557 dev_dbg(&ks->spidev->dev, "%s: status 0x%04x\n", 558 "%s: status 0x%04x\n", __func__, status);
558 __func__, status);
559 559
560 if (status & IRQ_LCI) { 560 if (status & IRQ_LCI) {
561 /* should do something about checking link status */ 561 /* should do something about checking link status */
@@ -582,8 +582,8 @@ static void ks8851_irq_work(struct work_struct *work)
582 * system */ 582 * system */
583 ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); 583 ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
584 584
585 if (netif_msg_intr(ks)) 585 netif_dbg(ks, intr, ks->netdev,
586 ks_dbg(ks, "%s: txspace %d\n", __func__, ks->tx_space); 586 "%s: txspace %d\n", __func__, ks->tx_space);
587 } 587 }
588 588
589 if (status & IRQ_RXI) 589 if (status & IRQ_RXI)
@@ -659,9 +659,8 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
659 unsigned fid = 0; 659 unsigned fid = 0;
660 int ret; 660 int ret;
661 661
662 if (netif_msg_tx_queued(ks)) 662 netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
663 dev_dbg(&ks->spidev->dev, "%s: skb %p, %d@%p, irq %d\n", 663 __func__, txp, txp->len, txp->data, irq);
664 __func__, txp, txp->len, txp->data, irq);
665 664
666 fid = ks->fid++; 665 fid = ks->fid++;
667 fid &= TXFR_TXFID_MASK; 666 fid &= TXFR_TXFID_MASK;
@@ -685,7 +684,7 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
685 684
686 ret = spi_sync(ks->spidev, msg); 685 ret = spi_sync(ks->spidev, msg);
687 if (ret < 0) 686 if (ret < 0)
688 ks_err(ks, "%s: spi_sync() failed\n", __func__); 687 netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
689} 688}
690 689
691/** 690/**
@@ -722,12 +721,14 @@ static void ks8851_tx_work(struct work_struct *work)
722 txb = skb_dequeue(&ks->txq); 721 txb = skb_dequeue(&ks->txq);
723 last = skb_queue_empty(&ks->txq); 722 last = skb_queue_empty(&ks->txq);
724 723
725 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA); 724 if (txb != NULL) {
726 ks8851_wrpkt(ks, txb, last); 725 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
727 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); 726 ks8851_wrpkt(ks, txb, last);
728 ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE); 727 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
728 ks8851_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
729 729
730 ks8851_done_tx(ks, txb); 730 ks8851_done_tx(ks, txb);
731 }
731 } 732 }
732 733
733 mutex_unlock(&ks->lock); 734 mutex_unlock(&ks->lock);
@@ -744,8 +745,7 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
744{ 745{
745 unsigned pmecr; 746 unsigned pmecr;
746 747
747 if (netif_msg_hw(ks)) 748 netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
748 ks_dbg(ks, "setting power mode %d\n", pwrmode);
749 749
750 pmecr = ks8851_rdreg16(ks, KS_PMECR); 750 pmecr = ks8851_rdreg16(ks, KS_PMECR);
751 pmecr &= ~PMECR_PM_MASK; 751 pmecr &= ~PMECR_PM_MASK;
@@ -769,8 +769,7 @@ static int ks8851_net_open(struct net_device *dev)
769 * else at the moment */ 769 * else at the moment */
770 mutex_lock(&ks->lock); 770 mutex_lock(&ks->lock);
771 771
772 if (netif_msg_ifup(ks)) 772 netif_dbg(ks, ifup, ks->netdev, "opening\n");
773 ks_dbg(ks, "opening %s\n", dev->name);
774 773
775 /* bring chip out of any power saving mode it was in */ 774 /* bring chip out of any power saving mode it was in */
776 ks8851_set_powermode(ks, PMECR_PM_NORMAL); 775 ks8851_set_powermode(ks, PMECR_PM_NORMAL);
@@ -826,8 +825,7 @@ static int ks8851_net_open(struct net_device *dev)
826 825
827 netif_start_queue(ks->netdev); 826 netif_start_queue(ks->netdev);
828 827
829 if (netif_msg_ifup(ks)) 828 netif_dbg(ks, ifup, ks->netdev, "network device up\n");
830 ks_dbg(ks, "network device %s up\n", dev->name);
831 829
832 mutex_unlock(&ks->lock); 830 mutex_unlock(&ks->lock);
833 return 0; 831 return 0;
@@ -845,8 +843,7 @@ static int ks8851_net_stop(struct net_device *dev)
845{ 843{
846 struct ks8851_net *ks = netdev_priv(dev); 844 struct ks8851_net *ks = netdev_priv(dev);
847 845
848 if (netif_msg_ifdown(ks)) 846 netif_info(ks, ifdown, dev, "shutting down\n");
849 ks_info(ks, "%s: shutting down\n", dev->name);
850 847
851 netif_stop_queue(dev); 848 netif_stop_queue(dev);
852 849
@@ -874,8 +871,8 @@ static int ks8851_net_stop(struct net_device *dev)
874 while (!skb_queue_empty(&ks->txq)) { 871 while (!skb_queue_empty(&ks->txq)) {
875 struct sk_buff *txb = skb_dequeue(&ks->txq); 872 struct sk_buff *txb = skb_dequeue(&ks->txq);
876 873
877 if (netif_msg_ifdown(ks)) 874 netif_dbg(ks, ifdown, ks->netdev,
878 ks_dbg(ks, "%s: freeing txb %p\n", __func__, txb); 875 "%s: freeing txb %p\n", __func__, txb);
879 876
880 dev_kfree_skb(txb); 877 dev_kfree_skb(txb);
881 } 878 }
@@ -904,9 +901,8 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
904 unsigned needed = calc_txlen(skb->len); 901 unsigned needed = calc_txlen(skb->len);
905 netdev_tx_t ret = NETDEV_TX_OK; 902 netdev_tx_t ret = NETDEV_TX_OK;
906 903
907 if (netif_msg_tx_queued(ks)) 904 netif_dbg(ks, tx_queued, ks->netdev,
908 ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__, 905 "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
909 skb, skb->len, skb->data);
910 906
911 spin_lock(&ks->statelock); 907 spin_lock(&ks->statelock);
912 908
@@ -966,13 +962,13 @@ static void ks8851_set_rx_mode(struct net_device *dev)
966 rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE | 962 rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
967 RXCR1_RXPAFMA | RXCR1_RXMAFMA); 963 RXCR1_RXPAFMA | RXCR1_RXMAFMA);
968 } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) { 964 } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
969 struct dev_mc_list *mcptr; 965 struct netdev_hw_addr *ha;
970 u32 crc; 966 u32 crc;
971 967
972 /* accept some multicast */ 968 /* accept some multicast */
973 969
974 netdev_for_each_mc_addr(mcptr, dev) { 970 netdev_for_each_mc_addr(ha, dev) {
975 crc = ether_crc(ETH_ALEN, mcptr->dmi_addr); 971 crc = ether_crc(ETH_ALEN, ha->addr);
976 crc >>= (32 - 6); /* get top six bits */ 972 crc >>= (32 - 6); /* get top six bits */
977 973
978 rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf)); 974 rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
@@ -1038,6 +1034,234 @@ static const struct net_device_ops ks8851_netdev_ops = {
1038 .ndo_validate_addr = eth_validate_addr, 1034 .ndo_validate_addr = eth_validate_addr,
1039}; 1035};
1040 1036
1037/* Companion eeprom access */
1038
1039enum { /* EEPROM programming states */
1040 EEPROM_CONTROL,
1041 EEPROM_ADDRESS,
1042 EEPROM_DATA,
1043 EEPROM_COMPLETE
1044};
1045
1046/**
1047 * ks8851_eeprom_read - read a 16bits word in ks8851 companion EEPROM
1048 * @dev: The network device the PHY is on.
1049 * @addr: EEPROM address to read
1050 *
1051 * eeprom_size: used to define the data coding length. Can be changed
1052 * through debug-fs.
1053 *
1054 * Programs a read on the EEPROM using ks8851 EEPROM SW access feature.
1055 * Warning: The READ feature is not supported on ks8851 revision 0.
1056 *
1057 * Rough programming model:
1058 * - on period start: set clock high and read value on bus
1059 * - on period / 2: set clock low and program value on bus
1060 * - start on period / 2
1061 */
1062unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
1063{
1064 struct ks8851_net *ks = netdev_priv(dev);
1065 int eepcr;
1066 int ctrl = EEPROM_OP_READ;
1067 int state = EEPROM_CONTROL;
1068 int bit_count = EEPROM_OP_LEN - 1;
1069 unsigned int data = 0;
1070 int dummy;
1071 unsigned int addr_len;
1072
1073 addr_len = (ks->eeprom_size == 128) ? 6 : 8;
1074
1075 /* start transaction: chip select high, authorize write */
1076 mutex_lock(&ks->lock);
1077 eepcr = EEPCR_EESA | EEPCR_EESRWA;
1078 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1079 eepcr |= EEPCR_EECS;
1080 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1081 mutex_unlock(&ks->lock);
1082
1083 while (state != EEPROM_COMPLETE) {
1084 /* falling clock period starts... */
1085 /* set EED_IO pin for control and address */
1086 eepcr &= ~EEPCR_EEDO;
1087 switch (state) {
1088 case EEPROM_CONTROL:
1089 eepcr |= ((ctrl >> bit_count) & 1) << 2;
1090 if (bit_count-- <= 0) {
1091 bit_count = addr_len - 1;
1092 state = EEPROM_ADDRESS;
1093 }
1094 break;
1095 case EEPROM_ADDRESS:
1096 eepcr |= ((addr >> bit_count) & 1) << 2;
1097 bit_count--;
1098 break;
1099 case EEPROM_DATA:
1100 /* Change to receive mode */
1101 eepcr &= ~EEPCR_EESRWA;
1102 break;
1103 }
1104
1105 /* lower clock */
1106 eepcr &= ~EEPCR_EESCK;
1107
1108 mutex_lock(&ks->lock);
1109 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1110 mutex_unlock(&ks->lock);
1111
1112 /* waitread period / 2 */
1113 udelay(EEPROM_SK_PERIOD / 2);
1114
1115 /* rising clock period starts... */
1116
1117 /* raise clock */
1118 mutex_lock(&ks->lock);
1119 eepcr |= EEPCR_EESCK;
1120 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1121 mutex_unlock(&ks->lock);
1122
1123 /* Manage read */
1124 switch (state) {
1125 case EEPROM_ADDRESS:
1126 if (bit_count < 0) {
1127 bit_count = EEPROM_DATA_LEN - 1;
1128 state = EEPROM_DATA;
1129 }
1130 break;
1131 case EEPROM_DATA:
1132 mutex_lock(&ks->lock);
1133 dummy = ks8851_rdreg16(ks, KS_EEPCR);
1134 mutex_unlock(&ks->lock);
1135 data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
1136 if (bit_count-- <= 0)
1137 state = EEPROM_COMPLETE;
1138 break;
1139 }
1140
1141 /* wait period / 2 */
1142 udelay(EEPROM_SK_PERIOD / 2);
1143 }
1144
1145 /* close transaction */
1146 mutex_lock(&ks->lock);
1147 eepcr &= ~EEPCR_EECS;
1148 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1149 eepcr = 0;
1150 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1151 mutex_unlock(&ks->lock);
1152
1153 return data;
1154}
1155
1156/**
1157 * ks8851_eeprom_write - write a 16bits word in ks8851 companion EEPROM
1158 * @dev: The network device the PHY is on.
1159 * @op: operand (can be WRITE, EWEN, EWDS)
1160 * @addr: EEPROM address to write
1161 * @data: data to write
1162 *
1163 * eeprom_size: used to define the data coding length. Can be changed
1164 * through debug-fs.
1165 *
1166 * Programs a write on the EEPROM using ks8851 EEPROM SW access feature.
1167 *
1168 * Note that a write enable is required before writing data.
1169 *
1170 * Rough programming model:
1171 * - on period start: set clock high
1172 * - on period / 2: set clock low and program value on bus
1173 * - start on period / 2
1174 */
1175void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
1176 unsigned int addr, unsigned int data)
1177{
1178 struct ks8851_net *ks = netdev_priv(dev);
1179 int eepcr;
1180 int state = EEPROM_CONTROL;
1181 int bit_count = EEPROM_OP_LEN - 1;
1182 unsigned int addr_len;
1183
1184 addr_len = (ks->eeprom_size == 128) ? 6 : 8;
1185
1186 switch (op) {
1187 case EEPROM_OP_EWEN:
1188 addr = 0x30;
1189 break;
1190 case EEPROM_OP_EWDS:
1191 addr = 0;
1192 break;
1193 }
1194
1195 /* start transaction: chip select high, authorize write */
1196 mutex_lock(&ks->lock);
1197 eepcr = EEPCR_EESA | EEPCR_EESRWA;
1198 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1199 eepcr |= EEPCR_EECS;
1200 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1201 mutex_unlock(&ks->lock);
1202
1203 while (state != EEPROM_COMPLETE) {
1204 /* falling clock period starts... */
1205 /* set EED_IO pin for control and address */
1206 eepcr &= ~EEPCR_EEDO;
1207 switch (state) {
1208 case EEPROM_CONTROL:
1209 eepcr |= ((op >> bit_count) & 1) << 2;
1210 if (bit_count-- <= 0) {
1211 bit_count = addr_len - 1;
1212 state = EEPROM_ADDRESS;
1213 }
1214 break;
1215 case EEPROM_ADDRESS:
1216 eepcr |= ((addr >> bit_count) & 1) << 2;
1217 if (bit_count-- <= 0) {
1218 if (op == EEPROM_OP_WRITE) {
1219 bit_count = EEPROM_DATA_LEN - 1;
1220 state = EEPROM_DATA;
1221 } else {
1222 state = EEPROM_COMPLETE;
1223 }
1224 }
1225 break;
1226 case EEPROM_DATA:
1227 eepcr |= ((data >> bit_count) & 1) << 2;
1228 if (bit_count-- <= 0)
1229 state = EEPROM_COMPLETE;
1230 break;
1231 }
1232
1233 /* lower clock */
1234 eepcr &= ~EEPCR_EESCK;
1235
1236 mutex_lock(&ks->lock);
1237 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1238 mutex_unlock(&ks->lock);
1239
1240 /* wait period / 2 */
1241 udelay(EEPROM_SK_PERIOD / 2);
1242
1243 /* rising clock period starts... */
1244
1245 /* raise clock */
1246 eepcr |= EEPCR_EESCK;
1247 mutex_lock(&ks->lock);
1248 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1249 mutex_unlock(&ks->lock);
1250
1251 /* wait period / 2 */
1252 udelay(EEPROM_SK_PERIOD / 2);
1253 }
1254
1255 /* close transaction */
1256 mutex_lock(&ks->lock);
1257 eepcr &= ~EEPCR_EECS;
1258 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1259 eepcr = 0;
1260 ks8851_wrreg16(ks, KS_EEPCR, eepcr);
1261 mutex_unlock(&ks->lock);
1262
1263}
1264
1041/* ethtool support */ 1265/* ethtool support */
1042 1266
1043static void ks8851_get_drvinfo(struct net_device *dev, 1267static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1084,6 +1308,117 @@ static int ks8851_nway_reset(struct net_device *dev)
1084 return mii_nway_restart(&ks->mii); 1308 return mii_nway_restart(&ks->mii);
1085} 1309}
1086 1310
1311static int ks8851_get_eeprom_len(struct net_device *dev)
1312{
1313 struct ks8851_net *ks = netdev_priv(dev);
1314 return ks->eeprom_size;
1315}
1316
1317static int ks8851_get_eeprom(struct net_device *dev,
1318 struct ethtool_eeprom *eeprom, u8 *bytes)
1319{
1320 struct ks8851_net *ks = netdev_priv(dev);
1321 u16 *eeprom_buff;
1322 int first_word;
1323 int last_word;
1324 int ret_val = 0;
1325 u16 i;
1326
1327 if (eeprom->len == 0)
1328 return -EINVAL;
1329
1330 if (eeprom->len > ks->eeprom_size)
1331 return -EINVAL;
1332
1333 eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
1334
1335 first_word = eeprom->offset >> 1;
1336 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1337
1338 eeprom_buff = kmalloc(sizeof(u16) *
1339 (last_word - first_word + 1), GFP_KERNEL);
1340 if (!eeprom_buff)
1341 return -ENOMEM;
1342
1343 for (i = 0; i < last_word - first_word + 1; i++)
1344 eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + 1);
1345
1346 /* Device's eeprom is little-endian, word addressable */
1347 for (i = 0; i < last_word - first_word + 1; i++)
1348 le16_to_cpus(&eeprom_buff[i]);
1349
1350 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
1351 kfree(eeprom_buff);
1352
1353 return ret_val;
1354}
1355
1356static int ks8851_set_eeprom(struct net_device *dev,
1357 struct ethtool_eeprom *eeprom, u8 *bytes)
1358{
1359 struct ks8851_net *ks = netdev_priv(dev);
1360 u16 *eeprom_buff;
1361 void *ptr;
1362 int max_len;
1363 int first_word;
1364 int last_word;
1365 int ret_val = 0;
1366 u16 i;
1367
1368 if (eeprom->len == 0)
1369 return -EOPNOTSUPP;
1370
1371 if (eeprom->len > ks->eeprom_size)
1372 return -EINVAL;
1373
1374 if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
1375 return -EFAULT;
1376
1377 first_word = eeprom->offset >> 1;
1378 last_word = (eeprom->offset + eeprom->len - 1) >> 1;
1379 max_len = (last_word - first_word + 1) * 2;
1380 eeprom_buff = kmalloc(max_len, GFP_KERNEL);
1381 if (!eeprom_buff)
1382 return -ENOMEM;
1383
1384 ptr = (void *)eeprom_buff;
1385
1386 if (eeprom->offset & 1) {
1387 /* need read/modify/write of first changed EEPROM word */
1388 /* only the second byte of the word is being modified */
1389 eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
1390 ptr++;
1391 }
1392 if ((eeprom->offset + eeprom->len) & 1)
1393 /* need read/modify/write of last changed EEPROM word */
1394 /* only the first byte of the word is being modified */
1395 eeprom_buff[last_word - first_word] =
1396 ks8851_eeprom_read(dev, last_word);
1397
1398
1399 /* Device's eeprom is little-endian, word addressable */
1400 le16_to_cpus(&eeprom_buff[0]);
1401 le16_to_cpus(&eeprom_buff[last_word - first_word]);
1402
1403 memcpy(ptr, bytes, eeprom->len);
1404
1405 for (i = 0; i < last_word - first_word + 1; i++)
1406 eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
1407
1408 ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
1409
1410 for (i = 0; i < last_word - first_word + 1; i++) {
1411 ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
1412 eeprom_buff[i]);
1413 mdelay(EEPROM_WRITE_TIME);
1414 }
1415
1416 ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
1417
1418 kfree(eeprom_buff);
1419 return ret_val;
1420}
1421
1087static const struct ethtool_ops ks8851_ethtool_ops = { 1422static const struct ethtool_ops ks8851_ethtool_ops = {
1088 .get_drvinfo = ks8851_get_drvinfo, 1423 .get_drvinfo = ks8851_get_drvinfo,
1089 .get_msglevel = ks8851_get_msglevel, 1424 .get_msglevel = ks8851_get_msglevel,
@@ -1092,6 +1427,9 @@ static const struct ethtool_ops ks8851_ethtool_ops = {
1092 .set_settings = ks8851_set_settings, 1427 .set_settings = ks8851_set_settings,
1093 .get_link = ks8851_get_link, 1428 .get_link = ks8851_get_link,
1094 .nway_reset = ks8851_nway_reset, 1429 .nway_reset = ks8851_nway_reset,
1430 .get_eeprom_len = ks8851_get_eeprom_len,
1431 .get_eeprom = ks8851_get_eeprom,
1432 .set_eeprom = ks8851_set_eeprom,
1095}; 1433};
1096 1434
1097/* MII interface controls */ 1435/* MII interface controls */
@@ -1185,17 +1523,17 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
1185 rd = ks8851_rdreg16(ks, KS_MBIR); 1523 rd = ks8851_rdreg16(ks, KS_MBIR);
1186 1524
1187 if ((rd & both_done) != both_done) { 1525 if ((rd & both_done) != both_done) {
1188 ks_warn(ks, "Memory selftest not finished\n"); 1526 netdev_warn(ks->netdev, "Memory selftest not finished\n");
1189 return 0; 1527 return 0;
1190 } 1528 }
1191 1529
1192 if (rd & MBIR_TXMBFA) { 1530 if (rd & MBIR_TXMBFA) {
1193 ks_err(ks, "TX memory selftest fail\n"); 1531 netdev_err(ks->netdev, "TX memory selftest fail\n");
1194 ret |= 1; 1532 ret |= 1;
1195 } 1533 }
1196 1534
1197 if (rd & MBIR_RXMBFA) { 1535 if (rd & MBIR_RXMBFA) {
1198 ks_err(ks, "RX memory selftest fail\n"); 1536 netdev_err(ks->netdev, "RX memory selftest fail\n");
1199 ret |= 2; 1537 ret |= 2;
1200 } 1538 }
1201 1539
@@ -1277,6 +1615,14 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1277 goto err_id; 1615 goto err_id;
1278 } 1616 }
1279 1617
1618 /* cache the contents of the CCR register for EEPROM, etc. */
1619 ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);
1620
1621 if (ks->rc_ccr & CCR_EEPROM)
1622 ks->eeprom_size = 128;
1623 else
1624 ks->eeprom_size = 0;
1625
1280 ks8851_read_selftest(ks); 1626 ks8851_read_selftest(ks);
1281 ks8851_init_mac(ks); 1627 ks8851_init_mac(ks);
1282 1628
@@ -1293,9 +1639,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1293 goto err_netdev; 1639 goto err_netdev;
1294 } 1640 }
1295 1641
1296 dev_info(&spi->dev, "revision %d, MAC %pM, IRQ %d\n", 1642 netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
1297 CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), 1643 CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
1298 ndev->dev_addr, ndev->irq); 1644 ndev->dev_addr, ndev->irq);
1299 1645
1300 return 0; 1646 return 0;
1301 1647
@@ -1314,7 +1660,7 @@ static int __devexit ks8851_remove(struct spi_device *spi)
1314 struct ks8851_net *priv = dev_get_drvdata(&spi->dev); 1660 struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
1315 1661
1316 if (netif_msg_drv(priv)) 1662 if (netif_msg_drv(priv))
1317 dev_info(&spi->dev, "remove"); 1663 dev_info(&spi->dev, "remove\n");
1318 1664
1319 unregister_netdev(priv->netdev); 1665 unregister_netdev(priv->netdev);
1320 free_irq(spi->irq, priv); 1666 free_irq(spi->irq, priv);
diff --git a/drivers/net/ks8851.h b/drivers/net/ks8851.h
index f52c312cc356..537fb06e5932 100644
--- a/drivers/net/ks8851.h
+++ b/drivers/net/ks8851.h
@@ -25,12 +25,24 @@
25#define OBCR_ODS_16mA (1 << 6) 25#define OBCR_ODS_16mA (1 << 6)
26 26
27#define KS_EEPCR 0x22 27#define KS_EEPCR 0x22
28#define EEPCR_EESRWA (1 << 5)
28#define EEPCR_EESA (1 << 4) 29#define EEPCR_EESA (1 << 4)
29#define EEPCR_EESB (1 << 3) 30#define EEPCR_EESB_OFFSET 3
31#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET)
30#define EEPCR_EEDO (1 << 2) 32#define EEPCR_EEDO (1 << 2)
31#define EEPCR_EESCK (1 << 1) 33#define EEPCR_EESCK (1 << 1)
32#define EEPCR_EECS (1 << 0) 34#define EEPCR_EECS (1 << 0)
33 35
36#define EEPROM_OP_LEN 3 /* bits:*/
37#define EEPROM_OP_READ 0x06
38#define EEPROM_OP_EWEN 0x04
39#define EEPROM_OP_WRITE 0x05
40#define EEPROM_OP_EWDS 0x14
41
42#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */
43#define EEPROM_WRITE_TIME 4 /* wrt ack time in ms */
44#define EEPROM_SK_PERIOD 400 /* in us */
45
34#define KS_MBIR 0x24 46#define KS_MBIR 0x24
35#define MBIR_TXMBF (1 << 12) 47#define MBIR_TXMBF (1 << 12)
36#define MBIR_TXMBFA (1 << 11) 48#define MBIR_TXMBFA (1 << 11)
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index 6354ab3a45a6..2e2c69b24062 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -21,6 +21,8 @@
21 * KS8851 16bit MLL chip from Micrel Inc. 21 * KS8851 16bit MLL chip from Micrel Inc.
22 */ 22 */
23 23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
24#include <linux/module.h> 26#include <linux/module.h>
25#include <linux/kernel.h> 27#include <linux/kernel.h>
26#include <linux/netdevice.h> 28#include <linux/netdevice.h>
@@ -361,7 +363,6 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
361 363
362#define MAX_MCAST_LST 32 364#define MAX_MCAST_LST 32
363#define HW_MCAST_SIZE 8 365#define HW_MCAST_SIZE 8
364#define MAC_ADDR_LEN 6
365 366
366/** 367/**
367 * union ks_tx_hdr - tx header data 368 * union ks_tx_hdr - tx header data
@@ -449,7 +450,7 @@ struct ks_net {
449 u16 promiscuous; 450 u16 promiscuous;
450 u16 all_mcast; 451 u16 all_mcast;
451 u16 mcast_lst_size; 452 u16 mcast_lst_size;
452 u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN]; 453 u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN];
453 u8 mcast_bits[HW_MCAST_SIZE]; 454 u8 mcast_bits[HW_MCAST_SIZE];
454 u8 mac_addr[6]; 455 u8 mac_addr[6];
455 u8 fid; 456 u8 fid;
@@ -459,11 +460,6 @@ struct ks_net {
459 460
460static int msg_enable; 461static int msg_enable;
461 462
462#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
463#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
464#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
465#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
466
467#define BE3 0x8000 /* Byte Enable 3 */ 463#define BE3 0x8000 /* Byte Enable 3 */
468#define BE2 0x4000 /* Byte Enable 2 */ 464#define BE2 0x4000 /* Byte Enable 2 */
469#define BE1 0x2000 /* Byte Enable 1 */ 465#define BE1 0x2000 /* Byte Enable 1 */
@@ -625,8 +621,7 @@ static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
625{ 621{
626 unsigned pmecr; 622 unsigned pmecr;
627 623
628 if (netif_msg_hw(ks)) 624 netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
629 ks_dbg(ks, "setting power mode %d\n", pwrmode);
630 625
631 ks_rdreg16(ks, KS_GRR); 626 ks_rdreg16(ks, KS_GRR);
632 pmecr = ks_rdreg16(ks, KS_PMECR); 627 pmecr = ks_rdreg16(ks, KS_PMECR);
@@ -806,11 +801,10 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
806 /* read data block including CRC 4 bytes */ 801 /* read data block including CRC 4 bytes */
807 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len); 802 ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
808 skb_put(skb, frame_hdr->len); 803 skb_put(skb, frame_hdr->len);
809 skb->dev = netdev;
810 skb->protocol = eth_type_trans(skb, netdev); 804 skb->protocol = eth_type_trans(skb, netdev);
811 netif_rx(skb); 805 netif_rx(skb);
812 } else { 806 } else {
813 printk(KERN_ERR "%s: err:skb alloc\n", __func__); 807 pr_err("%s: err:skb alloc\n", __func__);
814 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF)); 808 ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
815 if (skb) 809 if (skb)
816 dev_kfree_skb_irq(skb); 810 dev_kfree_skb_irq(skb);
@@ -837,9 +831,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
837 netif_carrier_off(netdev); 831 netif_carrier_off(netdev);
838 link_up_status = false; 832 link_up_status = false;
839 } 833 }
840 if (netif_msg_link(ks)) 834 netif_dbg(ks, link, ks->netdev,
841 ks_dbg(ks, "%s: %s\n", 835 "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
842 __func__, link_up_status ? "UP" : "DOWN");
843} 836}
844 837
845/** 838/**
@@ -909,15 +902,13 @@ static int ks_net_open(struct net_device *netdev)
909 * else at the moment. 902 * else at the moment.
910 */ 903 */
911 904
912 if (netif_msg_ifup(ks)) 905 netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
913 ks_dbg(ks, "%s - entry\n", __func__);
914 906
915 /* reset the HW */ 907 /* reset the HW */
916 err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev); 908 err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
917 909
918 if (err) { 910 if (err) {
919 printk(KERN_ERR "Failed to request IRQ: %d: %d\n", 911 pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err);
920 ks->irq, err);
921 return err; 912 return err;
922 } 913 }
923 914
@@ -930,8 +921,7 @@ static int ks_net_open(struct net_device *netdev)
930 ks_enable_qmu(ks); 921 ks_enable_qmu(ks);
931 netif_start_queue(ks->netdev); 922 netif_start_queue(ks->netdev);
932 923
933 if (netif_msg_ifup(ks)) 924 netif_dbg(ks, ifup, ks->netdev, "network device up\n");
934 ks_dbg(ks, "network device %s up\n", netdev->name);
935 925
936 return 0; 926 return 0;
937} 927}
@@ -948,8 +938,7 @@ static int ks_net_stop(struct net_device *netdev)
948{ 938{
949 struct ks_net *ks = netdev_priv(netdev); 939 struct ks_net *ks = netdev_priv(netdev);
950 940
951 if (netif_msg_ifdown(ks)) 941 netif_info(ks, ifdown, netdev, "shutting down\n");
952 ks_info(ks, "%s: shutting down\n", netdev->name);
953 942
954 netif_stop_queue(netdev); 943 netif_stop_queue(netdev);
955 944
@@ -1181,7 +1170,7 @@ static void ks_set_mcast(struct ks_net *ks, u16 mcast)
1181static void ks_set_rx_mode(struct net_device *netdev) 1170static void ks_set_rx_mode(struct net_device *netdev)
1182{ 1171{
1183 struct ks_net *ks = netdev_priv(netdev); 1172 struct ks_net *ks = netdev_priv(netdev);
1184 struct dev_mc_list *ptr; 1173 struct netdev_hw_addr *ha;
1185 1174
1186 /* Turn on/off promiscuous mode. */ 1175 /* Turn on/off promiscuous mode. */
1187 if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC) 1176 if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
@@ -1198,13 +1187,12 @@ static void ks_set_rx_mode(struct net_device *netdev)
1198 if (netdev_mc_count(netdev) <= MAX_MCAST_LST) { 1187 if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
1199 int i = 0; 1188 int i = 0;
1200 1189
1201 netdev_for_each_mc_addr(ptr, netdev) { 1190 netdev_for_each_mc_addr(ha, netdev) {
1202 if (!(*ptr->dmi_addr & 1)) 1191 if (!(*ha->addr & 1))
1203 continue; 1192 continue;
1204 if (i >= MAX_MCAST_LST) 1193 if (i >= MAX_MCAST_LST)
1205 break; 1194 break;
1206 memcpy(ks->mcast_lst[i++], ptr->dmi_addr, 1195 memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
1207 MAC_ADDR_LEN);
1208 } 1196 }
1209 ks->mcast_lst_size = (u8)i; 1197 ks->mcast_lst_size = (u8)i;
1210 ks_set_grpaddr(ks); 1198 ks_set_grpaddr(ks);
@@ -1430,21 +1418,21 @@ static int ks_read_selftest(struct ks_net *ks)
1430 rd = ks_rdreg16(ks, KS_MBIR); 1418 rd = ks_rdreg16(ks, KS_MBIR);
1431 1419
1432 if ((rd & both_done) != both_done) { 1420 if ((rd & both_done) != both_done) {
1433 ks_warn(ks, "Memory selftest not finished\n"); 1421 netdev_warn(ks->netdev, "Memory selftest not finished\n");
1434 return 0; 1422 return 0;
1435 } 1423 }
1436 1424
1437 if (rd & MBIR_TXMBFA) { 1425 if (rd & MBIR_TXMBFA) {
1438 ks_err(ks, "TX memory selftest fails\n"); 1426 netdev_err(ks->netdev, "TX memory selftest fails\n");
1439 ret |= 1; 1427 ret |= 1;
1440 } 1428 }
1441 1429
1442 if (rd & MBIR_RXMBFA) { 1430 if (rd & MBIR_RXMBFA) {
1443 ks_err(ks, "RX memory selftest fails\n"); 1431 netdev_err(ks->netdev, "RX memory selftest fails\n");
1444 ret |= 2; 1432 ret |= 2;
1445 } 1433 }
1446 1434
1447 ks_info(ks, "the selftest passes\n"); 1435 netdev_info(ks->netdev, "the selftest passes\n");
1448 return ret; 1436 return ret;
1449} 1437}
1450 1438
@@ -1515,7 +1503,7 @@ static int ks_hw_init(struct ks_net *ks)
1515 ks->frame_head_info = (struct type_frame_head *) \ 1503 ks->frame_head_info = (struct type_frame_head *) \
1516 kmalloc(MHEADER_SIZE, GFP_KERNEL); 1504 kmalloc(MHEADER_SIZE, GFP_KERNEL);
1517 if (!ks->frame_head_info) { 1505 if (!ks->frame_head_info) {
1518 printk(KERN_ERR "Error: Fail to allocate frame memory\n"); 1506 pr_err("Error: Fail to allocate frame memory\n");
1519 return false; 1507 return false;
1520 } 1508 }
1521 1509
@@ -1581,7 +1569,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
1581 ks->mii.mdio_read = ks_phy_read; 1569 ks->mii.mdio_read = ks_phy_read;
1582 ks->mii.mdio_write = ks_phy_write; 1570 ks->mii.mdio_write = ks_phy_write;
1583 1571
1584 ks_info(ks, "message enable is %d\n", msg_enable); 1572 netdev_info(netdev, "message enable is %d\n", msg_enable);
1585 /* set the default message enable */ 1573 /* set the default message enable */
1586 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV | 1574 ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
1587 NETIF_MSG_PROBE | 1575 NETIF_MSG_PROBE |
@@ -1590,13 +1578,13 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
1590 1578
1591 /* simple check for a valid chip being connected to the bus */ 1579 /* simple check for a valid chip being connected to the bus */
1592 if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) { 1580 if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
1593 ks_err(ks, "failed to read device ID\n"); 1581 netdev_err(netdev, "failed to read device ID\n");
1594 err = -ENODEV; 1582 err = -ENODEV;
1595 goto err_register; 1583 goto err_register;
1596 } 1584 }
1597 1585
1598 if (ks_read_selftest(ks)) { 1586 if (ks_read_selftest(ks)) {
1599 ks_err(ks, "failed to read device ID\n"); 1587 netdev_err(netdev, "failed to read device ID\n");
1600 err = -ENODEV; 1588 err = -ENODEV;
1601 goto err_register; 1589 goto err_register;
1602 } 1590 }
@@ -1627,9 +1615,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
1627 1615
1628 id = ks_rdreg16(ks, KS_CIDER); 1616 id = ks_rdreg16(ks, KS_CIDER);
1629 1617
1630 printk(KERN_INFO DRV_NAME 1618 netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
1631 " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n", 1619 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1632 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
1633 return 0; 1620 return 0;
1634 1621
1635err_register: 1622err_register:
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 0606a1f359fb..c80ca64277b2 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -14,10 +14,11 @@
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 */ 15 */
16 16
17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18
17#include <linux/init.h> 19#include <linux/init.h>
18#include <linux/kernel.h> 20#include <linux/kernel.h>
19#include <linux/module.h> 21#include <linux/module.h>
20#include <linux/version.h>
21#include <linux/ioport.h> 22#include <linux/ioport.h>
22#include <linux/pci.h> 23#include <linux/pci.h>
23#include <linux/proc_fs.h> 24#include <linux/proc_fs.h>
@@ -1484,11 +1485,6 @@ struct dev_priv {
1484 int promiscuous; 1485 int promiscuous;
1485}; 1486};
1486 1487
1487#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
1488#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
1489#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
1490#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
1491
1492#define DRV_NAME "KSZ884X PCI" 1488#define DRV_NAME "KSZ884X PCI"
1493#define DEVICE_NAME "KSZ884x PCI" 1489#define DEVICE_NAME "KSZ884x PCI"
1494#define DRV_VERSION "1.0.0" 1490#define DRV_VERSION "1.0.0"
@@ -3835,7 +3831,7 @@ static void ksz_check_desc_num(struct ksz_desc_info *info)
3835 alloc >>= 1; 3831 alloc >>= 1;
3836 } 3832 }
3837 if (alloc != 1 || shift < MIN_DESC_SHIFT) { 3833 if (alloc != 1 || shift < MIN_DESC_SHIFT) {
3838 printk(KERN_ALERT "Hardware descriptor numbers not right!\n"); 3834 pr_alert("Hardware descriptor numbers not right!\n");
3839 while (alloc) { 3835 while (alloc) {
3840 shift++; 3836 shift++;
3841 alloc >>= 1; 3837 alloc >>= 1;
@@ -4546,8 +4542,7 @@ static int ksz_alloc_mem(struct dev_info *adapter)
4546 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) / 4542 (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
4547 DESC_ALIGNMENT) * DESC_ALIGNMENT); 4543 DESC_ALIGNMENT) * DESC_ALIGNMENT);
4548 if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc)) 4544 if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
4549 printk(KERN_ALERT 4545 pr_alert("Hardware descriptor size not right!\n");
4550 "Hardware descriptor size not right!\n");
4551 ksz_check_desc_num(&hw->rx_desc_info); 4546 ksz_check_desc_num(&hw->rx_desc_info);
4552 ksz_check_desc_num(&hw->tx_desc_info); 4547 ksz_check_desc_num(&hw->tx_desc_info);
4553 4548
@@ -4689,7 +4684,7 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
4689 int frag; 4684 int frag;
4690 skb_frag_t *this_frag; 4685 skb_frag_t *this_frag;
4691 4686
4692 dma_buf->len = skb->len - skb->data_len; 4687 dma_buf->len = skb_headlen(skb);
4693 4688
4694 dma_buf->dma = pci_map_single( 4689 dma_buf->dma = pci_map_single(
4695 hw_priv->pdev, skb->data, dma_buf->len, 4690 hw_priv->pdev, skb->data, dma_buf->len,
@@ -5049,8 +5044,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
5049 dma_buf->skb->data, packet_len); 5044 dma_buf->skb->data, packet_len);
5050 } while (0); 5045 } while (0);
5051 5046
5052 skb->dev = dev;
5053
5054 skb->protocol = eth_type_trans(skb, dev); 5047 skb->protocol = eth_type_trans(skb, dev);
5055 5048
5056 if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP)) 5049 if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
@@ -5061,8 +5054,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
5061 priv->stats.rx_bytes += packet_len; 5054 priv->stats.rx_bytes += packet_len;
5062 5055
5063 /* Notify upper layer for received packet. */ 5056 /* Notify upper layer for received packet. */
5064 dev->last_rx = jiffies;
5065
5066 rx_status = netif_rx(skb); 5057 rx_status = netif_rx(skb);
5067 5058
5068 return 0; 5059 return 0;
@@ -5320,10 +5311,10 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
5320 u32 data; 5311 u32 data;
5321 5312
5322 hw->intr_mask &= ~KS884X_INT_TX_STOPPED; 5313 hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
5323 printk(KERN_INFO "Tx stopped\n"); 5314 pr_info("Tx stopped\n");
5324 data = readl(hw->io + KS_DMA_TX_CTRL); 5315 data = readl(hw->io + KS_DMA_TX_CTRL);
5325 if (!(data & DMA_TX_ENABLE)) 5316 if (!(data & DMA_TX_ENABLE))
5326 printk(KERN_INFO "Tx disabled\n"); 5317 pr_info("Tx disabled\n");
5327 break; 5318 break;
5328 } 5319 }
5329 } while (0); 5320 } while (0);
@@ -5496,6 +5487,18 @@ static int prepare_hardware(struct net_device *dev)
5496 return 0; 5487 return 0;
5497} 5488}
5498 5489
5490static void set_media_state(struct net_device *dev, int media_state)
5491{
5492 struct dev_priv *priv = netdev_priv(dev);
5493
5494 if (media_state == priv->media_state)
5495 netif_carrier_on(dev);
5496 else
5497 netif_carrier_off(dev);
5498 netif_info(priv, link, dev, "link %s\n",
5499 media_state == priv->media_state ? "on" : "off");
5500}
5501
5499/** 5502/**
5500 * netdev_open - open network device 5503 * netdev_open - open network device
5501 * @dev: Network device. 5504 * @dev: Network device.
@@ -5585,15 +5588,7 @@ static int netdev_open(struct net_device *dev)
5585 5588
5586 priv->media_state = port->linked->state; 5589 priv->media_state = port->linked->state;
5587 5590
5588 if (media_connected == priv->media_state) 5591 set_media_state(dev, media_connected);
5589 netif_carrier_on(dev);
5590 else
5591 netif_carrier_off(dev);
5592 if (netif_msg_link(priv))
5593 printk(KERN_INFO "%s link %s\n", dev->name,
5594 (media_connected == priv->media_state ?
5595 "on" : "off"));
5596
5597 netif_start_queue(dev); 5592 netif_start_queue(dev);
5598 5593
5599 return 0; 5594 return 0;
@@ -5767,7 +5762,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
5767 struct dev_priv *priv = netdev_priv(dev); 5762 struct dev_priv *priv = netdev_priv(dev);
5768 struct dev_info *hw_priv = priv->adapter; 5763 struct dev_info *hw_priv = priv->adapter;
5769 struct ksz_hw *hw = &hw_priv->hw; 5764 struct ksz_hw *hw = &hw_priv->hw;
5770 struct dev_mc_list *mc_ptr; 5765 struct netdev_hw_addr *ha;
5771 int multicast = (dev->flags & IFF_ALLMULTI); 5766 int multicast = (dev->flags & IFF_ALLMULTI);
5772 5767
5773 dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC)); 5768 dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
@@ -5784,7 +5779,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
5784 int i = 0; 5779 int i = 0;
5785 5780
5786 /* List too big to support so turn on all multicast mode. */ 5781 /* List too big to support so turn on all multicast mode. */
5787 if (dev->mc_count > MAX_MULTICAST_LIST) { 5782 if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
5788 if (MAX_MULTICAST_LIST != hw->multi_list_size) { 5783 if (MAX_MULTICAST_LIST != hw->multi_list_size) {
5789 hw->multi_list_size = MAX_MULTICAST_LIST; 5784 hw->multi_list_size = MAX_MULTICAST_LIST;
5790 ++hw->all_multi; 5785 ++hw->all_multi;
@@ -5793,13 +5788,12 @@ static void netdev_set_rx_mode(struct net_device *dev)
5793 return; 5788 return;
5794 } 5789 }
5795 5790
5796 netdev_for_each_mc_addr(mc_ptr, dev) { 5791 netdev_for_each_mc_addr(ha, dev) {
5797 if (!(*mc_ptr->dmi_addr & 1)) 5792 if (!(*ha->addr & 1))
5798 continue; 5793 continue;
5799 if (i >= MAX_MULTICAST_LIST) 5794 if (i >= MAX_MULTICAST_LIST)
5800 break; 5795 break;
5801 memcpy(hw->multi_list[i++], mc_ptr->dmi_addr, 5796 memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
5802 MAC_ADDR_LEN);
5803 } 5797 }
5804 hw->multi_list_size = (u8) i; 5798 hw->multi_list_size = (u8) i;
5805 hw_set_grp_addr(hw); 5799 hw_set_grp_addr(hw);
@@ -6683,16 +6677,8 @@ static void update_link(struct net_device *dev, struct dev_priv *priv,
6683{ 6677{
6684 if (priv->media_state != port->linked->state) { 6678 if (priv->media_state != port->linked->state) {
6685 priv->media_state = port->linked->state; 6679 priv->media_state = port->linked->state;
6686 if (netif_running(dev)) { 6680 if (netif_running(dev))
6687 if (media_connected == priv->media_state) 6681 set_media_state(dev, media_connected);
6688 netif_carrier_on(dev);
6689 else
6690 netif_carrier_off(dev);
6691 if (netif_msg_link(priv))
6692 printk(KERN_INFO "%s link %s\n", dev->name,
6693 (media_connected == priv->media_state ?
6694 "on" : "off"));
6695 }
6696 } 6682 }
6697} 6683}
6698 6684
@@ -6986,7 +6972,7 @@ static int __init pcidev_init(struct pci_dev *pdev,
6986 int pi; 6972 int pi;
6987 int port_count; 6973 int port_count;
6988 int result; 6974 int result;
6989 char banner[80]; 6975 char banner[sizeof(version)];
6990 struct ksz_switch *sw = NULL; 6976 struct ksz_switch *sw = NULL;
6991 6977
6992 result = pci_enable_device(pdev); 6978 result = pci_enable_device(pdev);
@@ -7010,10 +6996,9 @@ static int __init pcidev_init(struct pci_dev *pdev,
7010 6996
7011 result = -ENOMEM; 6997 result = -ENOMEM;
7012 6998
7013 info = kmalloc(sizeof(struct platform_info), GFP_KERNEL); 6999 info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
7014 if (!info) 7000 if (!info)
7015 goto pcidev_init_dev_err; 7001 goto pcidev_init_dev_err;
7016 memset(info, 0, sizeof(struct platform_info));
7017 7002
7018 hw_priv = &info->dev_info; 7003 hw_priv = &info->dev_info;
7019 hw_priv->pdev = pdev; 7004 hw_priv->pdev = pdev;
@@ -7027,15 +7012,15 @@ static int __init pcidev_init(struct pci_dev *pdev,
7027 cnt = hw_init(hw); 7012 cnt = hw_init(hw);
7028 if (!cnt) { 7013 if (!cnt) {
7029 if (msg_enable & NETIF_MSG_PROBE) 7014 if (msg_enable & NETIF_MSG_PROBE)
7030 printk(KERN_ALERT "chip not detected\n"); 7015 pr_alert("chip not detected\n");
7031 result = -ENODEV; 7016 result = -ENODEV;
7032 goto pcidev_init_alloc_err; 7017 goto pcidev_init_alloc_err;
7033 } 7018 }
7034 7019
7035 sprintf(banner, "%s\n", version); 7020 snprintf(banner, sizeof(banner), "%s", version);
7036 banner[13] = cnt + '0'; 7021 banner[13] = cnt + '0'; /* Replace x in "Micrel KSZ884x" */
7037 ks_info(hw_priv, "%s", banner); 7022 dev_info(&hw_priv->pdev->dev, "%s\n", banner);
7038 ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq); 7023 dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
7039 7024
7040 /* Assume device is KSZ8841. */ 7025 /* Assume device is KSZ8841. */
7041 hw->dev_count = 1; 7026 hw->dev_count = 1;
@@ -7064,10 +7049,9 @@ static int __init pcidev_init(struct pci_dev *pdev,
7064 mib_port_count = SWITCH_PORT_NUM; 7049 mib_port_count = SWITCH_PORT_NUM;
7065 } 7050 }
7066 hw->mib_port_cnt = TOTAL_PORT_NUM; 7051 hw->mib_port_cnt = TOTAL_PORT_NUM;
7067 hw->ksz_switch = kmalloc(sizeof(struct ksz_switch), GFP_KERNEL); 7052 hw->ksz_switch = kzalloc(sizeof(struct ksz_switch), GFP_KERNEL);
7068 if (!hw->ksz_switch) 7053 if (!hw->ksz_switch)
7069 goto pcidev_init_alloc_err; 7054 goto pcidev_init_alloc_err;
7070 memset(hw->ksz_switch, 0, sizeof(struct ksz_switch));
7071 7055
7072 sw = hw->ksz_switch; 7056 sw = hw->ksz_switch;
7073 } 7057 }
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 7b9447646f8a..21f8adaa87c1 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -945,7 +945,7 @@ static void lance_tx_timeout (struct net_device *dev)
945#endif 945#endif
946 lance_restart (dev, 0x0043, 1); 946 lance_restart (dev, 0x0043, 1);
947 947
948 dev->trans_start = jiffies; 948 dev->trans_start = jiffies; /* prevent tx timeout */
949 netif_wake_queue (dev); 949 netif_wake_queue (dev);
950} 950}
951 951
@@ -1011,8 +1011,6 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
1011 outw(0x0000, ioaddr+LANCE_ADDR); 1011 outw(0x0000, ioaddr+LANCE_ADDR);
1012 outw(0x0048, ioaddr+LANCE_DATA); 1012 outw(0x0048, ioaddr+LANCE_DATA);
1013 1013
1014 dev->trans_start = jiffies;
1015
1016 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE) 1014 if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
1017 netif_stop_queue(dev); 1015 netif_stop_queue(dev);
1018 1016
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 973390b82ec2..ce5d6e909218 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -963,7 +963,7 @@ static void i596_tx_timeout (struct net_device *dev)
963 lp->last_restart = dev->stats.tx_packets; 963 lp->last_restart = dev->stats.tx_packets;
964 } 964 }
965 965
966 dev->trans_start = jiffies; 966 dev->trans_start = jiffies; /* prevent tx timeout */
967 netif_wake_queue (dev); 967 netif_wake_queue (dev);
968} 968}
969 969
@@ -974,7 +974,6 @@ static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
974 struct tx_cmd *tx_cmd; 974 struct tx_cmd *tx_cmd;
975 struct i596_tbd *tbd; 975 struct i596_tbd *tbd;
976 short length = skb->len; 976 short length = skb->len;
977 dev->trans_start = jiffies;
978 977
979 DEB(DEB_STARTTX, printk(KERN_DEBUG 978 DEB(DEB_STARTTX, printk(KERN_DEBUG
980 "%s: i596_start_xmit(%x,%p) called\n", 979 "%s: i596_start_xmit(%x,%p) called\n",
@@ -1092,7 +1091,7 @@ static int __devinit i82596_probe(struct net_device *dev)
1092 DMA_FREE(dev->dev.parent, sizeof(struct i596_dma), 1091 DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
1093 (void *)dma, lp->dma_addr); 1092 (void *)dma, lp->dma_addr);
1094 return i; 1093 return i;
1095 }; 1094 }
1096 1095
1097 DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n", 1096 DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
1098 dev->name, dev->base_addr, dev->dev_addr, 1097 dev->name, dev->base_addr, dev->dev_addr,
@@ -1388,7 +1387,7 @@ static void set_multicast_list(struct net_device *dev)
1388 } 1387 }
1389 1388
1390 if (!netdev_mc_empty(dev)) { 1389 if (!netdev_mc_empty(dev)) {
1391 struct dev_mc_list *dmi; 1390 struct netdev_hw_addr *ha;
1392 unsigned char *cp; 1391 unsigned char *cp;
1393 struct mc_cmd *cmd; 1392 struct mc_cmd *cmd;
1394 1393
@@ -1396,10 +1395,10 @@ static void set_multicast_list(struct net_device *dev)
1396 cmd->cmd.command = SWAP16(CmdMulticastList); 1395 cmd->cmd.command = SWAP16(CmdMulticastList);
1397 cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6); 1396 cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
1398 cp = cmd->mc_addrs; 1397 cp = cmd->mc_addrs;
1399 netdev_for_each_mc_addr(dmi, dev) { 1398 netdev_for_each_mc_addr(ha, dev) {
1400 if (!cnt--) 1399 if (!cnt--)
1401 break; 1400 break;
1402 memcpy(cp, dmi->dmi_addr, 6); 1401 memcpy(cp, ha->addr, 6);
1403 if (i596_debug > 1) 1402 if (i596_debug > 1)
1404 DEB(DEB_MULTI, 1403 DEB(DEB_MULTI,
1405 printk(KERN_DEBUG 1404 printk(KERN_DEBUG
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 56f66f485400..316bb70775b1 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -257,7 +257,7 @@ static void __ei_tx_timeout(struct net_device *dev)
257{ 257{
258 unsigned long e8390_base = dev->base_addr; 258 unsigned long e8390_base = dev->base_addr;
259 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 259 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
260 int txsr, isr, tickssofar = jiffies - dev->trans_start; 260 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
261 unsigned long flags; 261 unsigned long flags;
262 262
263 dev->stats.tx_errors++; 263 dev->stats.tx_errors++;
@@ -386,7 +386,6 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
386 { 386 {
387 ei_local->txing = 1; 387 ei_local->txing = 1;
388 NS8390_trigger_send(dev, send_length, output_page); 388 NS8390_trigger_send(dev, send_length, output_page);
389 dev->trans_start = jiffies;
390 if (output_page == ei_local->tx_start_page) 389 if (output_page == ei_local->tx_start_page)
391 { 390 {
392 ei_local->tx1 = -1; 391 ei_local->tx1 = -1;
@@ -445,14 +444,14 @@ static irqreturn_t __ei_interrupt(int irq, void *dev_id)
445 444
446 if (ei_local->irqlock) 445 if (ei_local->irqlock)
447 { 446 {
448#if 1 /* This might just be an interrupt for a PCI device sharing this line */ 447 /*
449 /* The "irqlock" check is only for testing. */ 448 * This might just be an interrupt for a PCI device sharing
450 printk(ei_local->irqlock 449 * this line
451 ? "%s: Interrupted while interrupts are masked! isr=%#2x imr=%#2x.\n" 450 */
452 : "%s: Reentering the interrupt handler! isr=%#2x imr=%#2x.\n", 451 printk("%s: Interrupted while interrupts are masked!"
452 " isr=%#2x imr=%#2x.\n",
453 dev->name, ei_inb_p(e8390_base + EN0_ISR), 453 dev->name, ei_inb_p(e8390_base + EN0_ISR),
454 ei_inb_p(e8390_base + EN0_IMR)); 454 ei_inb_p(e8390_base + EN0_IMR));
455#endif
456 spin_unlock(&ei_local->page_lock); 455 spin_unlock(&ei_local->page_lock);
457 return IRQ_NONE; 456 return IRQ_NONE;
458 } 457 }
@@ -792,7 +791,6 @@ static void ei_receive(struct net_device *dev)
792 /* We used to also ack ENISR_OVER here, but that would sometimes mask 791 /* We used to also ack ENISR_OVER here, but that would sometimes mask
793 a real overrun, leaving the 8390 in a stopped state with rec'vr off. */ 792 a real overrun, leaving the 8390 in a stopped state with rec'vr off. */
794 ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR); 793 ei_outb_p(ENISR_RX+ENISR_RX_ERR, e8390_base+EN0_ISR);
795 return;
796} 794}
797 795
798/** 796/**
@@ -905,10 +903,10 @@ static struct net_device_stats *__ei_get_stats(struct net_device *dev)
905 903
906static inline void make_mc_bits(u8 *bits, struct net_device *dev) 904static inline void make_mc_bits(u8 *bits, struct net_device *dev)
907{ 905{
908 struct dev_mc_list *dmi; 906 struct netdev_hw_addr *ha;
909 907
910 netdev_for_each_mc_addr(dmi, dev) { 908 netdev_for_each_mc_addr(ha, dev) {
911 u32 crc = ether_crc(ETH_ALEN, dmi->dmi_addr); 909 u32 crc = ether_crc(ETH_ALEN, ha->addr);
912 /* 910 /*
913 * The 8390 uses the 6 most significant bits of the 911 * The 8390 uses the 6 most significant bits of the
914 * CRC to index the multicast table. 912 * CRC to index the multicast table.
diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h
index 1af66a1e6911..c03358434acb 100644
--- a/drivers/net/ll_temac.h
+++ b/drivers/net/ll_temac.h
@@ -5,8 +5,11 @@
5#include <linux/netdevice.h> 5#include <linux/netdevice.h>
6#include <linux/of.h> 6#include <linux/of.h>
7#include <linux/spinlock.h> 7#include <linux/spinlock.h>
8
9#ifdef CONFIG_PPC_DCR
8#include <asm/dcr.h> 10#include <asm/dcr.h>
9#include <asm/dcr-regs.h> 11#include <asm/dcr-regs.h>
12#endif
10 13
11/* packet size info */ 14/* packet size info */
12#define XTE_HDR_SIZE 14 /* size of Ethernet header */ 15#define XTE_HDR_SIZE 14 /* size of Ethernet header */
@@ -290,9 +293,6 @@ This option defaults to enabled (set) */
290 293
291#define TX_CONTROL_CALC_CSUM_MASK 1 294#define TX_CONTROL_CALC_CSUM_MASK 1
292 295
293#define XTE_ALIGN 32
294#define BUFFER_ALIGN(adr) ((XTE_ALIGN - ((u32) adr)) % XTE_ALIGN)
295
296#define MULTICAST_CAM_TABLE_NUM 4 296#define MULTICAST_CAM_TABLE_NUM 4
297 297
298/* TX/RX CURDESC_PTR points to first descriptor */ 298/* TX/RX CURDESC_PTR points to first descriptor */
@@ -335,9 +335,15 @@ struct temac_local {
335 struct mii_bus *mii_bus; /* MII bus reference */ 335 struct mii_bus *mii_bus; /* MII bus reference */
336 int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */ 336 int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */
337 337
338 /* IO registers and IRQs */ 338 /* IO registers, dma functions and IRQs */
339 void __iomem *regs; 339 void __iomem *regs;
340 void __iomem *sdma_regs;
341#ifdef CONFIG_PPC_DCR
340 dcr_host_t sdma_dcrs; 342 dcr_host_t sdma_dcrs;
343#endif
344 u32 (*dma_in)(struct temac_local *, int);
345 void (*dma_out)(struct temac_local *, int, u32);
346
341 int tx_irq; 347 int tx_irq;
342 int rx_irq; 348 int rx_irq;
343 int emac_num; 349 int emac_num;
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index dc318330ec79..fa7620e28404 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -20,9 +20,6 @@
20 * or rx, so this should be okay. 20 * or rx, so this should be okay.
21 * 21 *
22 * TODO: 22 * TODO:
23 * - Fix driver to work on more than just Virtex5. Right now the driver
24 * assumes that the locallink DMA registers are accessed via DCR
25 * instructions.
26 * - Factor out locallink DMA code into separate driver 23 * - Factor out locallink DMA code into separate driver
27 * - Fix multicast assignment. 24 * - Fix multicast assignment.
28 * - Fix support for hardware checksumming. 25 * - Fix support for hardware checksumming.
@@ -116,17 +113,86 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
116 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg); 113 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
117} 114}
118 115
116/**
117 * temac_dma_in32 - Memory mapped DMA read, this function expects a
118 * register input that is based on DCR word addresses which
119 * are then converted to memory mapped byte addresses
120 */
119static u32 temac_dma_in32(struct temac_local *lp, int reg) 121static u32 temac_dma_in32(struct temac_local *lp, int reg)
120{ 122{
121 return dcr_read(lp->sdma_dcrs, reg); 123 return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
122} 124}
123 125
126/**
127 * temac_dma_out32 - Memory mapped DMA read, this function expects a
128 * register input that is based on DCR word addresses which
129 * are then converted to memory mapped byte addresses
130 */
124static void temac_dma_out32(struct temac_local *lp, int reg, u32 value) 131static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
125{ 132{
133 out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
134}
135
136/* DMA register access functions can be DCR based or memory mapped.
137 * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
138 * memory mapped.
139 */
140#ifdef CONFIG_PPC_DCR
141
142/**
143 * temac_dma_dcr_in32 - DCR based DMA read
144 */
145static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
146{
147 return dcr_read(lp->sdma_dcrs, reg);
148}
149
150/**
151 * temac_dma_dcr_out32 - DCR based DMA write
152 */
153static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
154{
126 dcr_write(lp->sdma_dcrs, reg, value); 155 dcr_write(lp->sdma_dcrs, reg, value);
127} 156}
128 157
129/** 158/**
159 * temac_dcr_setup - If the DMA is DCR based, then setup the address and
160 * I/O functions
161 */
162static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
163 struct device_node *np)
164{
165 unsigned int dcrs;
166
167 /* setup the dcr address mapping if it's in the device tree */
168
169 dcrs = dcr_resource_start(np, 0);
170 if (dcrs != 0) {
171 lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
172 lp->dma_in = temac_dma_dcr_in;
173 lp->dma_out = temac_dma_dcr_out;
174 dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
175 return 0;
176 }
177 /* no DCR in the device tree, indicate a failure */
178 return -1;
179}
180
181#else
182
183/*
184 * temac_dcr_setup - This is a stub for when DCR is not supported,
185 * such as with MicroBlaze
186 */
187static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
188 struct device_node *np)
189{
190 return -1;
191}
192
193#endif
194
195/**
130 * temac_dma_bd_init - Setup buffer descriptor rings 196 * temac_dma_bd_init - Setup buffer descriptor rings
131 */ 197 */
132static int temac_dma_bd_init(struct net_device *ndev) 198static int temac_dma_bd_init(struct net_device *ndev)
@@ -156,14 +222,14 @@ static int temac_dma_bd_init(struct net_device *ndev)
156 lp->rx_bd_v[i].next = lp->rx_bd_p + 222 lp->rx_bd_v[i].next = lp->rx_bd_p +
157 sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM); 223 sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
158 224
159 skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE 225 skb = netdev_alloc_skb_ip_align(ndev,
160 + XTE_ALIGN, GFP_ATOMIC); 226 XTE_MAX_JUMBO_FRAME_SIZE);
227
161 if (skb == 0) { 228 if (skb == 0) {
162 dev_err(&ndev->dev, "alloc_skb error %d\n", i); 229 dev_err(&ndev->dev, "alloc_skb error %d\n", i);
163 return -1; 230 return -1;
164 } 231 }
165 lp->rx_skb[i] = skb; 232 lp->rx_skb[i] = skb;
166 skb_reserve(skb, BUFFER_ALIGN(skb->data));
167 /* returns physical address of skb->data */ 233 /* returns physical address of skb->data */
168 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, 234 lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
169 skb->data, 235 skb->data,
@@ -173,23 +239,23 @@ static int temac_dma_bd_init(struct net_device *ndev)
173 lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND; 239 lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
174 } 240 }
175 241
176 temac_dma_out32(lp, TX_CHNL_CTRL, 0x10220400 | 242 lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
177 CHNL_CTRL_IRQ_EN | 243 CHNL_CTRL_IRQ_EN |
178 CHNL_CTRL_IRQ_DLY_EN | 244 CHNL_CTRL_IRQ_DLY_EN |
179 CHNL_CTRL_IRQ_COAL_EN); 245 CHNL_CTRL_IRQ_COAL_EN);
180 /* 0x10220483 */ 246 /* 0x10220483 */
181 /* 0x00100483 */ 247 /* 0x00100483 */
182 temac_dma_out32(lp, RX_CHNL_CTRL, 0xff010000 | 248 lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 |
183 CHNL_CTRL_IRQ_EN | 249 CHNL_CTRL_IRQ_EN |
184 CHNL_CTRL_IRQ_DLY_EN | 250 CHNL_CTRL_IRQ_DLY_EN |
185 CHNL_CTRL_IRQ_COAL_EN | 251 CHNL_CTRL_IRQ_COAL_EN |
186 CHNL_CTRL_IRQ_IOE); 252 CHNL_CTRL_IRQ_IOE);
187 /* 0xff010283 */ 253 /* 0xff010283 */
188 254
189 temac_dma_out32(lp, RX_CURDESC_PTR, lp->rx_bd_p); 255 lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
190 temac_dma_out32(lp, RX_TAILDESC_PTR, 256 lp->dma_out(lp, RX_TAILDESC_PTR,
191 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); 257 lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
192 temac_dma_out32(lp, TX_CURDESC_PTR, lp->tx_bd_p); 258 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
193 259
194 return 0; 260 return 0;
195} 261}
@@ -251,20 +317,20 @@ static void temac_set_multicast_list(struct net_device *ndev)
251 temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK); 317 temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
252 dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); 318 dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
253 } else if (!netdev_mc_empty(ndev)) { 319 } else if (!netdev_mc_empty(ndev)) {
254 struct dev_mc_list *mclist; 320 struct netdev_hw_addr *ha;
255 321
256 i = 0; 322 i = 0;
257 netdev_for_each_mc_addr(mclist, ndev) { 323 netdev_for_each_mc_addr(ha, ndev) {
258 if (i >= MULTICAST_CAM_TABLE_NUM) 324 if (i >= MULTICAST_CAM_TABLE_NUM)
259 break; 325 break;
260 multi_addr_msw = ((mclist->dmi_addr[3] << 24) | 326 multi_addr_msw = ((ha->addr[3] << 24) |
261 (mclist->dmi_addr[2] << 16) | 327 (ha->addr[2] << 16) |
262 (mclist->dmi_addr[1] << 8) | 328 (ha->addr[1] << 8) |
263 (mclist->dmi_addr[0])); 329 (ha->addr[0]));
264 temac_indirect_out32(lp, XTE_MAW0_OFFSET, 330 temac_indirect_out32(lp, XTE_MAW0_OFFSET,
265 multi_addr_msw); 331 multi_addr_msw);
266 multi_addr_lsw = ((mclist->dmi_addr[5] << 8) | 332 multi_addr_lsw = ((ha->addr[5] << 8) |
267 (mclist->dmi_addr[4]) | (i << 16)); 333 (ha->addr[4]) | (i << 16));
268 temac_indirect_out32(lp, XTE_MAW1_OFFSET, 334 temac_indirect_out32(lp, XTE_MAW1_OFFSET,
269 multi_addr_lsw); 335 multi_addr_lsw);
270 i++; 336 i++;
@@ -427,9 +493,9 @@ static void temac_device_reset(struct net_device *ndev)
427 temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK); 493 temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
428 494
429 /* Reset Local Link (DMA) */ 495 /* Reset Local Link (DMA) */
430 temac_dma_out32(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); 496 lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
431 timeout = 1000; 497 timeout = 1000;
432 while (temac_dma_in32(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) { 498 while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
433 udelay(1); 499 udelay(1);
434 if (--timeout == 0) { 500 if (--timeout == 0) {
435 dev_err(&ndev->dev, 501 dev_err(&ndev->dev,
@@ -437,7 +503,7 @@ static void temac_device_reset(struct net_device *ndev)
437 break; 503 break;
438 } 504 }
439 } 505 }
440 temac_dma_out32(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE); 506 lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
441 507
442 temac_dma_bd_init(ndev); 508 temac_dma_bd_init(ndev);
443 509
@@ -461,7 +527,7 @@ static void temac_device_reset(struct net_device *ndev)
461 dev_err(&ndev->dev, "Error setting TEMAC options\n"); 527 dev_err(&ndev->dev, "Error setting TEMAC options\n");
462 528
463 /* Init Driver variable */ 529 /* Init Driver variable */
464 ndev->trans_start = 0; 530 ndev->trans_start = jiffies; /* prevent tx timeout */
465} 531}
466 532
467void temac_adjust_link(struct net_device *ndev) 533void temac_adjust_link(struct net_device *ndev)
@@ -598,7 +664,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
598 lp->tx_bd_tail = 0; 664 lp->tx_bd_tail = 0;
599 665
600 /* Kick off the transfer */ 666 /* Kick off the transfer */
601 temac_dma_out32(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */ 667 lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
602 668
603 return NETDEV_TX_OK; 669 return NETDEV_TX_OK;
604} 670}
@@ -612,7 +678,6 @@ static void ll_temac_recv(struct net_device *ndev)
612 struct cdmac_bd *cur_p; 678 struct cdmac_bd *cur_p;
613 dma_addr_t tail_p; 679 dma_addr_t tail_p;
614 int length; 680 int length;
615 unsigned long skb_vaddr;
616 unsigned long flags; 681 unsigned long flags;
617 682
618 spin_lock_irqsave(&lp->rx_lock, flags); 683 spin_lock_irqsave(&lp->rx_lock, flags);
@@ -626,8 +691,7 @@ static void ll_temac_recv(struct net_device *ndev)
626 skb = lp->rx_skb[lp->rx_bd_ci]; 691 skb = lp->rx_skb[lp->rx_bd_ci];
627 length = cur_p->app4 & 0x3FFF; 692 length = cur_p->app4 & 0x3FFF;
628 693
629 skb_vaddr = virt_to_bus(skb->data); 694 dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
630 dma_unmap_single(ndev->dev.parent, skb_vaddr, length,
631 DMA_FROM_DEVICE); 695 DMA_FROM_DEVICE);
632 696
633 skb_put(skb, length); 697 skb_put(skb, length);
@@ -640,16 +704,15 @@ static void ll_temac_recv(struct net_device *ndev)
640 ndev->stats.rx_packets++; 704 ndev->stats.rx_packets++;
641 ndev->stats.rx_bytes += length; 705 ndev->stats.rx_bytes += length;
642 706
643 new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN, 707 new_skb = netdev_alloc_skb_ip_align(ndev,
644 GFP_ATOMIC); 708 XTE_MAX_JUMBO_FRAME_SIZE);
709
645 if (new_skb == 0) { 710 if (new_skb == 0) {
646 dev_err(&ndev->dev, "no memory for new sk_buff\n"); 711 dev_err(&ndev->dev, "no memory for new sk_buff\n");
647 spin_unlock_irqrestore(&lp->rx_lock, flags); 712 spin_unlock_irqrestore(&lp->rx_lock, flags);
648 return; 713 return;
649 } 714 }
650 715
651 skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data));
652
653 cur_p->app0 = STS_CTRL_APP0_IRQONEND; 716 cur_p->app0 = STS_CTRL_APP0_IRQONEND;
654 cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, 717 cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
655 XTE_MAX_JUMBO_FRAME_SIZE, 718 XTE_MAX_JUMBO_FRAME_SIZE,
@@ -664,7 +727,7 @@ static void ll_temac_recv(struct net_device *ndev)
664 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 727 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
665 bdstat = cur_p->app0; 728 bdstat = cur_p->app0;
666 } 729 }
667 temac_dma_out32(lp, RX_TAILDESC_PTR, tail_p); 730 lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
668 731
669 spin_unlock_irqrestore(&lp->rx_lock, flags); 732 spin_unlock_irqrestore(&lp->rx_lock, flags);
670} 733}
@@ -675,8 +738,8 @@ static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
675 struct temac_local *lp = netdev_priv(ndev); 738 struct temac_local *lp = netdev_priv(ndev);
676 unsigned int status; 739 unsigned int status;
677 740
678 status = temac_dma_in32(lp, TX_IRQ_REG); 741 status = lp->dma_in(lp, TX_IRQ_REG);
679 temac_dma_out32(lp, TX_IRQ_REG, status); 742 lp->dma_out(lp, TX_IRQ_REG, status);
680 743
681 if (status & (IRQ_COAL | IRQ_DLY)) 744 if (status & (IRQ_COAL | IRQ_DLY))
682 temac_start_xmit_done(lp->ndev); 745 temac_start_xmit_done(lp->ndev);
@@ -693,8 +756,8 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
693 unsigned int status; 756 unsigned int status;
694 757
695 /* Read and clear the status registers */ 758 /* Read and clear the status registers */
696 status = temac_dma_in32(lp, RX_IRQ_REG); 759 status = lp->dma_in(lp, RX_IRQ_REG);
697 temac_dma_out32(lp, RX_IRQ_REG, status); 760 lp->dma_out(lp, RX_IRQ_REG, status);
698 761
699 if (status & (IRQ_COAL | IRQ_DLY)) 762 if (status & (IRQ_COAL | IRQ_DLY))
700 ll_temac_recv(lp->ndev); 763 ll_temac_recv(lp->ndev);
@@ -795,7 +858,7 @@ static ssize_t temac_show_llink_regs(struct device *dev,
795 int i, len = 0; 858 int i, len = 0;
796 859
797 for (i = 0; i < 0x11; i++) 860 for (i = 0; i < 0x11; i++)
798 len += sprintf(buf + len, "%.8x%s", temac_dma_in32(lp, i), 861 len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
799 (i % 8) == 7 ? "\n" : " "); 862 (i % 8) == 7 ? "\n" : " ");
800 len += sprintf(buf + len, "\n"); 863 len += sprintf(buf + len, "\n");
801 864
@@ -821,7 +884,6 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
821 struct net_device *ndev; 884 struct net_device *ndev;
822 const void *addr; 885 const void *addr;
823 int size, rc = 0; 886 int size, rc = 0;
824 unsigned int dcrs;
825 887
826 /* Init network device structure */ 888 /* Init network device structure */
827 ndev = alloc_etherdev(sizeof(*lp)); 889 ndev = alloc_etherdev(sizeof(*lp));
@@ -871,13 +933,20 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
871 goto nodev; 933 goto nodev;
872 } 934 }
873 935
874 dcrs = dcr_resource_start(np, 0); 936 /* Setup the DMA register accesses, could be DCR or memory mapped */
875 if (dcrs == 0) { 937 if (temac_dcr_setup(lp, op, np)) {
876 dev_err(&op->dev, "could not get DMA register address\n"); 938
877 goto nodev; 939 /* no DCR in the device tree, try non-DCR */
940 lp->sdma_regs = of_iomap(np, 0);
941 if (lp->sdma_regs) {
942 lp->dma_in = temac_dma_in32;
943 lp->dma_out = temac_dma_out32;
944 dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
945 } else {
946 dev_err(&op->dev, "unable to map DMA registers\n");
947 goto nodev;
948 }
878 } 949 }
879 lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
880 dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
881 950
882 lp->rx_irq = irq_of_parse_and_map(np, 0); 951 lp->rx_irq = irq_of_parse_and_map(np, 0);
883 lp->tx_irq = irq_of_parse_and_map(np, 1); 952 lp->tx_irq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/net/lne390.c b/drivers/net/lne390.c
index 41cbaaef0654..8a1097cf8a83 100644
--- a/drivers/net/lne390.c
+++ b/drivers/net/lne390.c
@@ -307,8 +307,6 @@ static void lne390_reset_8390(struct net_device *dev)
307 ei_status.txing = 0; 307 ei_status.txing = 0;
308 outb(0x01, ioaddr + LNE390_RESET_PORT); 308 outb(0x01, ioaddr + LNE390_RESET_PORT);
309 if (ei_debug > 1) printk("reset done\n"); 309 if (ei_debug > 1) printk("reset done\n");
310
311 return;
312} 310}
313 311
314/* 312/*
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 3e3cc04defd0..3df046a58b1d 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -875,8 +875,6 @@ static netdev_tx_t i596_start_xmit (struct sk_buff *skb, struct net_device *dev)
875 length = ETH_ZLEN; 875 length = ETH_ZLEN;
876 } 876 }
877 877
878 dev->trans_start = jiffies;
879
880 tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC); 878 tx_cmd = kmalloc((sizeof (struct tx_cmd) + sizeof (struct i596_tbd)), GFP_ATOMIC);
881 if (tx_cmd == NULL) { 879 if (tx_cmd == NULL) {
882 printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name); 880 printk(KERN_WARNING "%s: i596_xmit Memory squeeze, dropping packet.\n", dev->name);
@@ -1256,7 +1254,7 @@ static void set_multicast_list(struct net_device *dev) {
1256 dev->name, netdev_mc_count(dev)); 1254 dev->name, netdev_mc_count(dev));
1257 1255
1258 if (!netdev_mc_empty(dev)) { 1256 if (!netdev_mc_empty(dev)) {
1259 struct dev_mc_list *dmi; 1257 struct netdev_hw_addr *ha;
1260 char *cp; 1258 char *cp;
1261 cmd = kmalloc(sizeof(struct i596_cmd) + 2 + 1259 cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
1262 netdev_mc_count(dev) * 6, GFP_ATOMIC); 1260 netdev_mc_count(dev) * 6, GFP_ATOMIC);
@@ -1267,8 +1265,8 @@ static void set_multicast_list(struct net_device *dev) {
1267 cmd->command = CmdMulticastList; 1265 cmd->command = CmdMulticastList;
1268 *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6; 1266 *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
1269 cp = ((char *)(cmd + 1))+2; 1267 cp = ((char *)(cmd + 1))+2;
1270 netdev_for_each_mc_addr(dmi, dev) { 1268 netdev_for_each_mc_addr(ha, dev) {
1271 memcpy(cp, dmi->dmi_addr, 6); 1269 memcpy(cp, ha->addr, 6);
1272 cp += 6; 1270 cp += 6;
1273 } 1271 }
1274 if (i596_debug & LOG_SRCDST) 1272 if (i596_debug & LOG_SRCDST)
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index c8e68fde0664..1136c9a22b67 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -661,7 +661,6 @@ static void mac8390_no_reset(struct net_device *dev)
661 ei_status.txing = 0; 661 ei_status.txing = 0;
662 if (ei_debug > 1) 662 if (ei_debug > 1)
663 pr_info("reset not supported\n"); 663 pr_info("reset not supported\n");
664 return;
665} 664}
666 665
667static void interlan_reset(struct net_device *dev) 666static void interlan_reset(struct net_device *dev)
@@ -673,7 +672,6 @@ static void interlan_reset(struct net_device *dev)
673 target[0xC0000] = 0; 672 target[0xC0000] = 0;
674 if (ei_debug > 1) 673 if (ei_debug > 1)
675 pr_cont("reset complete\n"); 674 pr_cont("reset complete\n");
676 return;
677} 675}
678 676
679/* dayna_memcpy_fromio/dayna_memcpy_toio */ 677/* dayna_memcpy_fromio/dayna_memcpy_toio */
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index c0876e915eed..69fa4ef64dd2 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -408,7 +408,6 @@ net_send_packet(struct sk_buff *skb, struct net_device *dev)
408 skb->len+1); 408 skb->len+1);
409 409
410 local_irq_restore(flags); 410 local_irq_restore(flags);
411 dev->trans_start = jiffies;
412 dev_kfree_skb (skb); 411 dev_kfree_skb (skb);
413 412
414 return NETDEV_TX_OK; 413 return NETDEV_TX_OK;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index c8a18a6203c8..40797fbdca9f 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -666,8 +666,6 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
666 666
667 spin_unlock_irqrestore(&bp->lock, flags); 667 spin_unlock_irqrestore(&bp->lock, flags);
668 668
669 dev->trans_start = jiffies;
670
671 return NETDEV_TX_OK; 669 return NETDEV_TX_OK;
672} 670}
673 671
@@ -793,6 +791,7 @@ static void macb_init_hw(struct macb *bp)
793 config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L); 791 config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
794 config |= MACB_BIT(PAE); /* PAuse Enable */ 792 config |= MACB_BIT(PAE); /* PAuse Enable */
795 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ 793 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
794 config |= MACB_BIT(BIG); /* Receive oversized frames */
796 if (bp->dev->flags & IFF_PROMISC) 795 if (bp->dev->flags & IFF_PROMISC)
797 config |= MACB_BIT(CAF); /* Copy All Frames */ 796 config |= MACB_BIT(CAF); /* Copy All Frames */
798 if (!(bp->dev->flags & IFF_BROADCAST)) 797 if (!(bp->dev->flags & IFF_BROADCAST))
@@ -882,15 +881,15 @@ static int hash_get_index(__u8 *addr)
882 */ 881 */
883static void macb_sethashtable(struct net_device *dev) 882static void macb_sethashtable(struct net_device *dev)
884{ 883{
885 struct dev_mc_list *curr; 884 struct netdev_hw_addr *ha;
886 unsigned long mc_filter[2]; 885 unsigned long mc_filter[2];
887 unsigned int bitnr; 886 unsigned int bitnr;
888 struct macb *bp = netdev_priv(dev); 887 struct macb *bp = netdev_priv(dev);
889 888
890 mc_filter[0] = mc_filter[1] = 0; 889 mc_filter[0] = mc_filter[1] = 0;
891 890
892 netdev_for_each_mc_addr(curr, dev) { 891 netdev_for_each_mc_addr(ha, dev) {
893 bitnr = hash_get_index(curr->dmi_addr); 892 bitnr = hash_get_index(ha->addr);
894 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 893 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
895 } 894 }
896 895
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 962c41d0c8df..b6855a6476f8 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -599,7 +599,7 @@ static void mace_set_multicast(struct net_device *dev)
599 mp->maccc |= PROM; 599 mp->maccc |= PROM;
600 } else { 600 } else {
601 unsigned char multicast_filter[8]; 601 unsigned char multicast_filter[8];
602 struct dev_mc_list *dmi; 602 struct netdev_hw_addr *ha;
603 603
604 if (dev->flags & IFF_ALLMULTI) { 604 if (dev->flags & IFF_ALLMULTI) {
605 for (i = 0; i < 8; i++) 605 for (i = 0; i < 8; i++)
@@ -607,8 +607,8 @@ static void mace_set_multicast(struct net_device *dev)
607 } else { 607 } else {
608 for (i = 0; i < 8; i++) 608 for (i = 0; i < 8; i++)
609 multicast_filter[i] = 0; 609 multicast_filter[i] = 0;
610 netdev_for_each_mc_addr(dmi, dev) { 610 netdev_for_each_mc_addr(ha, dev) {
611 crc = ether_crc_le(6, dmi->dmi_addr); 611 crc = ether_crc_le(6, ha->addr);
612 i = crc >> 26; /* bit number in multicast_filter */ 612 i = crc >> 26; /* bit number in multicast_filter */
613 multicast_filter[i >> 3] |= 1 << (i & 7); 613 multicast_filter[i >> 3] |= 1 << (i & 7);
614 } 614 }
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 52e9a51c4c4f..c685a4656878 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -488,7 +488,6 @@ static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
488 488
489 dev_kfree_skb(skb); 489 dev_kfree_skb(skb);
490 490
491 dev->trans_start = jiffies;
492 return NETDEV_TX_OK; 491 return NETDEV_TX_OK;
493} 492}
494 493
@@ -509,7 +508,7 @@ static void mace_set_multicast(struct net_device *dev)
509 mb->maccc |= PROM; 508 mb->maccc |= PROM;
510 } else { 509 } else {
511 unsigned char multicast_filter[8]; 510 unsigned char multicast_filter[8];
512 struct dev_mc_list *dmi; 511 struct netdev_hw_addr *ha;
513 512
514 if (dev->flags & IFF_ALLMULTI) { 513 if (dev->flags & IFF_ALLMULTI) {
515 for (i = 0; i < 8; i++) { 514 for (i = 0; i < 8; i++) {
@@ -518,8 +517,8 @@ static void mace_set_multicast(struct net_device *dev)
518 } else { 517 } else {
519 for (i = 0; i < 8; i++) 518 for (i = 0; i < 8; i++)
520 multicast_filter[i] = 0; 519 multicast_filter[i] = 0;
521 netdev_for_each_mc_addr(dmi, dev) { 520 netdev_for_each_mc_addr(ha, dev) {
522 crc = ether_crc_le(6, dmi->dmi_addr); 521 crc = ether_crc_le(6, ha->addr);
523 /* bit number in multicast_filter */ 522 /* bit number in multicast_filter */
524 i = crc >> 26; 523 i = crc >> 26;
525 multicast_filter[i >> 3] |= 1 << (i & 7); 524 multicast_filter[i >> 3] |= 1 << (i & 7);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 40faa368b07a..4e238afab4a3 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -145,19 +145,15 @@ static void macvlan_broadcast(struct sk_buff *skb,
145} 145}
146 146
147/* called under rcu_read_lock() from netif_receive_skb */ 147/* called under rcu_read_lock() from netif_receive_skb */
148static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb) 148static struct sk_buff *macvlan_handle_frame(struct macvlan_port *port,
149 struct sk_buff *skb)
149{ 150{
150 const struct ethhdr *eth = eth_hdr(skb); 151 const struct ethhdr *eth = eth_hdr(skb);
151 const struct macvlan_port *port;
152 const struct macvlan_dev *vlan; 152 const struct macvlan_dev *vlan;
153 const struct macvlan_dev *src; 153 const struct macvlan_dev *src;
154 struct net_device *dev; 154 struct net_device *dev;
155 unsigned int len; 155 unsigned int len;
156 156
157 port = rcu_dereference(skb->dev->macvlan_port);
158 if (port == NULL)
159 return skb;
160
161 if (is_multicast_ether_addr(eth->h_dest)) { 157 if (is_multicast_ether_addr(eth->h_dest)) {
162 src = macvlan_hash_lookup(port, eth->h_source); 158 src = macvlan_hash_lookup(port, eth->h_source);
163 if (!src) 159 if (!src)
@@ -243,7 +239,7 @@ netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
243 int ret; 239 int ret;
244 240
245 ret = macvlan_queue_xmit(skb, dev); 241 ret = macvlan_queue_xmit(skb, dev);
246 if (likely(ret == NET_XMIT_SUCCESS)) { 242 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
247 txq->tx_packets++; 243 txq->tx_packets++;
248 txq->tx_bytes += len; 244 txq->tx_bytes += len;
249 } else 245 } else
@@ -282,7 +278,7 @@ static int macvlan_open(struct net_device *dev)
282 if (macvlan_addr_busy(vlan->port, dev->dev_addr)) 278 if (macvlan_addr_busy(vlan->port, dev->dev_addr))
283 goto out; 279 goto out;
284 280
285 err = dev_unicast_add(lowerdev, dev->dev_addr); 281 err = dev_uc_add(lowerdev, dev->dev_addr);
286 if (err < 0) 282 if (err < 0)
287 goto out; 283 goto out;
288 if (dev->flags & IFF_ALLMULTI) { 284 if (dev->flags & IFF_ALLMULTI) {
@@ -294,7 +290,7 @@ static int macvlan_open(struct net_device *dev)
294 return 0; 290 return 0;
295 291
296del_unicast: 292del_unicast:
297 dev_unicast_delete(lowerdev, dev->dev_addr); 293 dev_uc_del(lowerdev, dev->dev_addr);
298out: 294out:
299 return err; 295 return err;
300} 296}
@@ -308,7 +304,7 @@ static int macvlan_stop(struct net_device *dev)
308 if (dev->flags & IFF_ALLMULTI) 304 if (dev->flags & IFF_ALLMULTI)
309 dev_set_allmulti(lowerdev, -1); 305 dev_set_allmulti(lowerdev, -1);
310 306
311 dev_unicast_delete(lowerdev, dev->dev_addr); 307 dev_uc_del(lowerdev, dev->dev_addr);
312 308
313 macvlan_hash_del(vlan); 309 macvlan_hash_del(vlan);
314 return 0; 310 return 0;
@@ -332,11 +328,11 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
332 if (macvlan_addr_busy(vlan->port, addr->sa_data)) 328 if (macvlan_addr_busy(vlan->port, addr->sa_data))
333 return -EBUSY; 329 return -EBUSY;
334 330
335 err = dev_unicast_add(lowerdev, addr->sa_data); 331 err = dev_uc_add(lowerdev, addr->sa_data);
336 if (err) 332 if (err)
337 return err; 333 return err;
338 334
339 dev_unicast_delete(lowerdev, dev->dev_addr); 335 dev_uc_del(lowerdev, dev->dev_addr);
340 336
341 macvlan_hash_change_addr(vlan, addr->sa_data); 337 macvlan_hash_change_addr(vlan, addr->sa_data);
342 } 338 }
@@ -748,6 +744,9 @@ static int macvlan_device_event(struct notifier_block *unused,
748 list_for_each_entry_safe(vlan, next, &port->vlans, list) 744 list_for_each_entry_safe(vlan, next, &port->vlans, list)
749 vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL); 745 vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
750 break; 746 break;
747 case NETDEV_PRE_TYPE_CHANGE:
748 /* Forbid underlaying device to change its type. */
749 return NOTIFY_BAD;
751 } 750 }
752 return NOTIFY_DONE; 751 return NOTIFY_DONE;
753} 752}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index abba3cc81f12..a8a94e2f6ddc 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -37,6 +37,8 @@
37struct macvtap_queue { 37struct macvtap_queue {
38 struct sock sk; 38 struct sock sk;
39 struct socket sock; 39 struct socket sock;
40 struct socket_wq wq;
41 int vnet_hdr_sz;
40 struct macvlan_dev *vlan; 42 struct macvlan_dev *vlan;
41 struct file *file; 43 struct file *file;
42 unsigned int flags; 44 unsigned int flags;
@@ -181,7 +183,7 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
181 return -ENOLINK; 183 return -ENOLINK;
182 184
183 skb_queue_tail(&q->sk.sk_receive_queue, skb); 185 skb_queue_tail(&q->sk.sk_receive_queue, skb);
184 wake_up_interruptible_poll(q->sk.sk_sleep, POLLIN | POLLRDNORM | POLLRDBAND); 186 wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
185 return 0; 187 return 0;
186} 188}
187 189
@@ -242,12 +244,15 @@ static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
242 244
243static void macvtap_sock_write_space(struct sock *sk) 245static void macvtap_sock_write_space(struct sock *sk)
244{ 246{
247 wait_queue_head_t *wqueue;
248
245 if (!sock_writeable(sk) || 249 if (!sock_writeable(sk) ||
246 !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 250 !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
247 return; 251 return;
248 252
249 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 253 wqueue = sk_sleep(sk);
250 wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | POLLWRNORM | POLLWRBAND); 254 if (wqueue && waitqueue_active(wqueue))
255 wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
251} 256}
252 257
253static int macvtap_open(struct inode *inode, struct file *file) 258static int macvtap_open(struct inode *inode, struct file *file)
@@ -272,7 +277,8 @@ static int macvtap_open(struct inode *inode, struct file *file)
272 if (!q) 277 if (!q)
273 goto out; 278 goto out;
274 279
275 init_waitqueue_head(&q->sock.wait); 280 q->sock.wq = &q->wq;
281 init_waitqueue_head(&q->wq.wait);
276 q->sock.type = SOCK_RAW; 282 q->sock.type = SOCK_RAW;
277 q->sock.state = SS_CONNECTED; 283 q->sock.state = SS_CONNECTED;
278 q->sock.file = file; 284 q->sock.file = file;
@@ -280,6 +286,7 @@ static int macvtap_open(struct inode *inode, struct file *file)
280 sock_init_data(&q->sock, &q->sk); 286 sock_init_data(&q->sock, &q->sk);
281 q->sk.sk_write_space = macvtap_sock_write_space; 287 q->sk.sk_write_space = macvtap_sock_write_space;
282 q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; 288 q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
289 q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
283 290
284 err = macvtap_set_queue(dev, file, q); 291 err = macvtap_set_queue(dev, file, q);
285 if (err) 292 if (err)
@@ -308,7 +315,7 @@ static unsigned int macvtap_poll(struct file *file, poll_table * wait)
308 goto out; 315 goto out;
309 316
310 mask = 0; 317 mask = 0;
311 poll_wait(file, &q->sock.wait, wait); 318 poll_wait(file, &q->wq.wait, wait);
312 319
313 if (!skb_queue_empty(&q->sk.sk_receive_queue)) 320 if (!skb_queue_empty(&q->sk.sk_receive_queue))
314 mask |= POLLIN | POLLRDNORM; 321 mask |= POLLIN | POLLRDNORM;
@@ -440,14 +447,14 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
440 int vnet_hdr_len = 0; 447 int vnet_hdr_len = 0;
441 448
442 if (q->flags & IFF_VNET_HDR) { 449 if (q->flags & IFF_VNET_HDR) {
443 vnet_hdr_len = sizeof(vnet_hdr); 450 vnet_hdr_len = q->vnet_hdr_sz;
444 451
445 err = -EINVAL; 452 err = -EINVAL;
446 if ((len -= vnet_hdr_len) < 0) 453 if ((len -= vnet_hdr_len) < 0)
447 goto err; 454 goto err;
448 455
449 err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, 456 err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
450 vnet_hdr_len); 457 sizeof(vnet_hdr));
451 if (err < 0) 458 if (err < 0)
452 goto err; 459 goto err;
453 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && 460 if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
@@ -529,7 +536,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
529 536
530 if (q->flags & IFF_VNET_HDR) { 537 if (q->flags & IFF_VNET_HDR) {
531 struct virtio_net_hdr vnet_hdr; 538 struct virtio_net_hdr vnet_hdr;
532 vnet_hdr_len = sizeof (vnet_hdr); 539 vnet_hdr_len = q->vnet_hdr_sz;
533 if ((len -= vnet_hdr_len) < 0) 540 if ((len -= vnet_hdr_len) < 0)
534 return -EINVAL; 541 return -EINVAL;
535 542
@@ -537,7 +544,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
537 if (ret) 544 if (ret)
538 return ret; 545 return ret;
539 546
540 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, vnet_hdr_len)) 547 if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
541 return -EFAULT; 548 return -EFAULT;
542 } 549 }
543 550
@@ -562,7 +569,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
562 struct sk_buff *skb; 569 struct sk_buff *skb;
563 ssize_t ret = 0; 570 ssize_t ret = 0;
564 571
565 add_wait_queue(q->sk.sk_sleep, &wait); 572 add_wait_queue(sk_sleep(&q->sk), &wait);
566 while (len) { 573 while (len) {
567 current->state = TASK_INTERRUPTIBLE; 574 current->state = TASK_INTERRUPTIBLE;
568 575
@@ -587,7 +594,7 @@ static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
587 } 594 }
588 595
589 current->state = TASK_RUNNING; 596 current->state = TASK_RUNNING;
590 remove_wait_queue(q->sk.sk_sleep, &wait); 597 remove_wait_queue(sk_sleep(&q->sk), &wait);
591 return ret; 598 return ret;
592} 599}
593 600
@@ -622,6 +629,8 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
622 struct ifreq __user *ifr = argp; 629 struct ifreq __user *ifr = argp;
623 unsigned int __user *up = argp; 630 unsigned int __user *up = argp;
624 unsigned int u; 631 unsigned int u;
632 int __user *sp = argp;
633 int s;
625 int ret; 634 int ret;
626 635
627 switch (cmd) { 636 switch (cmd) {
@@ -667,6 +676,21 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
667 q->sk.sk_sndbuf = u; 676 q->sk.sk_sndbuf = u;
668 return 0; 677 return 0;
669 678
679 case TUNGETVNETHDRSZ:
680 s = q->vnet_hdr_sz;
681 if (put_user(s, sp))
682 return -EFAULT;
683 return 0;
684
685 case TUNSETVNETHDRSZ:
686 if (get_user(s, sp))
687 return -EFAULT;
688 if (s < (int)sizeof(struct virtio_net_hdr))
689 return -EINVAL;
690
691 q->vnet_hdr_sz = s;
692 return 0;
693
670 case TUNSETOFFLOAD: 694 case TUNSETOFFLOAD:
671 /* let the user check for future flags */ 695 /* let the user check for future flags */
672 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | 696 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 9f72cb45f4af..42e3294671d7 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -746,10 +746,8 @@ static void meth_tx_timeout(struct net_device *dev)
746 /* Enable interrupt */ 746 /* Enable interrupt */
747 spin_unlock_irqrestore(&priv->meth_lock, flags); 747 spin_unlock_irqrestore(&priv->meth_lock, flags);
748 748
749 dev->trans_start = jiffies; 749 dev->trans_start = jiffies; /* prevent tx timeout */
750 netif_wake_queue(dev); 750 netif_wake_queue(dev);
751
752 return;
753} 751}
754 752
755/* 753/*
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 86467b444ac6..d5afd037cd7d 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -140,8 +140,6 @@ static void mlx4_en_get_wol(struct net_device *netdev,
140{ 140{
141 wol->supported = 0; 141 wol->supported = 0;
142 wol->wolopts = 0; 142 wol->wolopts = 0;
143
144 return;
145} 143}
146 144
147static int mlx4_en_get_sset_count(struct net_device *dev, int sset) 145static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 73c3d20c6453..96180c0ec206 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -161,39 +161,29 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
161static void mlx4_en_clear_list(struct net_device *dev) 161static void mlx4_en_clear_list(struct net_device *dev)
162{ 162{
163 struct mlx4_en_priv *priv = netdev_priv(dev); 163 struct mlx4_en_priv *priv = netdev_priv(dev);
164 struct dev_mc_list *plist = priv->mc_list;
165 struct dev_mc_list *next;
166 164
167 while (plist) { 165 kfree(priv->mc_addrs);
168 next = plist->next; 166 priv->mc_addrs_cnt = 0;
169 kfree(plist);
170 plist = next;
171 }
172 priv->mc_list = NULL;
173} 167}
174 168
175static void mlx4_en_cache_mclist(struct net_device *dev) 169static void mlx4_en_cache_mclist(struct net_device *dev)
176{ 170{
177 struct mlx4_en_priv *priv = netdev_priv(dev); 171 struct mlx4_en_priv *priv = netdev_priv(dev);
178 struct dev_mc_list *mclist; 172 struct netdev_hw_addr *ha;
179 struct dev_mc_list *tmp; 173 char *mc_addrs;
180 struct dev_mc_list *plist = NULL; 174 int mc_addrs_cnt = netdev_mc_count(dev);
181 175 int i;
182 for (mclist = dev->mc_list; mclist; mclist = mclist->next) { 176
183 tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC); 177 mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
184 if (!tmp) { 178 if (!mc_addrs) {
185 en_err(priv, "failed to allocate multicast list\n"); 179 en_err(priv, "failed to allocate multicast list\n");
186 mlx4_en_clear_list(dev); 180 return;
187 return;
188 }
189 memcpy(tmp, mclist, sizeof(struct dev_mc_list));
190 tmp->next = NULL;
191 if (plist)
192 plist->next = tmp;
193 else
194 priv->mc_list = tmp;
195 plist = tmp;
196 } 181 }
182 i = 0;
183 netdev_for_each_mc_addr(ha, dev)
184 memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
185 priv->mc_addrs = mc_addrs;
186 priv->mc_addrs_cnt = mc_addrs_cnt;
197} 187}
198 188
199 189
@@ -213,7 +203,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
213 mcast_task); 203 mcast_task);
214 struct mlx4_en_dev *mdev = priv->mdev; 204 struct mlx4_en_dev *mdev = priv->mdev;
215 struct net_device *dev = priv->dev; 205 struct net_device *dev = priv->dev;
216 struct dev_mc_list *mclist;
217 u64 mcast_addr = 0; 206 u64 mcast_addr = 0;
218 int err; 207 int err;
219 208
@@ -289,6 +278,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
289 if (err) 278 if (err)
290 en_err(priv, "Failed disabling multicast filter\n"); 279 en_err(priv, "Failed disabling multicast filter\n");
291 } else { 280 } else {
281 int i;
282
292 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 283 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
293 0, MLX4_MCAST_DISABLE); 284 0, MLX4_MCAST_DISABLE);
294 if (err) 285 if (err)
@@ -303,8 +294,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
303 netif_tx_lock_bh(dev); 294 netif_tx_lock_bh(dev);
304 mlx4_en_cache_mclist(dev); 295 mlx4_en_cache_mclist(dev);
305 netif_tx_unlock_bh(dev); 296 netif_tx_unlock_bh(dev);
306 for (mclist = priv->mc_list; mclist; mclist = mclist->next) { 297 for (i = 0; i < priv->mc_addrs_cnt; i++) {
307 mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr); 298 mcast_addr =
299 mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
308 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 300 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
309 mcast_addr, 0, MLX4_MCAST_CONFIG); 301 mcast_addr, 0, MLX4_MCAST_CONFIG);
310 } 302 }
@@ -512,7 +504,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
512 504
513 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); 505 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
514 if (err) 506 if (err)
515 en_dbg(HW, priv, "Could not update stats \n"); 507 en_dbg(HW, priv, "Could not update stats\n");
516 508
517 mutex_lock(&mdev->state_lock); 509 mutex_lock(&mdev->state_lock);
518 if (mdev->device_up) { 510 if (mdev->device_up) {
@@ -985,7 +977,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
985 priv->flags = prof->flags; 977 priv->flags = prof->flags;
986 priv->tx_ring_num = prof->tx_ring_num; 978 priv->tx_ring_num = prof->tx_ring_num;
987 priv->rx_ring_num = prof->rx_ring_num; 979 priv->rx_ring_num = prof->rx_ring_num;
988 priv->mc_list = NULL;
989 priv->mac_index = -1; 980 priv->mac_index = -1;
990 priv->msg_enable = MLX4_EN_MSG_LEVEL; 981 priv->msg_enable = MLX4_EN_MSG_LEVEL;
991 spin_lock_init(&priv->stats_lock); 982 spin_lock_init(&priv->stats_lock);
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 7365bf488b81..423053482ed5 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -239,7 +239,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
239 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n", 239 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
240 eqe->type, eqe->subtype, eq->eqn, eq->cons_index); 240 eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
241 break; 241 break;
242 }; 242 }
243 243
244 ++eq->cons_index; 244 ++eq->cons_index;
245 eqes_found = 1; 245 eqes_found = 1;
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index bc72d6e4919b..13343e884999 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -40,6 +40,7 @@
40#include <linux/mutex.h> 40#include <linux/mutex.h>
41#include <linux/radix-tree.h> 41#include <linux/radix-tree.h>
42#include <linux/timer.h> 42#include <linux/timer.h>
43#include <linux/semaphore.h>
43#include <linux/workqueue.h> 44#include <linux/workqueue.h>
44 45
45#include <linux/mlx4/device.h> 46#include <linux/mlx4/device.h>
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 82c3ebc584e3..b55e46c8b682 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -492,7 +492,8 @@ struct mlx4_en_priv {
492 struct mlx4_en_perf_stats pstats; 492 struct mlx4_en_perf_stats pstats;
493 struct mlx4_en_pkt_stats pkstats; 493 struct mlx4_en_pkt_stats pkstats;
494 struct mlx4_en_port_stats port_stats; 494 struct mlx4_en_port_stats port_stats;
495 struct dev_mc_list *mc_list; 495 char *mc_addrs;
496 int mc_addrs_cnt;
496 struct mlx4_en_stat_out_mbox hw_stats; 497 struct mlx4_en_stat_out_mbox hw_stats;
497}; 498};
498 499
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8613a52ddf17..e345ec8cb473 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -882,7 +882,6 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
882 882
883 txq->tx_bytes += skb->len; 883 txq->tx_bytes += skb->len;
884 txq->tx_packets++; 884 txq->tx_packets++;
885 dev->trans_start = jiffies;
886 885
887 entries_left = txq->tx_ring_size - txq->tx_desc_count; 886 entries_left = txq->tx_ring_size - txq->tx_desc_count;
888 if (entries_left < MAX_SKB_FRAGS + 1) 887 if (entries_left < MAX_SKB_FRAGS + 1)
@@ -1770,7 +1769,7 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
1770 struct mv643xx_eth_private *mp = netdev_priv(dev); 1769 struct mv643xx_eth_private *mp = netdev_priv(dev);
1771 u32 *mc_spec; 1770 u32 *mc_spec;
1772 u32 *mc_other; 1771 u32 *mc_other;
1773 struct dev_addr_list *addr; 1772 struct netdev_hw_addr *ha;
1774 int i; 1773 int i;
1775 1774
1776 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { 1775 if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
@@ -1795,8 +1794,8 @@ oom:
1795 memset(mc_spec, 0, 0x100); 1794 memset(mc_spec, 0, 0x100);
1796 memset(mc_other, 0, 0x100); 1795 memset(mc_other, 0, 0x100);
1797 1796
1798 netdev_for_each_mc_addr(addr, dev) { 1797 netdev_for_each_mc_addr(ha, dev) {
1799 u8 *a = addr->da_addr; 1798 u8 *a = ha->addr;
1800 u32 *table; 1799 u32 *table;
1801 int entry; 1800 int entry;
1802 1801
@@ -2609,10 +2608,9 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2609 goto out; 2608 goto out;
2610 2609
2611 ret = -ENOMEM; 2610 ret = -ENOMEM;
2612 msp = kmalloc(sizeof(*msp), GFP_KERNEL); 2611 msp = kzalloc(sizeof(*msp), GFP_KERNEL);
2613 if (msp == NULL) 2612 if (msp == NULL)
2614 goto out; 2613 goto out;
2615 memset(msp, 0, sizeof(*msp));
2616 2614
2617 msp->base = ioremap(res->start, res->end - res->start + 1); 2615 msp->base = ioremap(res->start, res->end - res->start + 1);
2618 if (msp->base == NULL) 2616 if (msp->base == NULL)
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ecde0876a785..e0b47cc8a86e 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -110,15 +110,15 @@ MODULE_LICENSE("Dual BSD/GPL");
110struct myri10ge_rx_buffer_state { 110struct myri10ge_rx_buffer_state {
111 struct page *page; 111 struct page *page;
112 int page_offset; 112 int page_offset;
113 DECLARE_PCI_UNMAP_ADDR(bus) 113 DEFINE_DMA_UNMAP_ADDR(bus);
114 DECLARE_PCI_UNMAP_LEN(len) 114 DEFINE_DMA_UNMAP_LEN(len);
115}; 115};
116 116
117struct myri10ge_tx_buffer_state { 117struct myri10ge_tx_buffer_state {
118 struct sk_buff *skb; 118 struct sk_buff *skb;
119 int last; 119 int last;
120 DECLARE_PCI_UNMAP_ADDR(bus) 120 DEFINE_DMA_UNMAP_ADDR(bus);
121 DECLARE_PCI_UNMAP_LEN(len) 121 DEFINE_DMA_UNMAP_LEN(len);
122}; 122};
123 123
124struct myri10ge_cmd { 124struct myri10ge_cmd {
@@ -1234,7 +1234,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
1234 rx->info[idx].page_offset = rx->page_offset; 1234 rx->info[idx].page_offset = rx->page_offset;
1235 /* note that this is the address of the start of the 1235 /* note that this is the address of the start of the
1236 * page */ 1236 * page */
1237 pci_unmap_addr_set(&rx->info[idx], bus, rx->bus); 1237 dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
1238 rx->shadow[idx].addr_low = 1238 rx->shadow[idx].addr_low =
1239 htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset); 1239 htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
1240 rx->shadow[idx].addr_high = 1240 rx->shadow[idx].addr_high =
@@ -1266,7 +1266,7 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
1266 /* unmap the recvd page if we're the only or last user of it */ 1266 /* unmap the recvd page if we're the only or last user of it */
1267 if (bytes >= MYRI10GE_ALLOC_SIZE / 2 || 1267 if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
1268 (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) { 1268 (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
1269 pci_unmap_page(pdev, (pci_unmap_addr(info, bus) 1269 pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
1270 & ~(MYRI10GE_ALLOC_SIZE - 1)), 1270 & ~(MYRI10GE_ALLOC_SIZE - 1)),
1271 MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE); 1271 MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
1272 } 1272 }
@@ -1373,21 +1373,21 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
1373 tx->info[idx].last = 0; 1373 tx->info[idx].last = 0;
1374 } 1374 }
1375 tx->done++; 1375 tx->done++;
1376 len = pci_unmap_len(&tx->info[idx], len); 1376 len = dma_unmap_len(&tx->info[idx], len);
1377 pci_unmap_len_set(&tx->info[idx], len, 0); 1377 dma_unmap_len_set(&tx->info[idx], len, 0);
1378 if (skb) { 1378 if (skb) {
1379 ss->stats.tx_bytes += skb->len; 1379 ss->stats.tx_bytes += skb->len;
1380 ss->stats.tx_packets++; 1380 ss->stats.tx_packets++;
1381 dev_kfree_skb_irq(skb); 1381 dev_kfree_skb_irq(skb);
1382 if (len) 1382 if (len)
1383 pci_unmap_single(pdev, 1383 pci_unmap_single(pdev,
1384 pci_unmap_addr(&tx->info[idx], 1384 dma_unmap_addr(&tx->info[idx],
1385 bus), len, 1385 bus), len,
1386 PCI_DMA_TODEVICE); 1386 PCI_DMA_TODEVICE);
1387 } else { 1387 } else {
1388 if (len) 1388 if (len)
1389 pci_unmap_page(pdev, 1389 pci_unmap_page(pdev,
1390 pci_unmap_addr(&tx->info[idx], 1390 dma_unmap_addr(&tx->info[idx],
1391 bus), len, 1391 bus), len,
1392 PCI_DMA_TODEVICE); 1392 PCI_DMA_TODEVICE);
1393 } 1393 }
@@ -2094,20 +2094,20 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
2094 /* Mark as free */ 2094 /* Mark as free */
2095 tx->info[idx].skb = NULL; 2095 tx->info[idx].skb = NULL;
2096 tx->done++; 2096 tx->done++;
2097 len = pci_unmap_len(&tx->info[idx], len); 2097 len = dma_unmap_len(&tx->info[idx], len);
2098 pci_unmap_len_set(&tx->info[idx], len, 0); 2098 dma_unmap_len_set(&tx->info[idx], len, 0);
2099 if (skb) { 2099 if (skb) {
2100 ss->stats.tx_dropped++; 2100 ss->stats.tx_dropped++;
2101 dev_kfree_skb_any(skb); 2101 dev_kfree_skb_any(skb);
2102 if (len) 2102 if (len)
2103 pci_unmap_single(mgp->pdev, 2103 pci_unmap_single(mgp->pdev,
2104 pci_unmap_addr(&tx->info[idx], 2104 dma_unmap_addr(&tx->info[idx],
2105 bus), len, 2105 bus), len,
2106 PCI_DMA_TODEVICE); 2106 PCI_DMA_TODEVICE);
2107 } else { 2107 } else {
2108 if (len) 2108 if (len)
2109 pci_unmap_page(mgp->pdev, 2109 pci_unmap_page(mgp->pdev,
2110 pci_unmap_addr(&tx->info[idx], 2110 dma_unmap_addr(&tx->info[idx],
2111 bus), len, 2111 bus), len,
2112 PCI_DMA_TODEVICE); 2112 PCI_DMA_TODEVICE);
2113 } 2113 }
@@ -2757,12 +2757,12 @@ again:
2757 } 2757 }
2758 2758
2759 /* map the skb for DMA */ 2759 /* map the skb for DMA */
2760 len = skb->len - skb->data_len; 2760 len = skb_headlen(skb);
2761 idx = tx->req & tx->mask; 2761 idx = tx->req & tx->mask;
2762 tx->info[idx].skb = skb; 2762 tx->info[idx].skb = skb;
2763 bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); 2763 bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2764 pci_unmap_addr_set(&tx->info[idx], bus, bus); 2764 dma_unmap_addr_set(&tx->info[idx], bus, bus);
2765 pci_unmap_len_set(&tx->info[idx], len, len); 2765 dma_unmap_len_set(&tx->info[idx], len, len);
2766 2766
2767 frag_cnt = skb_shinfo(skb)->nr_frags; 2767 frag_cnt = skb_shinfo(skb)->nr_frags;
2768 frag_idx = 0; 2768 frag_idx = 0;
@@ -2865,8 +2865,8 @@ again:
2865 len = frag->size; 2865 len = frag->size;
2866 bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset, 2866 bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
2867 len, PCI_DMA_TODEVICE); 2867 len, PCI_DMA_TODEVICE);
2868 pci_unmap_addr_set(&tx->info[idx], bus, bus); 2868 dma_unmap_addr_set(&tx->info[idx], bus, bus);
2869 pci_unmap_len_set(&tx->info[idx], len, len); 2869 dma_unmap_len_set(&tx->info[idx], len, len);
2870 } 2870 }
2871 2871
2872 (req - rdma_count)->rdma_count = rdma_count; 2872 (req - rdma_count)->rdma_count = rdma_count;
@@ -2903,19 +2903,19 @@ abort_linearize:
2903 idx = tx->req & tx->mask; 2903 idx = tx->req & tx->mask;
2904 tx->info[idx].skb = NULL; 2904 tx->info[idx].skb = NULL;
2905 do { 2905 do {
2906 len = pci_unmap_len(&tx->info[idx], len); 2906 len = dma_unmap_len(&tx->info[idx], len);
2907 if (len) { 2907 if (len) {
2908 if (tx->info[idx].skb != NULL) 2908 if (tx->info[idx].skb != NULL)
2909 pci_unmap_single(mgp->pdev, 2909 pci_unmap_single(mgp->pdev,
2910 pci_unmap_addr(&tx->info[idx], 2910 dma_unmap_addr(&tx->info[idx],
2911 bus), len, 2911 bus), len,
2912 PCI_DMA_TODEVICE); 2912 PCI_DMA_TODEVICE);
2913 else 2913 else
2914 pci_unmap_page(mgp->pdev, 2914 pci_unmap_page(mgp->pdev,
2915 pci_unmap_addr(&tx->info[idx], 2915 dma_unmap_addr(&tx->info[idx],
2916 bus), len, 2916 bus), len,
2917 PCI_DMA_TODEVICE); 2917 PCI_DMA_TODEVICE);
2918 pci_unmap_len_set(&tx->info[idx], len, 0); 2918 dma_unmap_len_set(&tx->info[idx], len, 0);
2919 tx->info[idx].skb = NULL; 2919 tx->info[idx].skb = NULL;
2920 } 2920 }
2921 idx = (idx + 1) & tx->mask; 2921 idx = (idx + 1) & tx->mask;
@@ -3002,7 +3002,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
3002{ 3002{
3003 struct myri10ge_priv *mgp = netdev_priv(dev); 3003 struct myri10ge_priv *mgp = netdev_priv(dev);
3004 struct myri10ge_cmd cmd; 3004 struct myri10ge_cmd cmd;
3005 struct dev_mc_list *mc_list; 3005 struct netdev_hw_addr *ha;
3006 __be32 data[2] = { 0, 0 }; 3006 __be32 data[2] = { 0, 0 };
3007 int err; 3007 int err;
3008 3008
@@ -3039,8 +3039,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
3039 } 3039 }
3040 3040
3041 /* Walk the multicast list, and add each address */ 3041 /* Walk the multicast list, and add each address */
3042 netdev_for_each_mc_addr(mc_list, dev) { 3042 netdev_for_each_mc_addr(ha, dev) {
3043 memcpy(data, &mc_list->dmi_addr, 6); 3043 memcpy(data, &ha->addr, 6);
3044 cmd.data0 = ntohl(data[0]); 3044 cmd.data0 = ntohl(data[0]);
3045 cmd.data1 = ntohl(data[1]); 3045 cmd.data1 = ntohl(data[1]);
3046 err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP, 3046 err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3048,7 +3048,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
3048 3048
3049 if (err != 0) { 3049 if (err != 0) {
3050 netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n", 3050 netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
3051 err, mc_list->dmi_addr); 3051 err, ha->addr);
3052 goto abort; 3052 goto abort;
3053 } 3053 }
3054 } 3054 }
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index 77835df4d013..1a57c3da1f49 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -865,7 +865,7 @@ static inline void determine_reg_space_size(struct myri_eth *mp)
865 printk("myricom: AIEEE weird cpu version %04x assuming pre4.0\n", 865 printk("myricom: AIEEE weird cpu version %04x assuming pre4.0\n",
866 mp->eeprom.cpuvers); 866 mp->eeprom.cpuvers);
867 mp->reg_size = (3 * 128 * 1024) + 4096; 867 mp->reg_size = (3 * 128 * 1024) + 4096;
868 }; 868 }
869} 869}
870 870
871#ifdef DEBUG_DETECT 871#ifdef DEBUG_DETECT
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index e52038783245..2a17b503feaa 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1905,7 +1905,7 @@ static void ns_tx_timeout(struct net_device *dev)
1905 spin_unlock_irq(&np->lock); 1905 spin_unlock_irq(&np->lock);
1906 enable_irq(dev->irq); 1906 enable_irq(dev->irq);
1907 1907
1908 dev->trans_start = jiffies; 1908 dev->trans_start = jiffies; /* prevent tx timeout */
1909 np->stats.tx_errors++; 1909 np->stats.tx_errors++;
1910 netif_wake_queue(dev); 1910 netif_wake_queue(dev);
1911} 1911}
@@ -2119,8 +2119,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
2119 } 2119 }
2120 spin_unlock_irqrestore(&np->lock, flags); 2120 spin_unlock_irqrestore(&np->lock, flags);
2121 2121
2122 dev->trans_start = jiffies;
2123
2124 if (netif_msg_tx_queued(np)) { 2122 if (netif_msg_tx_queued(np)) {
2125 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", 2123 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
2126 dev->name, np->cur_tx, entry); 2124 dev->name, np->cur_tx, entry);
@@ -2493,12 +2491,12 @@ static void __set_rx_mode(struct net_device *dev)
2493 rx_mode = RxFilterEnable | AcceptBroadcast 2491 rx_mode = RxFilterEnable | AcceptBroadcast
2494 | AcceptAllMulticast | AcceptMyPhys; 2492 | AcceptAllMulticast | AcceptMyPhys;
2495 } else { 2493 } else {
2496 struct dev_mc_list *mclist; 2494 struct netdev_hw_addr *ha;
2497 int i; 2495 int i;
2498 2496
2499 memset(mc_filter, 0, sizeof(mc_filter)); 2497 memset(mc_filter, 0, sizeof(mc_filter));
2500 netdev_for_each_mc_addr(mclist, dev) { 2498 netdev_for_each_mc_addr(ha, dev) {
2501 int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff; 2499 int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
2502 mc_filter[b/8] |= (1 << (b & 0x07)); 2500 mc_filter[b/8] |= (1 << (b & 0x07));
2503 } 2501 }
2504 rx_mode = RxFilterEnable | AcceptBroadcast 2502 rx_mode = RxFilterEnable | AcceptBroadcast
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index 7bd6662d5b04..e0b0ef11f110 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -608,7 +608,6 @@ retry:
608 608
609 outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */ 609 outb_p(ENISR_RDC, NE_BASE + EN0_ISR); /* Ack intr. */
610 ei_status.dmaing &= ~0x01; 610 ei_status.dmaing &= ~0x01;
611 return;
612} 611}
613 612
614 613
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index f4347f88b6f2..b8e2923a1d69 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -785,7 +785,6 @@ retry:
785 785
786 outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 786 outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
787 ei_status.dmaing &= ~0x01; 787 ei_status.dmaing &= ~0x01;
788 return;
789} 788}
790 789
791static int __init ne_drv_probe(struct platform_device *pdev) 790static int __init ne_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ne2.c b/drivers/net/ne2.c
index ff3c4c814988..70cdc6996342 100644
--- a/drivers/net/ne2.c
+++ b/drivers/net/ne2.c
@@ -730,7 +730,6 @@ retry:
730 730
731 outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 731 outb_p(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
732 ei_status.dmaing &= ~0x01; 732 ei_status.dmaing &= ~0x01;
733 return;
734} 733}
735 734
736 735
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 85aec4f10131..3c333cb5d34e 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -631,7 +631,6 @@ static void ne2k_pci_block_output(struct net_device *dev, int count,
631 631
632 outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ 632 outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
633 ei_status.dmaing &= ~0x01; 633 ei_status.dmaing &= ~0x01;
634 return;
635} 634}
636 635
637static void ne2k_pci_get_drvinfo(struct net_device *dev, 636static void ne2k_pci_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/ne3210.c b/drivers/net/ne3210.c
index a00bbfb9aed0..243ed2aee88e 100644
--- a/drivers/net/ne3210.c
+++ b/drivers/net/ne3210.c
@@ -255,8 +255,6 @@ static void ne3210_reset_8390(struct net_device *dev)
255 ei_status.txing = 0; 255 ei_status.txing = 0;
256 outb(0x01, ioaddr + NE3210_RESET_PORT); 256 outb(0x01, ioaddr + NE3210_RESET_PORT);
257 if (ei_debug > 1) printk("reset done\n"); 257 if (ei_debug > 1) printk("reset done\n");
258
259 return;
260} 258}
261 259
262/* 260/*
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index a361dea35574..ca142c47b2e4 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -665,7 +665,8 @@ static int netconsole_netdev_event(struct notifier_block *this,
665 struct netconsole_target *nt; 665 struct netconsole_target *nt;
666 struct net_device *dev = ptr; 666 struct net_device *dev = ptr;
667 667
668 if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER)) 668 if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
669 event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN))
669 goto done; 670 goto done;
670 671
671 spin_lock_irqsave(&target_list_lock, flags); 672 spin_lock_irqsave(&target_list_lock, flags);
@@ -677,19 +678,21 @@ static int netconsole_netdev_event(struct notifier_block *this,
677 strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ); 678 strlcpy(nt->np.dev_name, dev->name, IFNAMSIZ);
678 break; 679 break;
679 case NETDEV_UNREGISTER: 680 case NETDEV_UNREGISTER:
680 if (!nt->enabled)
681 break;
682 netpoll_cleanup(&nt->np); 681 netpoll_cleanup(&nt->np);
682 /* Fall through */
683 case NETDEV_GOING_DOWN:
684 case NETDEV_BONDING_DESLAVE:
683 nt->enabled = 0; 685 nt->enabled = 0;
684 printk(KERN_INFO "netconsole: network logging stopped"
685 ", interface %s unregistered\n",
686 dev->name);
687 break; 686 break;
688 } 687 }
689 } 688 }
690 netconsole_target_put(nt); 689 netconsole_target_put(nt);
691 } 690 }
692 spin_unlock_irqrestore(&target_list_lock, flags); 691 spin_unlock_irqrestore(&target_list_lock, flags);
692 if (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE)
693 printk(KERN_INFO "netconsole: network logging stopped, "
694 "interface %s %s\n", dev->name,
695 event == NETDEV_UNREGISTER ? "unregistered" : "released slaves");
693 696
694done: 697done:
695 return NOTIFY_DONE; 698 return NOTIFY_DONE;
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 64770298c4f7..2e4b42175f3f 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -126,7 +126,6 @@ netx_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
126 FIFO_PTR_FRAMENO(1) | 126 FIFO_PTR_FRAMENO(1) |
127 FIFO_PTR_FRAMELEN(len)); 127 FIFO_PTR_FRAMELEN(len));
128 128
129 ndev->trans_start = jiffies;
130 ndev->stats.tx_packets++; 129 ndev->stats.tx_packets++;
131 ndev->stats.tx_bytes += skb->len; 130 ndev->stats.tx_bytes += skb->len;
132 131
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 0f703838e21a..ffa1b9ce1cc5 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -95,6 +95,9 @@
95#define ADDR_IN_WINDOW1(off) \ 95#define ADDR_IN_WINDOW1(off) \
96 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0 96 ((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
97 97
98#define ADDR_IN_RANGE(addr, low, high) \
99 (((addr) < (high)) && ((addr) >= (low)))
100
98/* 101/*
99 * normalize a 64MB crb address to 32MB PCI window 102 * normalize a 64MB crb address to 32MB PCI window
100 * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1 103 * To use NETXEN_CRB_NORMALIZE, window _must_ be set to 1
@@ -420,7 +423,6 @@ struct status_desc {
420} __attribute__ ((aligned(16))); 423} __attribute__ ((aligned(16)));
421 424
422/* UNIFIED ROMIMAGE *************************/ 425/* UNIFIED ROMIMAGE *************************/
423#define NX_UNI_FW_MIN_SIZE 0xc8000
424#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0 426#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
425#define NX_UNI_DIR_SECT_BOOTLD 0x6 427#define NX_UNI_DIR_SECT_BOOTLD 0x6
426#define NX_UNI_DIR_SECT_FW 0x7 428#define NX_UNI_DIR_SECT_FW 0x7
@@ -1353,6 +1355,8 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable);
1353int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd); 1355int netxen_config_ipaddr(struct netxen_adapter *adapter, u32 ip, int cmd);
1354int netxen_linkevent_request(struct netxen_adapter *adapter, int enable); 1356int netxen_linkevent_request(struct netxen_adapter *adapter, int enable);
1355void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup); 1357void netxen_advert_link_change(struct netxen_adapter *adapter, int linkup);
1358void netxen_pci_camqm_read_2M(struct netxen_adapter *, u64, u64 *);
1359void netxen_pci_camqm_write_2M(struct netxen_adapter *, u64, u64);
1356 1360
1357int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu); 1361int nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu);
1358int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu); 1362int netxen_nic_change_mtu(struct net_device *netdev, int new_mtu);
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index f8499e56cbee..20f7c58bd092 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -632,6 +632,9 @@ static int netxen_nic_reg_test(struct net_device *dev)
632 if ((data_read & 0xffff) != adapter->pdev->vendor) 632 if ((data_read & 0xffff) != adapter->pdev->vendor)
633 return 1; 633 return 1;
634 634
635 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
636 return 0;
637
635 data_written = (u32)0xa5a5a5a5; 638 data_written = (u32)0xa5a5a5a5;
636 639
637 NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written); 640 NXWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
@@ -703,6 +706,11 @@ netxen_nic_get_ethtool_stats(struct net_device *dev,
703 } 706 }
704} 707}
705 708
709static u32 netxen_nic_get_tx_csum(struct net_device *dev)
710{
711 return dev->features & NETIF_F_IP_CSUM;
712}
713
706static u32 netxen_nic_get_rx_csum(struct net_device *dev) 714static u32 netxen_nic_get_rx_csum(struct net_device *dev)
707{ 715{
708 struct netxen_adapter *adapter = netdev_priv(dev); 716 struct netxen_adapter *adapter = netdev_priv(dev);
@@ -909,6 +917,7 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
909 .set_ringparam = netxen_nic_set_ringparam, 917 .set_ringparam = netxen_nic_set_ringparam,
910 .get_pauseparam = netxen_nic_get_pauseparam, 918 .get_pauseparam = netxen_nic_get_pauseparam,
911 .set_pauseparam = netxen_nic_set_pauseparam, 919 .set_pauseparam = netxen_nic_set_pauseparam,
920 .get_tx_csum = netxen_nic_get_tx_csum,
912 .set_tx_csum = ethtool_op_set_tx_csum, 921 .set_tx_csum = ethtool_op_set_tx_csum,
913 .set_sg = ethtool_op_set_sg, 922 .set_sg = ethtool_op_set_sg,
914 .get_tso = netxen_nic_get_tso, 923 .get_tso = netxen_nic_get_tso,
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 622e4c8be937..d8bd73d7e296 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -681,14 +681,8 @@ enum {
681#define MIU_TEST_AGT_ADDR_HI (0x08) 681#define MIU_TEST_AGT_ADDR_HI (0x08)
682#define MIU_TEST_AGT_WRDATA_LO (0x10) 682#define MIU_TEST_AGT_WRDATA_LO (0x10)
683#define MIU_TEST_AGT_WRDATA_HI (0x14) 683#define MIU_TEST_AGT_WRDATA_HI (0x14)
684#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
685#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
686#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
687#define MIU_TEST_AGT_RDDATA_LO (0x18) 684#define MIU_TEST_AGT_RDDATA_LO (0x18)
688#define MIU_TEST_AGT_RDDATA_HI (0x1c) 685#define MIU_TEST_AGT_RDDATA_HI (0x1c)
689#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
690#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
691#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
692 686
693#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 687#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
694#define MIU_TEST_AGT_UPPER_ADDR(off) (0) 688#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
@@ -789,9 +783,7 @@ enum {
789 * for backward compability 783 * for backward compability
790 */ 784 */
791#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8) 785#define CRB_NIC_CAPABILITIES_HOST NETXEN_NIC_REG(0x1a8)
792#define CRB_NIC_CAPABILITIES_FW NETXEN_NIC_REG(0x1dc)
793#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270) 786#define CRB_NIC_MSI_MODE_HOST NETXEN_NIC_REG(0x270)
794#define CRB_NIC_MSI_MODE_FW NETXEN_NIC_REG(0x274)
795 787
796#define INTR_SCHEME_PERPORT 0x1 788#define INTR_SCHEME_PERPORT 0x1
797#define MSI_MODE_MULTIFUNC 0x1 789#define MSI_MODE_MULTIFUNC 0x1
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index b1cf46a0c48c..5c496f8d7c49 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -32,7 +32,6 @@
32#define MASK(n) ((1ULL<<(n))-1) 32#define MASK(n) ((1ULL<<(n))-1)
33#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) 33#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
34#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) 34#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
35#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
36#define MS_WIN(addr) (addr & 0x0ffc0000) 35#define MS_WIN(addr) (addr & 0x0ffc0000)
37 36
38#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) 37#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
@@ -63,9 +62,6 @@ static inline void writeq(u64 val, void __iomem *addr)
63} 62}
64#endif 63#endif
65 64
66#define ADDR_IN_RANGE(addr, low, high) \
67 (((addr) < (high)) && ((addr) >= (low)))
68
69#define PCI_OFFSET_FIRST_RANGE(adapter, off) \ 65#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
70 ((adapter)->ahw.pci_base0 + (off)) 66 ((adapter)->ahw.pci_base0 + (off))
71#define PCI_OFFSET_SECOND_RANGE(adapter, off) \ 67#define PCI_OFFSET_SECOND_RANGE(adapter, off) \
@@ -538,7 +534,7 @@ netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
538void netxen_p2_nic_set_multi(struct net_device *netdev) 534void netxen_p2_nic_set_multi(struct net_device *netdev)
539{ 535{
540 struct netxen_adapter *adapter = netdev_priv(netdev); 536 struct netxen_adapter *adapter = netdev_priv(netdev);
541 struct dev_mc_list *mc_ptr; 537 struct netdev_hw_addr *ha;
542 u8 null_addr[6]; 538 u8 null_addr[6];
543 int i; 539 int i;
544 540
@@ -572,8 +568,8 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
572 netxen_nic_enable_mcast_filter(adapter); 568 netxen_nic_enable_mcast_filter(adapter);
573 569
574 i = 0; 570 i = 0;
575 netdev_for_each_mc_addr(mc_ptr, netdev) 571 netdev_for_each_mc_addr(ha, netdev)
576 netxen_nic_set_mcast_addr(adapter, i++, mc_ptr->dmi_addr); 572 netxen_nic_set_mcast_addr(adapter, i++, ha->addr);
577 573
578 /* Clear out remaining addresses */ 574 /* Clear out remaining addresses */
579 while (i < adapter->max_mc_count) 575 while (i < adapter->max_mc_count)
@@ -681,7 +677,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
681void netxen_p3_nic_set_multi(struct net_device *netdev) 677void netxen_p3_nic_set_multi(struct net_device *netdev)
682{ 678{
683 struct netxen_adapter *adapter = netdev_priv(netdev); 679 struct netxen_adapter *adapter = netdev_priv(netdev);
684 struct dev_mc_list *mc_ptr; 680 struct netdev_hw_addr *ha;
685 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 681 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
686 u32 mode = VPORT_MISS_MODE_DROP; 682 u32 mode = VPORT_MISS_MODE_DROP;
687 LIST_HEAD(del_list); 683 LIST_HEAD(del_list);
@@ -708,8 +704,8 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
708 } 704 }
709 705
710 if (!netdev_mc_empty(netdev)) { 706 if (!netdev_mc_empty(netdev)) {
711 netdev_for_each_mc_addr(mc_ptr, netdev) 707 netdev_for_each_mc_addr(ha, netdev)
712 nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list); 708 nx_p3_nic_add_mac(adapter, ha->addr, &del_list);
713 } 709 }
714 710
715send_fw_cmd: 711send_fw_cmd:
@@ -1391,18 +1387,8 @@ netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
1391 u64 addr, u32 *start) 1387 u64 addr, u32 *start)
1392{ 1388{
1393 u32 window; 1389 u32 window;
1394 struct pci_dev *pdev = adapter->pdev;
1395 1390
1396 if ((addr & 0x00ff800) == 0xff800) { 1391 window = OCM_WIN(addr);
1397 if (printk_ratelimit())
1398 dev_warn(&pdev->dev, "QM access not handled\n");
1399 return -EIO;
1400 }
1401
1402 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
1403 window = OCM_WIN_P3P(addr);
1404 else
1405 window = OCM_WIN(addr);
1406 1392
1407 writel(window, adapter->ahw.ocm_win_crb); 1393 writel(window, adapter->ahw.ocm_win_crb);
1408 /* read back to flush */ 1394 /* read back to flush */
@@ -1419,7 +1405,7 @@ netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
1419{ 1405{
1420 void __iomem *addr, *mem_ptr = NULL; 1406 void __iomem *addr, *mem_ptr = NULL;
1421 resource_size_t mem_base; 1407 resource_size_t mem_base;
1422 int ret = -EIO; 1408 int ret;
1423 u32 start; 1409 u32 start;
1424 1410
1425 spin_lock(&adapter->ahw.mem_lock); 1411 spin_lock(&adapter->ahw.mem_lock);
@@ -1428,20 +1414,23 @@ netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
1428 if (ret != 0) 1414 if (ret != 0)
1429 goto unlock; 1415 goto unlock;
1430 1416
1431 addr = pci_base_offset(adapter, start); 1417 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
1432 if (addr) 1418 addr = adapter->ahw.pci_base0 + start;
1433 goto noremap; 1419 } else {
1434 1420 addr = pci_base_offset(adapter, start);
1435 mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK); 1421 if (addr)
1422 goto noremap;
1423
1424 mem_base = pci_resource_start(adapter->pdev, 0) +
1425 (start & PAGE_MASK);
1426 mem_ptr = ioremap(mem_base, PAGE_SIZE);
1427 if (mem_ptr == NULL) {
1428 ret = -EIO;
1429 goto unlock;
1430 }
1436 1431
1437 mem_ptr = ioremap(mem_base, PAGE_SIZE); 1432 addr = mem_ptr + (start & (PAGE_SIZE-1));
1438 if (mem_ptr == NULL) {
1439 ret = -EIO;
1440 goto unlock;
1441 } 1433 }
1442
1443 addr = mem_ptr + (start & (PAGE_SIZE - 1));
1444
1445noremap: 1434noremap:
1446 if (op == 0) /* read */ 1435 if (op == 0) /* read */
1447 *data = readq(addr); 1436 *data = readq(addr);
@@ -1456,6 +1445,28 @@ unlock:
1456 return ret; 1445 return ret;
1457} 1446}
1458 1447
1448void
1449netxen_pci_camqm_read_2M(struct netxen_adapter *adapter, u64 off, u64 *data)
1450{
1451 void __iomem *addr = adapter->ahw.pci_base0 +
1452 NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
1453
1454 spin_lock(&adapter->ahw.mem_lock);
1455 *data = readq(addr);
1456 spin_unlock(&adapter->ahw.mem_lock);
1457}
1458
1459void
1460netxen_pci_camqm_write_2M(struct netxen_adapter *adapter, u64 off, u64 data)
1461{
1462 void __iomem *addr = adapter->ahw.pci_base0 +
1463 NETXEN_PCI_CAMQM_2M_BASE + (off - NETXEN_PCI_CAMQM);
1464
1465 spin_lock(&adapter->ahw.mem_lock);
1466 writeq(data, addr);
1467 spin_unlock(&adapter->ahw.mem_lock);
1468}
1469
1459#define MAX_CTL_CHECK 1000 1470#define MAX_CTL_CHECK 1000
1460 1471
1461static int 1472static int
@@ -1621,9 +1632,8 @@ static int
1621netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, 1632netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
1622 u64 off, u64 data) 1633 u64 off, u64 data)
1623{ 1634{
1624 int i, j, ret; 1635 int j, ret;
1625 u32 temp, off8; 1636 u32 temp, off8;
1626 u64 stride;
1627 void __iomem *mem_crb; 1637 void __iomem *mem_crb;
1628 1638
1629 /* Only 64-bit aligned access */ 1639 /* Only 64-bit aligned access */
@@ -1650,44 +1660,17 @@ netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
1650 return -EIO; 1660 return -EIO;
1651 1661
1652correct: 1662correct:
1653 stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8; 1663 off8 = off & 0xfffffff8;
1654
1655 off8 = off & ~(stride-1);
1656 1664
1657 spin_lock(&adapter->ahw.mem_lock); 1665 spin_lock(&adapter->ahw.mem_lock);
1658 1666
1659 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO)); 1667 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1660 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); 1668 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1661 1669
1662 i = 0;
1663 if (stride == 16) {
1664 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1665 writel((TA_CTL_START | TA_CTL_ENABLE),
1666 (mem_crb + TEST_AGT_CTRL));
1667
1668 for (j = 0; j < MAX_CTL_CHECK; j++) {
1669 temp = readl(mem_crb + TEST_AGT_CTRL);
1670 if ((temp & TA_CTL_BUSY) == 0)
1671 break;
1672 }
1673
1674 if (j >= MAX_CTL_CHECK) {
1675 ret = -EIO;
1676 goto done;
1677 }
1678
1679 i = (off & 0xf) ? 0 : 2;
1680 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
1681 mem_crb + MIU_TEST_AGT_WRDATA(i));
1682 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
1683 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1684 i = (off & 0xf) ? 2 : 0;
1685 }
1686
1687 writel(data & 0xffffffff, 1670 writel(data & 0xffffffff,
1688 mem_crb + MIU_TEST_AGT_WRDATA(i)); 1671 mem_crb + MIU_TEST_AGT_WRDATA_LO);
1689 writel((data >> 32) & 0xffffffff, 1672 writel((data >> 32) & 0xffffffff,
1690 mem_crb + MIU_TEST_AGT_WRDATA(i+1)); 1673 mem_crb + MIU_TEST_AGT_WRDATA_HI);
1691 1674
1692 writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL)); 1675 writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
1693 writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE), 1676 writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
@@ -1707,7 +1690,6 @@ correct:
1707 } else 1690 } else
1708 ret = 0; 1691 ret = 0;
1709 1692
1710done:
1711 spin_unlock(&adapter->ahw.mem_lock); 1693 spin_unlock(&adapter->ahw.mem_lock);
1712 1694
1713 return ret; 1695 return ret;
@@ -1719,7 +1701,7 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
1719{ 1701{
1720 int j, ret; 1702 int j, ret;
1721 u32 temp, off8; 1703 u32 temp, off8;
1722 u64 val, stride; 1704 u64 val;
1723 void __iomem *mem_crb; 1705 void __iomem *mem_crb;
1724 1706
1725 /* Only 64-bit aligned access */ 1707 /* Only 64-bit aligned access */
@@ -1748,9 +1730,7 @@ netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
1748 return -EIO; 1730 return -EIO;
1749 1731
1750correct: 1732correct:
1751 stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8; 1733 off8 = off & 0xfffffff8;
1752
1753 off8 = off & ~(stride-1);
1754 1734
1755 spin_lock(&adapter->ahw.mem_lock); 1735 spin_lock(&adapter->ahw.mem_lock);
1756 1736
@@ -1771,13 +1751,8 @@ correct:
1771 "failed to read through agent\n"); 1751 "failed to read through agent\n");
1772 ret = -EIO; 1752 ret = -EIO;
1773 } else { 1753 } else {
1774 off8 = MIU_TEST_AGT_RDDATA_LO; 1754 val = (u64)(readl(mem_crb + MIU_TEST_AGT_RDDATA_HI)) << 32;
1775 if ((stride == 16) && (off & 0xf)) 1755 val |= readl(mem_crb + MIU_TEST_AGT_RDDATA_LO);
1776 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1777
1778 temp = readl(mem_crb + off8 + 4);
1779 val = (u64)temp << 32;
1780 val |= readl(mem_crb + off8);
1781 *data = val; 1756 *data = val;
1782 ret = 0; 1757 ret = 0;
1783 } 1758 }
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 02876f59cbb2..045a7c8f5bdf 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -614,22 +614,123 @@ static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
614 return NULL; 614 return NULL;
615} 615}
616 616
617#define QLCNIC_FILEHEADER_SIZE (14 * 4)
618
617static int 619static int
618nx_set_product_offs(struct netxen_adapter *adapter) 620netxen_nic_validate_header(struct netxen_adapter *adapter)
619{ 621 {
620 struct uni_table_desc *ptab_descr;
621 const u8 *unirom = adapter->fw->data; 622 const u8 *unirom = adapter->fw->data;
622 uint32_t i; 623 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
624 u32 fw_file_size = adapter->fw->size;
625 u32 tab_size;
623 __le32 entries; 626 __le32 entries;
627 __le32 entry_size;
628
629 if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
630 return -EINVAL;
631
632 entries = cpu_to_le32(directory->num_entries);
633 entry_size = cpu_to_le32(directory->entry_size);
634 tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
635
636 if (fw_file_size < tab_size)
637 return -EINVAL;
638
639 return 0;
640}
641
642static int
643netxen_nic_validate_bootld(struct netxen_adapter *adapter)
644{
645 struct uni_table_desc *tab_desc;
646 struct uni_data_desc *descr;
647 const u8 *unirom = adapter->fw->data;
648 __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
649 NX_UNI_BOOTLD_IDX_OFF));
650 u32 offs;
651 u32 tab_size;
652 u32 data_size;
653
654 tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
655
656 if (!tab_desc)
657 return -EINVAL;
658
659 tab_size = cpu_to_le32(tab_desc->findex) +
660 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
661
662 if (adapter->fw->size < tab_size)
663 return -EINVAL;
664
665 offs = cpu_to_le32(tab_desc->findex) +
666 (cpu_to_le32(tab_desc->entry_size) * (idx));
667 descr = (struct uni_data_desc *)&unirom[offs];
668
669 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
670
671 if (adapter->fw->size < data_size)
672 return -EINVAL;
673
674 return 0;
675}
676
677static int
678netxen_nic_validate_fw(struct netxen_adapter *adapter)
679{
680 struct uni_table_desc *tab_desc;
681 struct uni_data_desc *descr;
682 const u8 *unirom = adapter->fw->data;
683 __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
684 NX_UNI_FIRMWARE_IDX_OFF));
685 u32 offs;
686 u32 tab_size;
687 u32 data_size;
688
689 tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
690
691 if (!tab_desc)
692 return -EINVAL;
693
694 tab_size = cpu_to_le32(tab_desc->findex) +
695 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
696
697 if (adapter->fw->size < tab_size)
698 return -EINVAL;
699
700 offs = cpu_to_le32(tab_desc->findex) +
701 (cpu_to_le32(tab_desc->entry_size) * (idx));
702 descr = (struct uni_data_desc *)&unirom[offs];
703 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
704
705 if (adapter->fw->size < data_size)
706 return -EINVAL;
624 707
708 return 0;
709}
710
711
712static int
713netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
714{
715 struct uni_table_desc *ptab_descr;
716 const u8 *unirom = adapter->fw->data;
625 int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ? 717 int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
626 1 : netxen_p3_has_mn(adapter); 718 1 : netxen_p3_has_mn(adapter);
719 __le32 entries;
720 __le32 entry_size;
721 u32 tab_size;
722 u32 i;
627 723
628 ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL); 724 ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
629 if (ptab_descr == NULL) 725 if (ptab_descr == NULL)
630 return -1; 726 return -EINVAL;
631 727
632 entries = cpu_to_le32(ptab_descr->num_entries); 728 entries = cpu_to_le32(ptab_descr->num_entries);
729 entry_size = cpu_to_le32(ptab_descr->entry_size);
730 tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
731
732 if (adapter->fw->size < tab_size)
733 return -EINVAL;
633 734
634nomn: 735nomn:
635 for (i = 0; i < entries; i++) { 736 for (i = 0; i < entries; i++) {
@@ -658,9 +759,38 @@ nomn:
658 goto nomn; 759 goto nomn;
659 } 760 }
660 761
661 return -1; 762 return -EINVAL;
662} 763}
663 764
765static int
766netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
767{
768 if (netxen_nic_validate_header(adapter)) {
769 dev_err(&adapter->pdev->dev,
770 "unified image: header validation failed\n");
771 return -EINVAL;
772 }
773
774 if (netxen_nic_validate_product_offs(adapter)) {
775 dev_err(&adapter->pdev->dev,
776 "unified image: product validation failed\n");
777 return -EINVAL;
778 }
779
780 if (netxen_nic_validate_bootld(adapter)) {
781 dev_err(&adapter->pdev->dev,
782 "unified image: bootld validation failed\n");
783 return -EINVAL;
784 }
785
786 if (netxen_nic_validate_fw(adapter)) {
787 dev_err(&adapter->pdev->dev,
788 "unified image: firmware validation failed\n");
789 return -EINVAL;
790 }
791
792 return 0;
793}
664 794
665static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter, 795static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
666 u32 section, u32 idx_offset) 796 u32 section, u32 idx_offset)
@@ -890,6 +1020,16 @@ netxen_load_firmware(struct netxen_adapter *adapter)
890 1020
891 flashaddr += 8; 1021 flashaddr += 8;
892 } 1022 }
1023
1024 size = (__force u32)nx_get_fw_size(adapter) % 8;
1025 if (size) {
1026 data = cpu_to_le64(ptr64[i]);
1027
1028 if (adapter->pci_mem_write(adapter,
1029 flashaddr, data))
1030 return -EIO;
1031 }
1032
893 } else { 1033 } else {
894 u64 data; 1034 u64 data;
895 u32 hi, lo; 1035 u32 hi, lo;
@@ -934,27 +1074,23 @@ static int
934netxen_validate_firmware(struct netxen_adapter *adapter) 1074netxen_validate_firmware(struct netxen_adapter *adapter)
935{ 1075{
936 __le32 val; 1076 __le32 val;
937 u32 ver, min_ver, bios, min_size; 1077 u32 ver, min_ver, bios;
938 struct pci_dev *pdev = adapter->pdev; 1078 struct pci_dev *pdev = adapter->pdev;
939 const struct firmware *fw = adapter->fw; 1079 const struct firmware *fw = adapter->fw;
940 u8 fw_type = adapter->fw_type; 1080 u8 fw_type = adapter->fw_type;
941 1081
942 if (fw_type == NX_UNIFIED_ROMIMAGE) { 1082 if (fw_type == NX_UNIFIED_ROMIMAGE) {
943 if (nx_set_product_offs(adapter)) 1083 if (netxen_nic_validate_unified_romimage(adapter))
944 return -EINVAL; 1084 return -EINVAL;
945
946 min_size = NX_UNI_FW_MIN_SIZE;
947 } else { 1085 } else {
948 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); 1086 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
949 if ((__force u32)val != NETXEN_BDINFO_MAGIC) 1087 if ((__force u32)val != NETXEN_BDINFO_MAGIC)
950 return -EINVAL; 1088 return -EINVAL;
951 1089
952 min_size = NX_FW_MIN_SIZE; 1090 if (fw->size < NX_FW_MIN_SIZE)
1091 return -EINVAL;
953 } 1092 }
954 1093
955 if (fw->size < min_size)
956 return -EINVAL;
957
958 val = nx_get_fw_version(adapter); 1094 val = nx_get_fw_version(adapter);
959 1095
960 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1096 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
@@ -1225,10 +1361,12 @@ int netxen_init_firmware(struct netxen_adapter *adapter)
1225 return err; 1361 return err;
1226 1362
1227 NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT); 1363 NXWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
1228 NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
1229 NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE); 1364 NXWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
1230 NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); 1365 NXWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
1231 1366
1367 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
1368 NXWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
1369
1232 return err; 1370 return err;
1233} 1371}
1234 1372
@@ -1763,6 +1901,5 @@ netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
1763void netxen_nic_clear_stats(struct netxen_adapter *adapter) 1901void netxen_nic_clear_stats(struct netxen_adapter *adapter)
1764{ 1902{
1765 memset(&adapter->stats, 0, sizeof(adapter->stats)); 1903 memset(&adapter->stats, 0, sizeof(adapter->stats));
1766 return;
1767} 1904}
1768 1905
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index ce838f7c8b0f..6ce6ce1df6d2 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -782,15 +782,22 @@ netxen_check_options(struct netxen_adapter *adapter)
782 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 782 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
783 adapter->msix_supported = !!use_msi_x; 783 adapter->msix_supported = !!use_msi_x;
784 adapter->rss_supported = !!use_msi_x; 784 adapter->rss_supported = !!use_msi_x;
785 } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) { 785 } else {
786 switch (adapter->ahw.board_type) { 786 u32 flashed_ver = 0;
787 case NETXEN_BRDTYPE_P2_SB31_10G: 787 netxen_rom_fast_read(adapter,
788 case NETXEN_BRDTYPE_P2_SB31_10G_CX4: 788 NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
789 adapter->msix_supported = !!use_msi_x; 789 flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
790 adapter->rss_supported = !!use_msi_x; 790
791 break; 791 if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
792 default: 792 switch (adapter->ahw.board_type) {
793 break; 793 case NETXEN_BRDTYPE_P2_SB31_10G:
794 case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
795 adapter->msix_supported = !!use_msi_x;
796 adapter->rss_supported = !!use_msi_x;
797 break;
798 default:
799 break;
800 }
794 } 801 }
795 } 802 }
796 803
@@ -2304,6 +2311,7 @@ netxen_fwinit_work(struct work_struct *work)
2304 } 2311 }
2305 break; 2312 break;
2306 2313
2314 case NX_DEV_NEED_RESET:
2307 case NX_DEV_INITALIZING: 2315 case NX_DEV_INITALIZING:
2308 if (++adapter->fw_wait_cnt < FW_POLL_THRESH) { 2316 if (++adapter->fw_wait_cnt < FW_POLL_THRESH) {
2309 netxen_schedule_work(adapter, 2317 netxen_schedule_work(adapter,
@@ -2347,6 +2355,9 @@ netxen_detach_work(struct work_struct *work)
2347 2355
2348 ref_cnt = nx_decr_dev_ref_cnt(adapter); 2356 ref_cnt = nx_decr_dev_ref_cnt(adapter);
2349 2357
2358 if (ref_cnt == -EIO)
2359 goto err_ret;
2360
2350 delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY); 2361 delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
2351 2362
2352 adapter->fw_wait_cnt = 0; 2363 adapter->fw_wait_cnt = 0;
@@ -2526,51 +2537,81 @@ static int
2526netxen_sysfs_validate_crb(struct netxen_adapter *adapter, 2537netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
2527 loff_t offset, size_t size) 2538 loff_t offset, size_t size)
2528{ 2539{
2540 size_t crb_size = 4;
2541
2529 if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED)) 2542 if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
2530 return -EIO; 2543 return -EIO;
2531 2544
2532 if ((size != 4) || (offset & 0x3)) 2545 if (offset < NETXEN_PCI_CRBSPACE) {
2533 return -EINVAL; 2546 if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
2547 return -EINVAL;
2534 2548
2535 if (offset < NETXEN_PCI_CRBSPACE) 2549 if (ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
2536 return -EINVAL; 2550 NETXEN_PCI_CAMQM_2M_END))
2551 crb_size = 8;
2552 else
2553 return -EINVAL;
2554 }
2555
2556 if ((size != crb_size) || (offset & (crb_size-1)))
2557 return -EINVAL;
2537 2558
2538 return 0; 2559 return 0;
2539} 2560}
2540 2561
2541static ssize_t 2562static ssize_t
2542netxen_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr, 2563netxen_sysfs_read_crb(struct file *filp, struct kobject *kobj,
2564 struct bin_attribute *attr,
2543 char *buf, loff_t offset, size_t size) 2565 char *buf, loff_t offset, size_t size)
2544{ 2566{
2545 struct device *dev = container_of(kobj, struct device, kobj); 2567 struct device *dev = container_of(kobj, struct device, kobj);
2546 struct netxen_adapter *adapter = dev_get_drvdata(dev); 2568 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2547 u32 data; 2569 u32 data;
2570 u64 qmdata;
2548 int ret; 2571 int ret;
2549 2572
2550 ret = netxen_sysfs_validate_crb(adapter, offset, size); 2573 ret = netxen_sysfs_validate_crb(adapter, offset, size);
2551 if (ret != 0) 2574 if (ret != 0)
2552 return ret; 2575 return ret;
2553 2576
2554 data = NXRD32(adapter, offset); 2577 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
2555 memcpy(buf, &data, size); 2578 ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
2579 NETXEN_PCI_CAMQM_2M_END)) {
2580 netxen_pci_camqm_read_2M(adapter, offset, &qmdata);
2581 memcpy(buf, &qmdata, size);
2582 } else {
2583 data = NXRD32(adapter, offset);
2584 memcpy(buf, &data, size);
2585 }
2586
2556 return size; 2587 return size;
2557} 2588}
2558 2589
2559static ssize_t 2590static ssize_t
2560netxen_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr, 2591netxen_sysfs_write_crb(struct file *filp, struct kobject *kobj,
2592 struct bin_attribute *attr,
2561 char *buf, loff_t offset, size_t size) 2593 char *buf, loff_t offset, size_t size)
2562{ 2594{
2563 struct device *dev = container_of(kobj, struct device, kobj); 2595 struct device *dev = container_of(kobj, struct device, kobj);
2564 struct netxen_adapter *adapter = dev_get_drvdata(dev); 2596 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2565 u32 data; 2597 u32 data;
2598 u64 qmdata;
2566 int ret; 2599 int ret;
2567 2600
2568 ret = netxen_sysfs_validate_crb(adapter, offset, size); 2601 ret = netxen_sysfs_validate_crb(adapter, offset, size);
2569 if (ret != 0) 2602 if (ret != 0)
2570 return ret; 2603 return ret;
2571 2604
2572 memcpy(&data, buf, size); 2605 if (NX_IS_REVISION_P3(adapter->ahw.revision_id) &&
2573 NXWR32(adapter, offset, data); 2606 ADDR_IN_RANGE(offset, NETXEN_PCI_CAMQM,
2607 NETXEN_PCI_CAMQM_2M_END)) {
2608 memcpy(&qmdata, buf, size);
2609 netxen_pci_camqm_write_2M(adapter, offset, qmdata);
2610 } else {
2611 memcpy(&data, buf, size);
2612 NXWR32(adapter, offset, data);
2613 }
2614
2574 return size; 2615 return size;
2575} 2616}
2576 2617
@@ -2588,7 +2629,8 @@ netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
2588} 2629}
2589 2630
2590static ssize_t 2631static ssize_t
2591netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr, 2632netxen_sysfs_read_mem(struct file *filp, struct kobject *kobj,
2633 struct bin_attribute *attr,
2592 char *buf, loff_t offset, size_t size) 2634 char *buf, loff_t offset, size_t size)
2593{ 2635{
2594 struct device *dev = container_of(kobj, struct device, kobj); 2636 struct device *dev = container_of(kobj, struct device, kobj);
@@ -2608,7 +2650,7 @@ netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2608 return size; 2650 return size;
2609} 2651}
2610 2652
2611static ssize_t netxen_sysfs_write_mem(struct kobject *kobj, 2653static ssize_t netxen_sysfs_write_mem(struct file *filp, struct kobject *kobj,
2612 struct bin_attribute *attr, char *buf, 2654 struct bin_attribute *attr, char *buf,
2613 loff_t offset, size_t size) 2655 loff_t offset, size_t size)
2614{ 2656{
@@ -2742,7 +2784,6 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
2742 } endfor_ifa(indev); 2784 } endfor_ifa(indev);
2743 2785
2744 in_dev_put(indev); 2786 in_dev_put(indev);
2745 return;
2746} 2787}
2747 2788
2748static int netxen_netdev_event(struct notifier_block *this, 2789static int netxen_netdev_event(struct notifier_block *this,
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 3892330f244a..4d3f2e2b28bd 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -444,7 +444,7 @@ static void ni5010_timeout(struct net_device *dev)
444 /* Try to restart the adaptor. */ 444 /* Try to restart the adaptor. */
445 /* FIXME: Give it a real kick here */ 445 /* FIXME: Give it a real kick here */
446 chipset_init(dev, 1); 446 chipset_init(dev, 1);
447 dev->trans_start = jiffies; 447 dev->trans_start = jiffies; /* prevent tx timeout */
448 netif_wake_queue(dev); 448 netif_wake_queue(dev);
449} 449}
450 450
@@ -460,7 +460,6 @@ static int ni5010_send_packet(struct sk_buff *skb, struct net_device *dev)
460 460
461 netif_stop_queue(dev); 461 netif_stop_queue(dev);
462 hardware_send_packet(dev, (unsigned char *)skb->data, skb->len, length-skb->len); 462 hardware_send_packet(dev, (unsigned char *)skb->data, skb->len, length-skb->len);
463 dev->trans_start = jiffies;
464 dev_kfree_skb (skb); 463 dev_kfree_skb (skb);
465 return NETDEV_TX_OK; 464 return NETDEV_TX_OK;
466} 465}
@@ -515,8 +514,6 @@ static void dump_packet(void *buf, int len)
515 if (i % 16 == 15) printk("\n"); 514 if (i % 16 == 15) printk("\n");
516 } 515 }
517 printk("\n"); 516 printk("\n");
518
519 return;
520} 517}
521 518
522/* We have a good packet, get it out of the buffer. */ 519/* We have a good packet, get it out of the buffer. */
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index f7a8f707361e..9bddb5fa7a96 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -595,7 +595,7 @@ static int init586(struct net_device *dev)
595 struct iasetup_cmd_struct __iomem *ias_cmd; 595 struct iasetup_cmd_struct __iomem *ias_cmd;
596 struct tdr_cmd_struct __iomem *tdr_cmd; 596 struct tdr_cmd_struct __iomem *tdr_cmd;
597 struct mcsetup_cmd_struct __iomem *mc_cmd; 597 struct mcsetup_cmd_struct __iomem *mc_cmd;
598 struct dev_mc_list *dmi; 598 struct netdev_hw_addr *ha;
599 int num_addrs = netdev_mc_count(dev); 599 int num_addrs = netdev_mc_count(dev);
600 600
601 ptr = p->scb + 1; 601 ptr = p->scb + 1;
@@ -724,8 +724,8 @@ static int init586(struct net_device *dev)
724 writew(num_addrs * 6, &mc_cmd->mc_cnt); 724 writew(num_addrs * 6, &mc_cmd->mc_cnt);
725 725
726 i = 0; 726 i = 0;
727 netdev_for_each_mc_addr(dmi, dev) 727 netdev_for_each_mc_addr(ha, dev)
728 memcpy_toio(mc_cmd->mc_list[i++], dmi->dmi_addr, 6); 728 memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6);
729 729
730 writew(make16(mc_cmd), &p->scb->cbl_offset); 730 writew(make16(mc_cmd), &p->scb->cbl_offset);
731 writeb(CUC_START, &p->scb->cmd_cuc); 731 writeb(CUC_START, &p->scb->cmd_cuc);
@@ -1147,7 +1147,7 @@ static void ni52_timeout(struct net_device *dev)
1147 writeb(CUC_START, &p->scb->cmd_cuc); 1147 writeb(CUC_START, &p->scb->cmd_cuc);
1148 ni_attn586(); 1148 ni_attn586();
1149 wait_for_scb_cmd(dev); 1149 wait_for_scb_cmd(dev);
1150 dev->trans_start = jiffies; 1150 dev->trans_start = jiffies; /* prevent tx timeout */
1151 return 0; 1151 return 0;
1152 } 1152 }
1153#endif 1153#endif
@@ -1165,7 +1165,7 @@ static void ni52_timeout(struct net_device *dev)
1165 ni52_close(dev); 1165 ni52_close(dev);
1166 ni52_open(dev); 1166 ni52_open(dev);
1167 } 1167 }
1168 dev->trans_start = jiffies; 1168 dev->trans_start = jiffies; /* prevent tx timeout */
1169} 1169}
1170 1170
1171/****************************************************** 1171/******************************************************
@@ -1218,7 +1218,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
1218 writeb(CUC_START, &p->scb->cmd_cuc); 1218 writeb(CUC_START, &p->scb->cmd_cuc);
1219 } 1219 }
1220 ni_attn586(); 1220 ni_attn586();
1221 dev->trans_start = jiffies;
1222 if (!i) 1221 if (!i)
1223 dev_kfree_skb(skb); 1222 dev_kfree_skb(skb);
1224 wait_for_scb_cmd(dev); 1223 wait_for_scb_cmd(dev);
@@ -1240,7 +1239,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
1240 writew(0, &p->nop_cmds[next_nop]->cmd_status); 1239 writew(0, &p->nop_cmds[next_nop]->cmd_status);
1241 1240
1242 writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link); 1241 writew(make16(p->xmit_cmds[0]), &p->nop_cmds[p->nop_point]->cmd_link);
1243 dev->trans_start = jiffies;
1244 p->nop_point = next_nop; 1242 p->nop_point = next_nop;
1245 dev_kfree_skb(skb); 1243 dev_kfree_skb(skb);
1246# endif 1244# endif
@@ -1256,7 +1254,6 @@ static netdev_tx_t ni52_send_packet(struct sk_buff *skb,
1256 writew(0, &p->nop_cmds[next_nop]->cmd_status); 1254 writew(0, &p->nop_cmds[next_nop]->cmd_status);
1257 writew(make16(p->xmit_cmds[p->xmit_count]), 1255 writew(make16(p->xmit_cmds[p->xmit_count]),
1258 &p->nop_cmds[p->xmit_count]->cmd_link); 1256 &p->nop_cmds[p->xmit_count]->cmd_link);
1259 dev->trans_start = jiffies;
1260 p->xmit_count = next_nop; 1257 p->xmit_count = next_nop;
1261 { 1258 {
1262 unsigned long flags; 1259 unsigned long flags;
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index 9225c76cac40..da228a0dd6cd 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -784,7 +784,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
784 if(!p->lock) 784 if(!p->lock)
785 if (p->tmdnum || !p->xmit_queued) 785 if (p->tmdnum || !p->xmit_queued)
786 netif_wake_queue(dev); 786 netif_wake_queue(dev);
787 dev->trans_start = jiffies; 787 dev->trans_start = jiffies; /* prevent tx timeout */
788 } 788 }
789 else 789 else
790 writedatareg(CSR0_STRT | csr0); 790 writedatareg(CSR0_STRT | csr0);
@@ -1150,7 +1150,7 @@ static void ni65_timeout(struct net_device *dev)
1150 printk("%02x ",p->tmdhead[i].u.s.status); 1150 printk("%02x ",p->tmdhead[i].u.s.status);
1151 printk("\n"); 1151 printk("\n");
1152 ni65_lance_reinit(dev); 1152 ni65_lance_reinit(dev);
1153 dev->trans_start = jiffies; 1153 dev->trans_start = jiffies; /* prevent tx timeout */
1154 netif_wake_queue(dev); 1154 netif_wake_queue(dev);
1155} 1155}
1156 1156
@@ -1213,7 +1213,6 @@ static netdev_tx_t ni65_send_packet(struct sk_buff *skb,
1213 netif_wake_queue(dev); 1213 netif_wake_queue(dev);
1214 1214
1215 p->lock = 0; 1215 p->lock = 0;
1216 dev->trans_start = jiffies;
1217 1216
1218 spin_unlock_irqrestore(&p->ring_lock, flags); 1217 spin_unlock_irqrestore(&p->ring_lock, flags);
1219 } 1218 }
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 406d72c4eb71..63e8e3893bd6 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -36,8 +36,8 @@
36#include "niu.h" 36#include "niu.h"
37 37
38#define DRV_MODULE_NAME "niu" 38#define DRV_MODULE_NAME "niu"
39#define DRV_MODULE_VERSION "1.0" 39#define DRV_MODULE_VERSION "1.1"
40#define DRV_MODULE_RELDATE "Nov 14, 2008" 40#define DRV_MODULE_RELDATE "Apr 22, 2010"
41 41
42static char version[] __devinitdata = 42static char version[] __devinitdata =
43 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 43 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -3444,6 +3444,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3444 struct rx_ring_info *rp) 3444 struct rx_ring_info *rp)
3445{ 3445{
3446 unsigned int index = rp->rcr_index; 3446 unsigned int index = rp->rcr_index;
3447 struct rx_pkt_hdr1 *rh;
3447 struct sk_buff *skb; 3448 struct sk_buff *skb;
3448 int len, num_rcr; 3449 int len, num_rcr;
3449 3450
@@ -3477,9 +3478,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3477 if (num_rcr == 1) { 3478 if (num_rcr == 1) {
3478 int ptype; 3479 int ptype;
3479 3480
3480 off += 2;
3481 append_size -= 2;
3482
3483 ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT); 3481 ptype = (val >> RCR_ENTRY_PKT_TYPE_SHIFT);
3484 if ((ptype == RCR_PKT_TYPE_TCP || 3482 if ((ptype == RCR_PKT_TYPE_TCP ||
3485 ptype == RCR_PKT_TYPE_UDP) && 3483 ptype == RCR_PKT_TYPE_UDP) &&
@@ -3488,8 +3486,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3488 skb->ip_summed = CHECKSUM_UNNECESSARY; 3486 skb->ip_summed = CHECKSUM_UNNECESSARY;
3489 else 3487 else
3490 skb->ip_summed = CHECKSUM_NONE; 3488 skb->ip_summed = CHECKSUM_NONE;
3491 } 3489 } else if (!(val & RCR_ENTRY_MULTI))
3492 if (!(val & RCR_ENTRY_MULTI))
3493 append_size = len - skb->len; 3490 append_size = len - skb->len;
3494 3491
3495 niu_rx_skb_append(skb, page, off, append_size); 3492 niu_rx_skb_append(skb, page, off, append_size);
@@ -3510,8 +3507,17 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
3510 } 3507 }
3511 rp->rcr_index = index; 3508 rp->rcr_index = index;
3512 3509
3513 skb_reserve(skb, NET_IP_ALIGN); 3510 len += sizeof(*rh);
3514 __pskb_pull_tail(skb, min(len, VLAN_ETH_HLEN)); 3511 len = min_t(int, len, sizeof(*rh) + VLAN_ETH_HLEN);
3512 __pskb_pull_tail(skb, len);
3513
3514 rh = (struct rx_pkt_hdr1 *) skb->data;
3515 if (np->dev->features & NETIF_F_RXHASH)
3516 skb->rxhash = ((u32)rh->hashval2_0 << 24 |
3517 (u32)rh->hashval2_1 << 16 |
3518 (u32)rh->hashval1_1 << 8 |
3519 (u32)rh->hashval1_2 << 0);
3520 skb_pull(skb, sizeof(*rh));
3515 3521
3516 rp->rx_packets++; 3522 rp->rx_packets++;
3517 rp->rx_bytes += skb->len; 3523 rp->rx_bytes += skb->len;
@@ -4946,7 +4952,9 @@ static int niu_init_one_rx_channel(struct niu *np, struct rx_ring_info *rp)
4946 RX_DMA_CTL_STAT_RCRTO | 4952 RX_DMA_CTL_STAT_RCRTO |
4947 RX_DMA_CTL_STAT_RBR_EMPTY)); 4953 RX_DMA_CTL_STAT_RBR_EMPTY));
4948 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32); 4954 nw64(RXDMA_CFIG1(channel), rp->mbox_dma >> 32);
4949 nw64(RXDMA_CFIG2(channel), (rp->mbox_dma & 0x00000000ffffffc0)); 4955 nw64(RXDMA_CFIG2(channel),
4956 ((rp->mbox_dma & RXDMA_CFIG2_MBADDR_L) |
4957 RXDMA_CFIG2_FULL_HDR));
4950 nw64(RBR_CFIG_A(channel), 4958 nw64(RBR_CFIG_A(channel),
4951 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) | 4959 ((u64)rp->rbr_table_size << RBR_CFIG_A_LEN_SHIFT) |
4952 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR))); 4960 (rp->rbr_dma & (RBR_CFIG_A_STADDR_BASE | RBR_CFIG_A_STADDR)));
@@ -6314,7 +6322,6 @@ static void niu_set_rx_mode(struct net_device *dev)
6314{ 6322{
6315 struct niu *np = netdev_priv(dev); 6323 struct niu *np = netdev_priv(dev);
6316 int i, alt_cnt, err; 6324 int i, alt_cnt, err;
6317 struct dev_addr_list *addr;
6318 struct netdev_hw_addr *ha; 6325 struct netdev_hw_addr *ha;
6319 unsigned long flags; 6326 unsigned long flags;
6320 u16 hash[16] = { 0, }; 6327 u16 hash[16] = { 0, };
@@ -6366,8 +6373,8 @@ static void niu_set_rx_mode(struct net_device *dev)
6366 for (i = 0; i < 16; i++) 6373 for (i = 0; i < 16; i++)
6367 hash[i] = 0xffff; 6374 hash[i] = 0xffff;
6368 } else if (!netdev_mc_empty(dev)) { 6375 } else if (!netdev_mc_empty(dev)) {
6369 netdev_for_each_mc_addr(addr, dev) { 6376 netdev_for_each_mc_addr(ha, dev) {
6370 u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr); 6377 u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
6371 6378
6372 crc >>= 24; 6379 crc >>= 24;
6373 hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); 6380 hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
@@ -7911,6 +7918,18 @@ static int niu_phys_id(struct net_device *dev, u32 data)
7911 return 0; 7918 return 0;
7912} 7919}
7913 7920
7921static int niu_set_flags(struct net_device *dev, u32 data)
7922{
7923 if (data & (ETH_FLAG_LRO | ETH_FLAG_NTUPLE))
7924 return -EOPNOTSUPP;
7925
7926 if (data & ETH_FLAG_RXHASH)
7927 dev->features |= NETIF_F_RXHASH;
7928 else
7929 dev->features &= ~NETIF_F_RXHASH;
7930 return 0;
7931}
7932
7914static const struct ethtool_ops niu_ethtool_ops = { 7933static const struct ethtool_ops niu_ethtool_ops = {
7915 .get_drvinfo = niu_get_drvinfo, 7934 .get_drvinfo = niu_get_drvinfo,
7916 .get_link = ethtool_op_get_link, 7935 .get_link = ethtool_op_get_link,
@@ -7927,6 +7946,8 @@ static const struct ethtool_ops niu_ethtool_ops = {
7927 .phys_id = niu_phys_id, 7946 .phys_id = niu_phys_id,
7928 .get_rxnfc = niu_get_nfc, 7947 .get_rxnfc = niu_get_nfc,
7929 .set_rxnfc = niu_set_nfc, 7948 .set_rxnfc = niu_set_nfc,
7949 .set_flags = niu_set_flags,
7950 .get_flags = ethtool_op_get_flags,
7930}; 7951};
7931 7952
7932static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent, 7953static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
@@ -9755,6 +9776,12 @@ static void __devinit niu_device_announce(struct niu *np)
9755 } 9776 }
9756} 9777}
9757 9778
9779static void __devinit niu_set_basic_features(struct net_device *dev)
9780{
9781 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM |
9782 NETIF_F_GRO | NETIF_F_RXHASH);
9783}
9784
9758static int __devinit niu_pci_init_one(struct pci_dev *pdev, 9785static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9759 const struct pci_device_id *ent) 9786 const struct pci_device_id *ent)
9760{ 9787{
@@ -9839,7 +9866,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9839 } 9866 }
9840 } 9867 }
9841 9868
9842 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 9869 niu_set_basic_features(dev);
9843 9870
9844 np->regs = pci_ioremap_bar(pdev, 0); 9871 np->regs = pci_ioremap_bar(pdev, 0);
9845 if (!np->regs) { 9872 if (!np->regs) {
@@ -10081,7 +10108,7 @@ static int __devinit niu_of_probe(struct of_device *op,
10081 goto err_out_free_dev; 10108 goto err_out_free_dev;
10082 } 10109 }
10083 10110
10084 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 10111 niu_set_basic_features(dev);
10085 10112
10086 np->regs = of_ioremap(&op->resource[1], 0, 10113 np->regs = of_ioremap(&op->resource[1], 0,
10087 resource_size(&op->resource[1]), 10114 resource_size(&op->resource[1]),
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index 3bd0b5933d59..d6715465f35d 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -2706,7 +2706,7 @@ struct rx_pkt_hdr0 {
2706#if defined(__LITTLE_ENDIAN_BITFIELD) 2706#if defined(__LITTLE_ENDIAN_BITFIELD)
2707 u8 inputport:2, 2707 u8 inputport:2,
2708 maccheck:1, 2708 maccheck:1,
2709 class:4; 2709 class:5;
2710 u8 vlan:1, 2710 u8 vlan:1,
2711 llcsnap:1, 2711 llcsnap:1,
2712 noport:1, 2712 noport:1,
@@ -2715,7 +2715,7 @@ struct rx_pkt_hdr0 {
2715 tres:2, 2715 tres:2,
2716 tzfvld:1; 2716 tzfvld:1;
2717#elif defined(__BIG_ENDIAN_BITFIELD) 2717#elif defined(__BIG_ENDIAN_BITFIELD)
2718 u8 class:4, 2718 u8 class:5,
2719 maccheck:1, 2719 maccheck:1,
2720 inputport:2; 2720 inputport:2;
2721 u8 tzfvld:1, 2721 u8 tzfvld:1,
@@ -2775,6 +2775,9 @@ struct rx_pkt_hdr1 {
2775 /* Bits 7:0 of hash value, H1. */ 2775 /* Bits 7:0 of hash value, H1. */
2776 u8 hashval1_2; 2776 u8 hashval1_2;
2777 2777
2778 u8 hwrsvd5;
2779 u8 hwrsvd6;
2780
2778 u8 usrdata_0; /* Bits 39:32 of user data. */ 2781 u8 usrdata_0; /* Bits 39:32 of user data. */
2779 u8 usrdata_1; /* Bits 31:24 of user data. */ 2782 u8 usrdata_1; /* Bits 31:24 of user data. */
2780 u8 usrdata_2; /* Bits 23:16 of user data. */ 2783 u8 usrdata_2; /* Bits 23:16 of user data. */
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 8aadc8e2ddd7..000e792d57c0 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -189,12 +189,19 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
189 189
190 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port)); 190 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
191 while (mix_orcnt.s.orcnt) { 191 while (mix_orcnt.s.orcnt) {
192 spin_lock_irqsave(&p->tx_list.lock, flags);
193
194 mix_orcnt.u64 = cvmx_read_csr(CVMX_MIXX_ORCNT(port));
195
196 if (mix_orcnt.s.orcnt == 0) {
197 spin_unlock_irqrestore(&p->tx_list.lock, flags);
198 break;
199 }
200
192 dma_sync_single_for_cpu(p->dev, p->tx_ring_handle, 201 dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
193 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), 202 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
194 DMA_BIDIRECTIONAL); 203 DMA_BIDIRECTIONAL);
195 204
196 spin_lock_irqsave(&p->tx_list.lock, flags);
197
198 re.d64 = p->tx_ring[p->tx_next_clean]; 205 re.d64 = p->tx_ring[p->tx_next_clean];
199 p->tx_next_clean = 206 p->tx_next_clean =
200 (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE; 207 (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
@@ -317,7 +324,6 @@ good:
317 skb->protocol = eth_type_trans(skb, netdev); 324 skb->protocol = eth_type_trans(skb, netdev);
318 netdev->stats.rx_packets++; 325 netdev->stats.rx_packets++;
319 netdev->stats.rx_bytes += skb->len; 326 netdev->stats.rx_bytes += skb->len;
320 netdev->last_rx = jiffies;
321 netif_receive_skb(skb); 327 netif_receive_skb(skb);
322 rc = 0; 328 rc = 0;
323 } else if (re.s.code == RING_ENTRY_CODE_MORE) { 329 } else if (re.s.code == RING_ENTRY_CODE_MORE) {
@@ -374,7 +380,6 @@ done:
374 mix_ircnt.s.ircnt = 1; 380 mix_ircnt.s.ircnt = 1;
375 cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64); 381 cvmx_write_csr(CVMX_MIXX_IRCNT(port), mix_ircnt.u64);
376 return rc; 382 return rc;
377
378} 383}
379 384
380static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) 385static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
@@ -384,7 +389,6 @@ static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
384 union cvmx_mixx_ircnt mix_ircnt; 389 union cvmx_mixx_ircnt mix_ircnt;
385 int rc; 390 int rc;
386 391
387
388 mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port)); 392 mix_ircnt.u64 = cvmx_read_csr(CVMX_MIXX_IRCNT(port));
389 while (work_done < budget && mix_ircnt.s.ircnt) { 393 while (work_done < budget && mix_ircnt.s.ircnt) {
390 394
@@ -475,13 +479,12 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
475 unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ 479 unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
476 unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ 480 unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
477 struct octeon_mgmt_cam_state cam_state; 481 struct octeon_mgmt_cam_state cam_state;
478 struct dev_addr_list *list; 482 struct netdev_hw_addr *ha;
479 struct list_head *pos;
480 int available_cam_entries; 483 int available_cam_entries;
481 484
482 memset(&cam_state, 0, sizeof(cam_state)); 485 memset(&cam_state, 0, sizeof(cam_state));
483 486
484 if ((netdev->flags & IFF_PROMISC) || netdev->dev_addrs.count > 7) { 487 if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
485 cam_mode = 0; 488 cam_mode = 0;
486 available_cam_entries = 8; 489 available_cam_entries = 8;
487 } else { 490 } else {
@@ -489,13 +492,13 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
489 * One CAM entry for the primary address, leaves seven 492 * One CAM entry for the primary address, leaves seven
490 * for the secondary addresses. 493 * for the secondary addresses.
491 */ 494 */
492 available_cam_entries = 7 - netdev->dev_addrs.count; 495 available_cam_entries = 7 - netdev->uc.count;
493 } 496 }
494 497
495 if (netdev->flags & IFF_MULTICAST) { 498 if (netdev->flags & IFF_MULTICAST) {
496 if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) || 499 if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
497 netdev_mc_count(netdev) > available_cam_entries) 500 netdev_mc_count(netdev) > available_cam_entries)
498 multicast_mode = 2; /* 1 - Accept all multicast. */ 501 multicast_mode = 2; /* 2 - Accept all multicast. */
499 else 502 else
500 multicast_mode = 0; /* 0 - Use CAM. */ 503 multicast_mode = 0; /* 0 - Use CAM. */
501 } 504 }
@@ -503,19 +506,14 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
503 if (cam_mode == 1) { 506 if (cam_mode == 1) {
504 /* Add primary address. */ 507 /* Add primary address. */
505 octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr); 508 octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
506 list_for_each(pos, &netdev->dev_addrs.list) { 509 netdev_for_each_uc_addr(ha, netdev)
507 struct netdev_hw_addr *hw_addr; 510 octeon_mgmt_cam_state_add(&cam_state, ha->addr);
508 hw_addr = list_entry(pos, struct netdev_hw_addr, list);
509 octeon_mgmt_cam_state_add(&cam_state, hw_addr->addr);
510 list = list->next;
511 }
512 } 511 }
513 if (multicast_mode == 0) { 512 if (multicast_mode == 0) {
514 netdev_for_each_mc_addr(list, netdev) 513 netdev_for_each_mc_addr(ha, netdev)
515 octeon_mgmt_cam_state_add(&cam_state, list->da_addr); 514 octeon_mgmt_cam_state_add(&cam_state, ha->addr);
516 } 515 }
517 516
518
519 spin_lock_irqsave(&p->lock, flags); 517 spin_lock_irqsave(&p->lock, flags);
520 518
521 /* Disable packet I/O. */ 519 /* Disable packet I/O. */
@@ -524,7 +522,6 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
524 agl_gmx_prtx.s.en = 0; 522 agl_gmx_prtx.s.en = 0;
525 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64); 523 cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);
526 524
527
528 adr_ctl.u64 = 0; 525 adr_ctl.u64 = 0;
529 adr_ctl.s.cam_mode = cam_mode; 526 adr_ctl.s.cam_mode = cam_mode;
530 adr_ctl.s.mcst = multicast_mode; 527 adr_ctl.s.mcst = multicast_mode;
@@ -597,8 +594,7 @@ static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
597 mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port)); 594 mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));
598 595
599 /* Clear any pending interrupts */ 596 /* Clear any pending interrupts */
600 cvmx_write_csr(CVMX_MIXX_ISR(port), 597 cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64);
601 cvmx_read_csr(CVMX_MIXX_ISR(port)));
602 cvmx_read_csr(CVMX_MIXX_ISR(port)); 598 cvmx_read_csr(CVMX_MIXX_ISR(port));
603 599
604 if (mixx_isr.s.irthresh) { 600 if (mixx_isr.s.irthresh) {
@@ -832,9 +828,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
832 mix_irhwm.s.irhwm = 0; 828 mix_irhwm.s.irhwm = 0;
833 cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64); 829 cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);
834 830
835 /* Interrupt when we have 5 or more packets to clean. */ 831 /* Interrupt when we have 1 or more packets to clean. */
836 mix_orhwm.u64 = 0; 832 mix_orhwm.u64 = 0;
837 mix_orhwm.s.orhwm = 5; 833 mix_orhwm.s.orhwm = 1;
838 cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64); 834 cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);
839 835
840 /* Enable receive and transmit interrupts */ 836 /* Enable receive and transmit interrupts */
@@ -928,7 +924,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
928 924
929 octeon_mgmt_reset_hw(p); 925 octeon_mgmt_reset_hw(p);
930 926
931
932 free_irq(p->irq, netdev); 927 free_irq(p->irq, netdev);
933 928
934 /* dma_unmap is a nop on Octeon, so just free everything. */ 929 /* dma_unmap is a nop on Octeon, so just free everything. */
@@ -945,7 +940,6 @@ static int octeon_mgmt_stop(struct net_device *netdev)
945 DMA_BIDIRECTIONAL); 940 DMA_BIDIRECTIONAL);
946 kfree(p->tx_ring); 941 kfree(p->tx_ring);
947 942
948
949 return 0; 943 return 0;
950} 944}
951 945
@@ -955,6 +949,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
955 int port = p->port; 949 int port = p->port;
956 union mgmt_port_ring_entry re; 950 union mgmt_port_ring_entry re;
957 unsigned long flags; 951 unsigned long flags;
952 int rv = NETDEV_TX_BUSY;
958 953
959 re.d64 = 0; 954 re.d64 = 0;
960 re.s.len = skb->len; 955 re.s.len = skb->len;
@@ -964,15 +959,18 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
964 959
965 spin_lock_irqsave(&p->tx_list.lock, flags); 960 spin_lock_irqsave(&p->tx_list.lock, flags);
966 961
962 if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
963 spin_unlock_irqrestore(&p->tx_list.lock, flags);
964 netif_stop_queue(netdev);
965 spin_lock_irqsave(&p->tx_list.lock, flags);
966 }
967
967 if (unlikely(p->tx_current_fill >= 968 if (unlikely(p->tx_current_fill >=
968 ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) { 969 ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
969 spin_unlock_irqrestore(&p->tx_list.lock, flags); 970 spin_unlock_irqrestore(&p->tx_list.lock, flags);
970
971 dma_unmap_single(p->dev, re.s.addr, re.s.len, 971 dma_unmap_single(p->dev, re.s.addr, re.s.len,
972 DMA_TO_DEVICE); 972 DMA_TO_DEVICE);
973 973 goto out;
974 netif_stop_queue(netdev);
975 return NETDEV_TX_BUSY;
976 } 974 }
977 975
978 __skb_queue_tail(&p->tx_list, skb); 976 __skb_queue_tail(&p->tx_list, skb);
@@ -994,10 +992,10 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
994 /* Ring the bell. */ 992 /* Ring the bell. */
995 cvmx_write_csr(CVMX_MIXX_ORING2(port), 1); 993 cvmx_write_csr(CVMX_MIXX_ORING2(port), 1);
996 994
997 netdev->trans_start = jiffies; 995 rv = NETDEV_TX_OK;
998 octeon_mgmt_clean_tx_buffers(p); 996out:
999 octeon_mgmt_update_tx_stats(netdev); 997 octeon_mgmt_update_tx_stats(netdev);
1000 return NETDEV_TX_OK; 998 return rv;
1001} 999}
1002 1000
1003#ifdef CONFIG_NET_POLL_CONTROLLER 1001#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1007,7 +1005,6 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
1007 1005
1008 octeon_mgmt_receive_packets(p, 16); 1006 octeon_mgmt_receive_packets(p, 16);
1009 octeon_mgmt_update_rx_stats(netdev); 1007 octeon_mgmt_update_rx_stats(netdev);
1010 return;
1011} 1008}
1012#endif 1009#endif
1013 1010
@@ -1107,7 +1104,6 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
1107 netdev->netdev_ops = &octeon_mgmt_ops; 1104 netdev->netdev_ops = &octeon_mgmt_ops;
1108 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; 1105 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1109 1106
1110
1111 /* The mgmt ports get the first N MACs. */ 1107 /* The mgmt ports get the first N MACs. */
1112 for (i = 0; i < 6; i++) 1108 for (i = 0; i < 6; i++)
1113 netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i]; 1109 netdev->dev_addr[i] = octeon_bootinfo->mac_addr_base[i];
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 370c147d08a3..8ab6ae0a6107 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1472,8 +1472,6 @@ static void pasemi_mac_queue_csdesc(const struct sk_buff *skb,
1472 txring->next_to_fill = fill; 1472 txring->next_to_fill = fill;
1473 1473
1474 write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2); 1474 write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2);
1475
1476 return;
1477} 1475}
1478 1476
1479static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) 1477static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 36785853a149..56f3fc45dbaa 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1354,7 +1354,6 @@ static int netdrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1354 NETDRV_W32(TxStatus0 + (entry * sizeof(u32)), 1354 NETDRV_W32(TxStatus0 + (entry * sizeof(u32)),
1355 tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1355 tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1356 1356
1357 dev->trans_start = jiffies;
1358 atomic_inc(&tp->cur_tx); 1357 atomic_inc(&tp->cur_tx);
1359 if ((atomic_read(&tp->cur_tx) - atomic_read(&tp->dirty_tx)) >= NUM_TX_DESC) 1358 if ((atomic_read(&tp->cur_tx) - atomic_read(&tp->dirty_tx)) >= NUM_TX_DESC)
1360 netif_stop_queue(dev); 1359 netif_stop_queue(dev);
@@ -1813,12 +1812,12 @@ static void netdrv_set_rx_mode(struct net_device *dev)
1813 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1812 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1814 mc_filter[1] = mc_filter[0] = 0xffffffff; 1813 mc_filter[1] = mc_filter[0] = 0xffffffff;
1815 } else { 1814 } else {
1816 struct dev_mc_list *mclist; 1815 struct netdev_hw_addr *ha;
1817 1816
1818 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1817 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1819 mc_filter[1] = mc_filter[0] = 0; 1818 mc_filter[1] = mc_filter[0] = 0;
1820 netdev_for_each_mc_addr(mclist, dev) { 1819 netdev_for_each_mc_addr(ha, dev) {
1821 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 1820 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1822 1821
1823 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 1822 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1824 } 1823 }
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 3d1d3a7b7ed3..10ee106a1617 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -93,7 +93,6 @@ earlier 3Com products.
93#include <pcmcia/cisreg.h> 93#include <pcmcia/cisreg.h>
94#include <pcmcia/ciscode.h> 94#include <pcmcia/ciscode.h>
95#include <pcmcia/ds.h> 95#include <pcmcia/ds.h>
96#include <pcmcia/mem_op.h>
97 96
98#include <asm/uaccess.h> 97#include <asm/uaccess.h>
99#include <asm/io.h> 98#include <asm/io.h>
@@ -200,7 +199,6 @@ enum Window4 { /* Window 4: Xcvr/media bits. */
200 199
201struct el3_private { 200struct el3_private {
202 struct pcmcia_device *p_dev; 201 struct pcmcia_device *p_dev;
203 dev_node_t node;
204 u16 advertising, partner; /* NWay media advertisement */ 202 u16 advertising, partner; /* NWay media advertisement */
205 unsigned char phys; /* MII device address */ 203 unsigned char phys; /* MII device address */
206 unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */ 204 unsigned int autoselect:1, default_media:3; /* Read from the EEPROM/Wn3_Config. */
@@ -283,8 +281,6 @@ static int tc574_probe(struct pcmcia_device *link)
283 spin_lock_init(&lp->window_lock); 281 spin_lock_init(&lp->window_lock);
284 link->io.NumPorts1 = 32; 282 link->io.NumPorts1 = 32;
285 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 283 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
286 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
287 link->irq.Handler = &el3_interrupt;
288 link->conf.Attributes = CONF_ENABLE_IRQ; 284 link->conf.Attributes = CONF_ENABLE_IRQ;
289 link->conf.IntType = INT_MEMORY_AND_IO; 285 link->conf.IntType = INT_MEMORY_AND_IO;
290 link->conf.ConfigIndex = 1; 286 link->conf.ConfigIndex = 1;
@@ -311,8 +307,7 @@ static void tc574_detach(struct pcmcia_device *link)
311 307
312 dev_dbg(&link->dev, "3c574_detach()\n"); 308 dev_dbg(&link->dev, "3c574_detach()\n");
313 309
314 if (link->dev_node) 310 unregister_netdev(dev);
315 unregister_netdev(dev);
316 311
317 tc574_release(link); 312 tc574_release(link);
318 313
@@ -353,7 +348,7 @@ static int tc574_config(struct pcmcia_device *link)
353 if (i != 0) 348 if (i != 0)
354 goto failed; 349 goto failed;
355 350
356 ret = pcmcia_request_irq(link, &link->irq); 351 ret = pcmcia_request_irq(link, el3_interrupt);
357 if (ret) 352 if (ret)
358 goto failed; 353 goto failed;
359 354
@@ -361,7 +356,7 @@ static int tc574_config(struct pcmcia_device *link)
361 if (ret) 356 if (ret)
362 goto failed; 357 goto failed;
363 358
364 dev->irq = link->irq.AssignedIRQ; 359 dev->irq = link->irq;
365 dev->base_addr = link->io.BasePort1; 360 dev->base_addr = link->io.BasePort1;
366 361
367 ioaddr = dev->base_addr; 362 ioaddr = dev->base_addr;
@@ -446,17 +441,13 @@ static int tc574_config(struct pcmcia_device *link)
446 } 441 }
447 } 442 }
448 443
449 link->dev_node = &lp->node;
450 SET_NETDEV_DEV(dev, &link->dev); 444 SET_NETDEV_DEV(dev, &link->dev);
451 445
452 if (register_netdev(dev) != 0) { 446 if (register_netdev(dev) != 0) {
453 printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n"); 447 printk(KERN_NOTICE "3c574_cs: register_netdev() failed\n");
454 link->dev_node = NULL;
455 goto failed; 448 goto failed;
456 } 449 }
457 450
458 strcpy(lp->node.dev_name, dev->name);
459
460 printk(KERN_INFO "%s: %s at io %#3lx, irq %d, " 451 printk(KERN_INFO "%s: %s at io %#3lx, irq %d, "
461 "hw_addr %pM.\n", 452 "hw_addr %pM.\n",
462 dev->name, cardname, dev->base_addr, dev->irq, 453 dev->name, cardname, dev->base_addr, dev->irq,
@@ -622,8 +613,6 @@ static void mdio_write(unsigned int ioaddr, int phy_id, int location, int value)
622 outw(MDIO_ENB_IN, mdio_addr); 613 outw(MDIO_ENB_IN, mdio_addr);
623 outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr); 614 outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
624 } 615 }
625
626 return;
627} 616}
628 617
629/* Reset and restore all of the 3c574 registers. */ 618/* Reset and restore all of the 3c574 registers. */
@@ -739,7 +728,7 @@ static void el3_tx_timeout(struct net_device *dev)
739 printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name); 728 printk(KERN_NOTICE "%s: Transmit timed out!\n", dev->name);
740 dump_status(dev); 729 dump_status(dev);
741 dev->stats.tx_errors++; 730 dev->stats.tx_errors++;
742 dev->trans_start = jiffies; 731 dev->trans_start = jiffies; /* prevent tx timeout */
743 /* Issue TX_RESET and TX_START commands. */ 732 /* Issue TX_RESET and TX_START commands. */
744 tc574_wait_for_completion(dev, TxReset); 733 tc574_wait_for_completion(dev, TxReset);
745 outw(TxEnable, ioaddr + EL3_CMD); 734 outw(TxEnable, ioaddr + EL3_CMD);
@@ -781,12 +770,15 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
781 inw(ioaddr + EL3_STATUS)); 770 inw(ioaddr + EL3_STATUS));
782 771
783 spin_lock_irqsave(&lp->window_lock, flags); 772 spin_lock_irqsave(&lp->window_lock, flags);
773
774 dev->stats.tx_bytes += skb->len;
775
776 /* Put out the doubleword header... */
784 outw(skb->len, ioaddr + TX_FIFO); 777 outw(skb->len, ioaddr + TX_FIFO);
785 outw(0, ioaddr + TX_FIFO); 778 outw(0, ioaddr + TX_FIFO);
779 /* ... and the packet rounded to a doubleword. */
786 outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2); 780 outsl(ioaddr + TX_FIFO, skb->data, (skb->len+3)>>2);
787 781
788 dev->trans_start = jiffies;
789
790 /* TxFree appears only in Window 1, not offset 0x1c. */ 782 /* TxFree appears only in Window 1, not offset 0x1c. */
791 if (inw(ioaddr + TxFree) <= 1536) { 783 if (inw(ioaddr + TxFree) <= 1536) {
792 netif_stop_queue(dev); 784 netif_stop_queue(dev);
@@ -1021,8 +1013,6 @@ static void update_stats(struct net_device *dev)
1021 /* BadSSD */ inb(ioaddr + 12); 1013 /* BadSSD */ inb(ioaddr + 12);
1022 up = inb(ioaddr + 13); 1014 up = inb(ioaddr + 13);
1023 1015
1024 dev->stats.tx_bytes += tx + ((up & 0xf0) << 12);
1025
1026 EL3WINDOW(1); 1016 EL3WINDOW(1);
1027} 1017}
1028 1018
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 091e0b00043e..ce63c3773b4c 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -1,20 +1,20 @@
1/*====================================================================== 1/*======================================================================
2 2
3 A PCMCIA ethernet driver for the 3com 3c589 card. 3 A PCMCIA ethernet driver for the 3com 3c589 card.
4 4
5 Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net 5 Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
6 6
7 3c589_cs.c 1.162 2001/10/13 00:08:50 7 3c589_cs.c 1.162 2001/10/13 00:08:50
8 8
9 The network driver code is based on Donald Becker's 3c589 code: 9 The network driver code is based on Donald Becker's 3c589 code:
10 10
11 Written 1994 by Donald Becker. 11 Written 1994 by Donald Becker.
12 Copyright 1993 United States Government as represented by the 12 Copyright 1993 United States Government as represented by the
13 Director, National Security Agency. This software may be used and 13 Director, National Security Agency. This software may be used and
14 distributed according to the terms of the GNU General Public License, 14 distributed according to the terms of the GNU General Public License,
15 incorporated herein by reference. 15 incorporated herein by reference.
16 Donald Becker may be reached at becker@scyld.com 16 Donald Becker may be reached at becker@scyld.com
17 17
18 Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk> 18 Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
19 19
20======================================================================*/ 20======================================================================*/
@@ -69,31 +69,54 @@
69/* The top five bits written to EL3_CMD are a command, the lower 69/* The top five bits written to EL3_CMD are a command, the lower
70 11 bits are the parameter, if applicable. */ 70 11 bits are the parameter, if applicable. */
71enum c509cmd { 71enum c509cmd {
72 TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, 72 TotalReset = 0<<11,
73 RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11, 73 SelectWindow = 1<<11,
74 TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, 74 StartCoax = 2<<11,
75 FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, 75 RxDisable = 3<<11,
76 SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, 76 RxEnable = 4<<11,
77 SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11, 77 RxReset = 5<<11,
78 StatsDisable = 22<<11, StopCoax = 23<<11, 78 RxDiscard = 8<<11,
79 TxEnable = 9<<11,
80 TxDisable = 10<<11,
81 TxReset = 11<<11,
82 FakeIntr = 12<<11,
83 AckIntr = 13<<11,
84 SetIntrEnb = 14<<11,
85 SetStatusEnb = 15<<11,
86 SetRxFilter = 16<<11,
87 SetRxThreshold = 17<<11,
88 SetTxThreshold = 18<<11,
89 SetTxStart = 19<<11,
90 StatsEnable = 21<<11,
91 StatsDisable = 22<<11,
92 StopCoax = 23<<11
79}; 93};
80 94
81enum c509status { 95enum c509status {
82 IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, 96 IntLatch = 0x0001,
83 TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, 97 AdapterFailure = 0x0002,
84 IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000 98 TxComplete = 0x0004,
99 TxAvailable = 0x0008,
100 RxComplete = 0x0010,
101 RxEarly = 0x0020,
102 IntReq = 0x0040,
103 StatsFull = 0x0080,
104 CmdBusy = 0x1000
85}; 105};
86 106
87/* The SetRxFilter command accepts the following classes: */ 107/* The SetRxFilter command accepts the following classes: */
88enum RxFilter { 108enum RxFilter {
89 RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 109 RxStation = 1,
110 RxMulticast = 2,
111 RxBroadcast = 4,
112 RxProm = 8
90}; 113};
91 114
92/* Register window 1 offsets, the window used in normal operation. */ 115/* Register window 1 offsets, the window used in normal operation. */
93#define TX_FIFO 0x00 116#define TX_FIFO 0x00
94#define RX_FIFO 0x00 117#define RX_FIFO 0x00
95#define RX_STATUS 0x08 118#define RX_STATUS 0x08
96#define TX_STATUS 0x0B 119#define TX_STATUS 0x0B
97#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */ 120#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
98 121
99#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */ 122#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
@@ -106,13 +129,12 @@ enum RxFilter {
106 129
107struct el3_private { 130struct el3_private {
108 struct pcmcia_device *p_dev; 131 struct pcmcia_device *p_dev;
109 dev_node_t node; 132 /* For transceiver monitoring */
110 /* For transceiver monitoring */ 133 struct timer_list media;
111 struct timer_list media; 134 u16 media_status;
112 u16 media_status; 135 u16 fast_poll;
113 u16 fast_poll; 136 unsigned long last_irq;
114 unsigned long last_irq; 137 spinlock_t lock;
115 spinlock_t lock;
116}; 138};
117 139
118static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" }; 140static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
@@ -164,15 +186,15 @@ static void tc589_detach(struct pcmcia_device *p_dev);
164======================================================================*/ 186======================================================================*/
165 187
166static const struct net_device_ops el3_netdev_ops = { 188static const struct net_device_ops el3_netdev_ops = {
167 .ndo_open = el3_open, 189 .ndo_open = el3_open,
168 .ndo_stop = el3_close, 190 .ndo_stop = el3_close,
169 .ndo_start_xmit = el3_start_xmit, 191 .ndo_start_xmit = el3_start_xmit,
170 .ndo_tx_timeout = el3_tx_timeout, 192 .ndo_tx_timeout = el3_tx_timeout,
171 .ndo_set_config = el3_config, 193 .ndo_set_config = el3_config,
172 .ndo_get_stats = el3_get_stats, 194 .ndo_get_stats = el3_get_stats,
173 .ndo_set_multicast_list = set_multicast_list, 195 .ndo_set_multicast_list = set_multicast_list,
174 .ndo_change_mtu = eth_change_mtu, 196 .ndo_change_mtu = eth_change_mtu,
175 .ndo_set_mac_address = eth_mac_addr, 197 .ndo_set_mac_address = eth_mac_addr,
176 .ndo_validate_addr = eth_validate_addr, 198 .ndo_validate_addr = eth_validate_addr,
177}; 199};
178 200
@@ -194,8 +216,7 @@ static int tc589_probe(struct pcmcia_device *link)
194 spin_lock_init(&lp->lock); 216 spin_lock_init(&lp->lock);
195 link->io.NumPorts1 = 16; 217 link->io.NumPorts1 = 16;
196 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16; 218 link->io.Attributes1 = IO_DATA_PATH_WIDTH_16;
197 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 219
198 link->irq.Handler = &el3_interrupt;
199 link->conf.Attributes = CONF_ENABLE_IRQ; 220 link->conf.Attributes = CONF_ENABLE_IRQ;
200 link->conf.IntType = INT_MEMORY_AND_IO; 221 link->conf.IntType = INT_MEMORY_AND_IO;
201 link->conf.ConfigIndex = 1; 222 link->conf.ConfigIndex = 1;
@@ -223,8 +244,7 @@ static void tc589_detach(struct pcmcia_device *link)
223 244
224 dev_dbg(&link->dev, "3c589_detach\n"); 245 dev_dbg(&link->dev, "3c589_detach\n");
225 246
226 if (link->dev_node) 247 unregister_netdev(dev);
227 unregister_netdev(dev);
228 248
229 tc589_release(link); 249 tc589_release(link);
230 250
@@ -236,20 +256,19 @@ static void tc589_detach(struct pcmcia_device *link)
236 tc589_config() is scheduled to run after a CARD_INSERTION event 256 tc589_config() is scheduled to run after a CARD_INSERTION event
237 is received, to configure the PCMCIA socket, and to make the 257 is received, to configure the PCMCIA socket, and to make the
238 ethernet device available to the system. 258 ethernet device available to the system.
239 259
240======================================================================*/ 260======================================================================*/
241 261
242static int tc589_config(struct pcmcia_device *link) 262static int tc589_config(struct pcmcia_device *link)
243{ 263{
244 struct net_device *dev = link->priv; 264 struct net_device *dev = link->priv;
245 struct el3_private *lp = netdev_priv(dev);
246 __be16 *phys_addr; 265 __be16 *phys_addr;
247 int ret, i, j, multi = 0, fifo; 266 int ret, i, j, multi = 0, fifo;
248 unsigned int ioaddr; 267 unsigned int ioaddr;
249 char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 268 char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
250 u8 *buf; 269 u8 *buf;
251 size_t len; 270 size_t len;
252 271
253 dev_dbg(&link->dev, "3c589_config\n"); 272 dev_dbg(&link->dev, "3c589_config\n");
254 273
255 phys_addr = (__be16 *)dev->dev_addr; 274 phys_addr = (__be16 *)dev->dev_addr;
@@ -271,15 +290,15 @@ static int tc589_config(struct pcmcia_device *link)
271 if (i != 0) 290 if (i != 0)
272 goto failed; 291 goto failed;
273 292
274 ret = pcmcia_request_irq(link, &link->irq); 293 ret = pcmcia_request_irq(link, el3_interrupt);
275 if (ret) 294 if (ret)
276 goto failed; 295 goto failed;
277 296
278 ret = pcmcia_request_configuration(link, &link->conf); 297 ret = pcmcia_request_configuration(link, &link->conf);
279 if (ret) 298 if (ret)
280 goto failed; 299 goto failed;
281 300
282 dev->irq = link->irq.AssignedIRQ; 301 dev->irq = link->irq;
283 dev->base_addr = link->io.BasePort1; 302 dev->base_addr = link->io.BasePort1;
284 ioaddr = dev->base_addr; 303 ioaddr = dev->base_addr;
285 EL3WINDOW(0); 304 EL3WINDOW(0);
@@ -312,25 +331,20 @@ static int tc589_config(struct pcmcia_device *link)
312 dev->if_port = if_port; 331 dev->if_port = if_port;
313 else 332 else
314 printk(KERN_ERR "3c589_cs: invalid if_port requested\n"); 333 printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
315 334
316 link->dev_node = &lp->node;
317 SET_NETDEV_DEV(dev, &link->dev); 335 SET_NETDEV_DEV(dev, &link->dev);
318 336
319 if (register_netdev(dev) != 0) { 337 if (register_netdev(dev) != 0) {
320 printk(KERN_ERR "3c589_cs: register_netdev() failed\n"); 338 printk(KERN_ERR "3c589_cs: register_netdev() failed\n");
321 link->dev_node = NULL;
322 goto failed; 339 goto failed;
323 } 340 }
324 341
325 strcpy(lp->node.dev_name, dev->name); 342 netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
326 343 (multi ? "562" : "589"), dev->base_addr, dev->irq,
327 printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, " 344 dev->dev_addr);
328 "hw_addr %pM\n", 345 netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
329 dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq, 346 (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
330 dev->dev_addr); 347 if_names[dev->if_port]);
331 printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n",
332 (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
333 if_names[dev->if_port]);
334 return 0; 348 return 0;
335 349
336failed: 350failed:
@@ -343,7 +357,7 @@ failed:
343 After a card is removed, tc589_release() will unregister the net 357 After a card is removed, tc589_release() will unregister the net
344 device, and release the PCMCIA configuration. If the device is 358 device, and release the PCMCIA configuration. If the device is
345 still open, this will be postponed until it is closed. 359 still open, this will be postponed until it is closed.
346 360
347======================================================================*/ 361======================================================================*/
348 362
349static void tc589_release(struct pcmcia_device *link) 363static void tc589_release(struct pcmcia_device *link)
@@ -365,7 +379,7 @@ static int tc589_resume(struct pcmcia_device *link)
365{ 379{
366 struct net_device *dev = link->priv; 380 struct net_device *dev = link->priv;
367 381
368 if (link->open) { 382 if (link->open) {
369 tc589_reset(dev); 383 tc589_reset(dev);
370 netif_device_attach(dev); 384 netif_device_attach(dev);
371 } 385 }
@@ -385,8 +399,7 @@ static void tc589_wait_for_completion(struct net_device *dev, int cmd)
385 while (--i > 0) 399 while (--i > 0)
386 if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; 400 if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
387 if (i == 0) 401 if (i == 0)
388 printk(KERN_WARNING "%s: command 0x%04x did not complete!\n", 402 netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
389 dev->name, cmd);
390} 403}
391 404
392/* 405/*
@@ -412,7 +425,7 @@ static void tc589_set_xcvr(struct net_device *dev, int if_port)
412{ 425{
413 struct el3_private *lp = netdev_priv(dev); 426 struct el3_private *lp = netdev_priv(dev);
414 unsigned int ioaddr = dev->base_addr; 427 unsigned int ioaddr = dev->base_addr;
415 428
416 EL3WINDOW(0); 429 EL3WINDOW(0);
417 switch (if_port) { 430 switch (if_port) {
418 case 0: case 1: outw(0, ioaddr + 6); break; 431 case 0: case 1: outw(0, ioaddr + 6); break;
@@ -435,14 +448,13 @@ static void dump_status(struct net_device *dev)
435{ 448{
436 unsigned int ioaddr = dev->base_addr; 449 unsigned int ioaddr = dev->base_addr;
437 EL3WINDOW(1); 450 EL3WINDOW(1);
438 printk(KERN_INFO " irq status %04x, rx status %04x, tx status " 451 netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
439 "%02x tx free %04x\n", inw(ioaddr+EL3_STATUS), 452 inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
440 inw(ioaddr+RX_STATUS), inb(ioaddr+TX_STATUS), 453 inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
441 inw(ioaddr+TX_FREE));
442 EL3WINDOW(4); 454 EL3WINDOW(4);
443 printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x" 455 netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
444 " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06), 456 inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
445 inw(ioaddr+0x08), inw(ioaddr+0x0a)); 457 inw(ioaddr+0x0a));
446 EL3WINDOW(1); 458 EL3WINDOW(1);
447} 459}
448 460
@@ -451,18 +463,18 @@ static void tc589_reset(struct net_device *dev)
451{ 463{
452 unsigned int ioaddr = dev->base_addr; 464 unsigned int ioaddr = dev->base_addr;
453 int i; 465 int i;
454 466
455 EL3WINDOW(0); 467 EL3WINDOW(0);
456 outw(0x0001, ioaddr + 4); /* Activate board. */ 468 outw(0x0001, ioaddr + 4); /* Activate board. */
457 outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */ 469 outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
458 470
459 /* Set the station address in window 2. */ 471 /* Set the station address in window 2. */
460 EL3WINDOW(2); 472 EL3WINDOW(2);
461 for (i = 0; i < 6; i++) 473 for (i = 0; i < 6; i++)
462 outb(dev->dev_addr[i], ioaddr + i); 474 outb(dev->dev_addr[i], ioaddr + i);
463 475
464 tc589_set_xcvr(dev, dev->if_port); 476 tc589_set_xcvr(dev, dev->if_port);
465 477
466 /* Switch to the stats window, and clear all stats by reading. */ 478 /* Switch to the stats window, and clear all stats by reading. */
467 outw(StatsDisable, ioaddr + EL3_CMD); 479 outw(StatsDisable, ioaddr + EL3_CMD);
468 EL3WINDOW(6); 480 EL3WINDOW(6);
@@ -470,7 +482,7 @@ static void tc589_reset(struct net_device *dev)
470 inb(ioaddr+i); 482 inb(ioaddr+i);
471 inw(ioaddr + 10); 483 inw(ioaddr + 10);
472 inw(ioaddr + 12); 484 inw(ioaddr + 12);
473 485
474 /* Switch to register set 1 for normal use. */ 486 /* Switch to register set 1 for normal use. */
475 EL3WINDOW(1); 487 EL3WINDOW(1);
476 488
@@ -504,8 +516,7 @@ static int el3_config(struct net_device *dev, struct ifmap *map)
504 if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { 516 if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
505 if (map->port <= 3) { 517 if (map->port <= 3) {
506 dev->if_port = map->port; 518 dev->if_port = map->port;
507 printk(KERN_INFO "%s: switched to %s port\n", 519 netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
508 dev->name, if_names[dev->if_port]);
509 tc589_set_xcvr(dev, dev->if_port); 520 tc589_set_xcvr(dev, dev->if_port);
510 } else 521 } else
511 return -EINVAL; 522 return -EINVAL;
@@ -517,13 +528,13 @@ static int el3_open(struct net_device *dev)
517{ 528{
518 struct el3_private *lp = netdev_priv(dev); 529 struct el3_private *lp = netdev_priv(dev);
519 struct pcmcia_device *link = lp->p_dev; 530 struct pcmcia_device *link = lp->p_dev;
520 531
521 if (!pcmcia_dev_present(link)) 532 if (!pcmcia_dev_present(link))
522 return -ENODEV; 533 return -ENODEV;
523 534
524 link->open++; 535 link->open++;
525 netif_start_queue(dev); 536 netif_start_queue(dev);
526 537
527 tc589_reset(dev); 538 tc589_reset(dev);
528 init_timer(&lp->media); 539 init_timer(&lp->media);
529 lp->media.function = &media_check; 540 lp->media.function = &media_check;
@@ -533,18 +544,18 @@ static int el3_open(struct net_device *dev)
533 544
534 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", 545 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
535 dev->name, inw(dev->base_addr + EL3_STATUS)); 546 dev->name, inw(dev->base_addr + EL3_STATUS));
536 547
537 return 0; 548 return 0;
538} 549}
539 550
540static void el3_tx_timeout(struct net_device *dev) 551static void el3_tx_timeout(struct net_device *dev)
541{ 552{
542 unsigned int ioaddr = dev->base_addr; 553 unsigned int ioaddr = dev->base_addr;
543 554
544 printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name); 555 netdev_warn(dev, "Transmit timed out!\n");
545 dump_status(dev); 556 dump_status(dev);
546 dev->stats.tx_errors++; 557 dev->stats.tx_errors++;
547 dev->trans_start = jiffies; 558 dev->trans_start = jiffies; /* prevent tx timeout */
548 /* Issue TX_RESET and TX_START commands. */ 559 /* Issue TX_RESET and TX_START commands. */
549 tc589_wait_for_completion(dev, TxReset); 560 tc589_wait_for_completion(dev, TxReset);
550 outw(TxEnable, ioaddr + EL3_CMD); 561 outw(TxEnable, ioaddr + EL3_CMD);
@@ -555,19 +566,18 @@ static void pop_tx_status(struct net_device *dev)
555{ 566{
556 unsigned int ioaddr = dev->base_addr; 567 unsigned int ioaddr = dev->base_addr;
557 int i; 568 int i;
558 569
559 /* Clear the Tx status stack. */ 570 /* Clear the Tx status stack. */
560 for (i = 32; i > 0; i--) { 571 for (i = 32; i > 0; i--) {
561 u_char tx_status = inb(ioaddr + TX_STATUS); 572 u_char tx_status = inb(ioaddr + TX_STATUS);
562 if (!(tx_status & 0x84)) break; 573 if (!(tx_status & 0x84)) break;
563 /* reset transmitter on jabber error or underrun */ 574 /* reset transmitter on jabber error or underrun */
564 if (tx_status & 0x30) 575 if (tx_status & 0x30)
565 tc589_wait_for_completion(dev, TxReset); 576 tc589_wait_for_completion(dev, TxReset);
566 if (tx_status & 0x38) { 577 if (tx_status & 0x38) {
567 pr_debug("%s: transmit error: status 0x%02x\n", 578 netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
568 dev->name, tx_status); 579 outw(TxEnable, ioaddr + EL3_CMD);
569 outw(TxEnable, ioaddr + EL3_CMD); 580 dev->stats.tx_aborted_errors++;
570 dev->stats.tx_aborted_errors++;
571 } 581 }
572 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ 582 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
573 } 583 }
@@ -580,11 +590,10 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
580 struct el3_private *priv = netdev_priv(dev); 590 struct el3_private *priv = netdev_priv(dev);
581 unsigned long flags; 591 unsigned long flags;
582 592
583 pr_debug("%s: el3_start_xmit(length = %ld) called, " 593 netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
584 "status %4.4x.\n", dev->name, (long)skb->len, 594 (long)skb->len, inw(ioaddr + EL3_STATUS));
585 inw(ioaddr + EL3_STATUS));
586 595
587 spin_lock_irqsave(&priv->lock, flags); 596 spin_lock_irqsave(&priv->lock, flags);
588 597
589 dev->stats.tx_bytes += skb->len; 598 dev->stats.tx_bytes += skb->len;
590 599
@@ -594,7 +603,6 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
594 /* ... and the packet rounded to a doubleword. */ 603 /* ... and the packet rounded to a doubleword. */
595 outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2); 604 outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
596 605
597 dev->trans_start = jiffies;
598 if (inw(ioaddr + TX_FREE) <= 1536) { 606 if (inw(ioaddr + TX_FREE) <= 1536) {
599 netif_stop_queue(dev); 607 netif_stop_queue(dev);
600 /* Interrupt us when the FIFO has room for max-sized packet. */ 608 /* Interrupt us when the FIFO has room for max-sized packet. */
@@ -602,9 +610,9 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
602 } 610 }
603 611
604 pop_tx_status(dev); 612 pop_tx_status(dev);
605 spin_unlock_irqrestore(&priv->lock, flags); 613 spin_unlock_irqrestore(&priv->lock, flags);
606 dev_kfree_skb(skb); 614 dev_kfree_skb(skb);
607 615
608 return NETDEV_TX_OK; 616 return NETDEV_TX_OK;
609} 617}
610 618
@@ -616,37 +624,32 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
616 unsigned int ioaddr; 624 unsigned int ioaddr;
617 __u16 status; 625 __u16 status;
618 int i = 0, handled = 1; 626 int i = 0, handled = 1;
619 627
620 if (!netif_device_present(dev)) 628 if (!netif_device_present(dev))
621 return IRQ_NONE; 629 return IRQ_NONE;
622 630
623 ioaddr = dev->base_addr; 631 ioaddr = dev->base_addr;
624 632
625 pr_debug("%s: interrupt, status %4.4x.\n", 633 netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
626 dev->name, inw(ioaddr + EL3_STATUS));
627 634
628 spin_lock(&lp->lock); 635 spin_lock(&lp->lock);
629 while ((status = inw(ioaddr + EL3_STATUS)) & 636 while ((status = inw(ioaddr + EL3_STATUS)) &
630 (IntLatch | RxComplete | StatsFull)) { 637 (IntLatch | RxComplete | StatsFull)) {
631 if ((status & 0xe000) != 0x2000) { 638 if ((status & 0xe000) != 0x2000) {
632 pr_debug("%s: interrupt from dead card\n", dev->name); 639 netdev_dbg(dev, "interrupt from dead card\n");
633 handled = 0; 640 handled = 0;
634 break; 641 break;
635 } 642 }
636
637 if (status & RxComplete) 643 if (status & RxComplete)
638 el3_rx(dev); 644 el3_rx(dev);
639
640 if (status & TxAvailable) { 645 if (status & TxAvailable) {
641 pr_debug(" TX room bit was handled.\n"); 646 netdev_dbg(dev, " TX room bit was handled.\n");
642 /* There's room in the FIFO for a full-sized packet. */ 647 /* There's room in the FIFO for a full-sized packet. */
643 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); 648 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
644 netif_wake_queue(dev); 649 netif_wake_queue(dev);
645 } 650 }
646
647 if (status & TxComplete) 651 if (status & TxComplete)
648 pop_tx_status(dev); 652 pop_tx_status(dev);
649
650 if (status & (AdapterFailure | RxEarly | StatsFull)) { 653 if (status & (AdapterFailure | RxEarly | StatsFull)) {
651 /* Handle all uncommon interrupts. */ 654 /* Handle all uncommon interrupts. */
652 if (status & StatsFull) /* Empty statistics. */ 655 if (status & StatsFull) /* Empty statistics. */
@@ -660,8 +663,8 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
660 EL3WINDOW(4); 663 EL3WINDOW(4);
661 fifo_diag = inw(ioaddr + 4); 664 fifo_diag = inw(ioaddr + 4);
662 EL3WINDOW(1); 665 EL3WINDOW(1);
663 printk(KERN_WARNING "%s: adapter failure, FIFO diagnostic" 666 netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
664 " register %04x.\n", dev->name, fifo_diag); 667 fifo_diag);
665 if (fifo_diag & 0x0400) { 668 if (fifo_diag & 0x0400) {
666 /* Tx overrun */ 669 /* Tx overrun */
667 tc589_wait_for_completion(dev, TxReset); 670 tc589_wait_for_completion(dev, TxReset);
@@ -676,22 +679,20 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
676 outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); 679 outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
677 } 680 }
678 } 681 }
679
680 if (++i > 10) { 682 if (++i > 10) {
681 printk(KERN_ERR "%s: infinite loop in interrupt, " 683 netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
682 "status %4.4x.\n", dev->name, status); 684 status);
683 /* Clear all interrupts */ 685 /* Clear all interrupts */
684 outw(AckIntr | 0xFF, ioaddr + EL3_CMD); 686 outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
685 break; 687 break;
686 } 688 }
687 /* Acknowledge the IRQ. */ 689 /* Acknowledge the IRQ. */
688 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); 690 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
689 } 691 }
690
691 lp->last_irq = jiffies; 692 lp->last_irq = jiffies;
692 spin_unlock(&lp->lock); 693 spin_unlock(&lp->lock);
693 pr_debug("%s: exiting interrupt, status %4.4x.\n", 694 netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
694 dev->name, inw(ioaddr + EL3_STATUS)); 695 inw(ioaddr + EL3_STATUS));
695 return IRQ_RETVAL(handled); 696 return IRQ_RETVAL(handled);
696} 697}
697 698
@@ -710,7 +711,7 @@ static void media_check(unsigned long arg)
710 if ((inw(ioaddr + EL3_STATUS) & IntLatch) && 711 if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
711 (inb(ioaddr + EL3_TIMER) == 0xff)) { 712 (inb(ioaddr + EL3_TIMER) == 0xff)) {
712 if (!lp->fast_poll) 713 if (!lp->fast_poll)
713 printk(KERN_WARNING "%s: interrupt(s) dropped!\n", dev->name); 714 netdev_warn(dev, "interrupt(s) dropped!\n");
714 715
715 local_irq_save(flags); 716 local_irq_save(flags);
716 el3_interrupt(dev->irq, dev); 717 el3_interrupt(dev->irq, dev);
@@ -727,7 +728,7 @@ static void media_check(unsigned long arg)
727 728
728 /* lp->lock guards the EL3 window. Window should always be 1 except 729 /* lp->lock guards the EL3 window. Window should always be 1 except
729 when the lock is held */ 730 when the lock is held */
730 spin_lock_irqsave(&lp->lock, flags); 731 spin_lock_irqsave(&lp->lock, flags);
731 EL3WINDOW(4); 732 EL3WINDOW(4);
732 media = inw(ioaddr+WN4_MEDIA) & 0xc810; 733 media = inw(ioaddr+WN4_MEDIA) & 0xc810;
733 734
@@ -747,32 +748,30 @@ static void media_check(unsigned long arg)
747 if (media != lp->media_status) { 748 if (media != lp->media_status) {
748 if ((media & lp->media_status & 0x8000) && 749 if ((media & lp->media_status & 0x8000) &&
749 ((lp->media_status ^ media) & 0x0800)) 750 ((lp->media_status ^ media) & 0x0800))
750 printk(KERN_INFO "%s: %s link beat\n", dev->name, 751 netdev_info(dev, "%s link beat\n",
751 (lp->media_status & 0x0800 ? "lost" : "found")); 752 (lp->media_status & 0x0800 ? "lost" : "found"));
752 else if ((media & lp->media_status & 0x4000) && 753 else if ((media & lp->media_status & 0x4000) &&
753 ((lp->media_status ^ media) & 0x0010)) 754 ((lp->media_status ^ media) & 0x0010))
754 printk(KERN_INFO "%s: coax cable %s\n", dev->name, 755 netdev_info(dev, "coax cable %s\n",
755 (lp->media_status & 0x0010 ? "ok" : "problem")); 756 (lp->media_status & 0x0010 ? "ok" : "problem"));
756 if (dev->if_port == 0) { 757 if (dev->if_port == 0) {
757 if (media & 0x8000) { 758 if (media & 0x8000) {
758 if (media & 0x0800) 759 if (media & 0x0800)
759 printk(KERN_INFO "%s: flipped to 10baseT\n", 760 netdev_info(dev, "flipped to 10baseT\n");
760 dev->name);
761 else 761 else
762 tc589_set_xcvr(dev, 2); 762 tc589_set_xcvr(dev, 2);
763 } else if (media & 0x4000) { 763 } else if (media & 0x4000) {
764 if (media & 0x0010) 764 if (media & 0x0010)
765 tc589_set_xcvr(dev, 1); 765 tc589_set_xcvr(dev, 1);
766 else 766 else
767 printk(KERN_INFO "%s: flipped to 10base2\n", 767 netdev_info(dev, "flipped to 10base2\n");
768 dev->name);
769 } 768 }
770 } 769 }
771 lp->media_status = media; 770 lp->media_status = media;
772 } 771 }
773 772
774 EL3WINDOW(1); 773 EL3WINDOW(1);
775 spin_unlock_irqrestore(&lp->lock, flags); 774 spin_unlock_irqrestore(&lp->lock, flags);
776 775
777reschedule: 776reschedule:
778 lp->media.expires = jiffies + HZ; 777 lp->media.expires = jiffies + HZ;
@@ -786,7 +785,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
786 struct pcmcia_device *link = lp->p_dev; 785 struct pcmcia_device *link = lp->p_dev;
787 786
788 if (pcmcia_dev_present(link)) { 787 if (pcmcia_dev_present(link)) {
789 spin_lock_irqsave(&lp->lock, flags); 788 spin_lock_irqsave(&lp->lock, flags);
790 update_stats(dev); 789 update_stats(dev);
791 spin_unlock_irqrestore(&lp->lock, flags); 790 spin_unlock_irqrestore(&lp->lock, flags);
792 } 791 }
@@ -798,21 +797,21 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
798 single-threaded if the device is active. This is expected to be a rare 797 single-threaded if the device is active. This is expected to be a rare
799 operation, and it's simpler for the rest of the driver to assume that 798 operation, and it's simpler for the rest of the driver to assume that
800 window 1 is always valid rather than use a special window-state variable. 799 window 1 is always valid rather than use a special window-state variable.
801 800
802 Caller must hold the lock for this 801 Caller must hold the lock for this
803*/ 802*/
804static void update_stats(struct net_device *dev) 803static void update_stats(struct net_device *dev)
805{ 804{
806 unsigned int ioaddr = dev->base_addr; 805 unsigned int ioaddr = dev->base_addr;
807 806
808 pr_debug("%s: updating the statistics.\n", dev->name); 807 netdev_dbg(dev, "updating the statistics.\n");
809 /* Turn off statistics updates while reading. */ 808 /* Turn off statistics updates while reading. */
810 outw(StatsDisable, ioaddr + EL3_CMD); 809 outw(StatsDisable, ioaddr + EL3_CMD);
811 /* Switch to the stats window, and read everything. */ 810 /* Switch to the stats window, and read everything. */
812 EL3WINDOW(6); 811 EL3WINDOW(6);
813 dev->stats.tx_carrier_errors += inb(ioaddr + 0); 812 dev->stats.tx_carrier_errors += inb(ioaddr + 0);
814 dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); 813 dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
815 /* Multiple collisions. */ inb(ioaddr + 2); 814 /* Multiple collisions. */ inb(ioaddr + 2);
816 dev->stats.collisions += inb(ioaddr + 3); 815 dev->stats.collisions += inb(ioaddr + 3);
817 dev->stats.tx_window_errors += inb(ioaddr + 4); 816 dev->stats.tx_window_errors += inb(ioaddr + 4);
818 dev->stats.rx_fifo_errors += inb(ioaddr + 5); 817 dev->stats.rx_fifo_errors += inb(ioaddr + 5);
@@ -821,7 +820,7 @@ static void update_stats(struct net_device *dev)
821 /* Tx deferrals */ inb(ioaddr + 8); 820 /* Tx deferrals */ inb(ioaddr + 8);
822 /* Rx octets */ inw(ioaddr + 10); 821 /* Rx octets */ inw(ioaddr + 10);
823 /* Tx octets */ inw(ioaddr + 12); 822 /* Tx octets */ inw(ioaddr + 12);
824 823
825 /* Back to window 1, and turn statistics back on. */ 824 /* Back to window 1, and turn statistics back on. */
826 EL3WINDOW(1); 825 EL3WINDOW(1);
827 outw(StatsEnable, ioaddr + EL3_CMD); 826 outw(StatsEnable, ioaddr + EL3_CMD);
@@ -832,9 +831,9 @@ static int el3_rx(struct net_device *dev)
832 unsigned int ioaddr = dev->base_addr; 831 unsigned int ioaddr = dev->base_addr;
833 int worklimit = 32; 832 int worklimit = 32;
834 short rx_status; 833 short rx_status;
835 834
836 pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", 835 netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
837 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); 836 inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
838 while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && 837 while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
839 worklimit > 0) { 838 worklimit > 0) {
840 worklimit--; 839 worklimit--;
@@ -852,11 +851,11 @@ static int el3_rx(struct net_device *dev)
852 } else { 851 } else {
853 short pkt_len = rx_status & 0x7ff; 852 short pkt_len = rx_status & 0x7ff;
854 struct sk_buff *skb; 853 struct sk_buff *skb;
855 854
856 skb = dev_alloc_skb(pkt_len+5); 855 skb = dev_alloc_skb(pkt_len+5);
857 856
858 pr_debug(" Receiving packet size %d status %4.4x.\n", 857 netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
859 pkt_len, rx_status); 858 pkt_len, rx_status);
860 if (skb != NULL) { 859 if (skb != NULL) {
861 skb_reserve(skb, 2); 860 skb_reserve(skb, 2);
862 insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), 861 insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
@@ -866,8 +865,8 @@ static int el3_rx(struct net_device *dev)
866 dev->stats.rx_packets++; 865 dev->stats.rx_packets++;
867 dev->stats.rx_bytes += pkt_len; 866 dev->stats.rx_bytes += pkt_len;
868 } else { 867 } else {
869 pr_debug("%s: couldn't allocate a sk_buff of" 868 netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
870 " size %d.\n", dev->name, pkt_len); 869 pkt_len);
871 dev->stats.rx_dropped++; 870 dev->stats.rx_dropped++;
872 } 871 }
873 } 872 }
@@ -875,7 +874,7 @@ static int el3_rx(struct net_device *dev)
875 tc589_wait_for_completion(dev, RxDiscard); 874 tc589_wait_for_completion(dev, RxDiscard);
876 } 875 }
877 if (worklimit == 0) 876 if (worklimit == 0)
878 printk(KERN_WARNING "%s: too much work in el3_rx!\n", dev->name); 877 netdev_warn(dev, "too much work in el3_rx!\n");
879 return 0; 878 return 0;
880} 879}
881 880
@@ -906,17 +905,17 @@ static int el3_close(struct net_device *dev)
906 struct el3_private *lp = netdev_priv(dev); 905 struct el3_private *lp = netdev_priv(dev);
907 struct pcmcia_device *link = lp->p_dev; 906 struct pcmcia_device *link = lp->p_dev;
908 unsigned int ioaddr = dev->base_addr; 907 unsigned int ioaddr = dev->base_addr;
909 908
910 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); 909 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
911 910
912 if (pcmcia_dev_present(link)) { 911 if (pcmcia_dev_present(link)) {
913 /* Turn off statistics ASAP. We update dev->stats below. */ 912 /* Turn off statistics ASAP. We update dev->stats below. */
914 outw(StatsDisable, ioaddr + EL3_CMD); 913 outw(StatsDisable, ioaddr + EL3_CMD);
915 914
916 /* Disable the receiver and transmitter. */ 915 /* Disable the receiver and transmitter. */
917 outw(RxDisable, ioaddr + EL3_CMD); 916 outw(RxDisable, ioaddr + EL3_CMD);
918 outw(TxDisable, ioaddr + EL3_CMD); 917 outw(TxDisable, ioaddr + EL3_CMD);
919 918
920 if (dev->if_port == 2) 919 if (dev->if_port == 2)
921 /* Turn off thinnet power. Green! */ 920 /* Turn off thinnet power. Green! */
922 outw(StopCoax, ioaddr + EL3_CMD); 921 outw(StopCoax, ioaddr + EL3_CMD);
@@ -925,12 +924,12 @@ static int el3_close(struct net_device *dev)
925 EL3WINDOW(4); 924 EL3WINDOW(4);
926 outw(0, ioaddr + WN4_MEDIA); 925 outw(0, ioaddr + WN4_MEDIA);
927 } 926 }
928 927
929 /* Switching back to window 0 disables the IRQ. */ 928 /* Switching back to window 0 disables the IRQ. */
930 EL3WINDOW(0); 929 EL3WINDOW(0);
931 /* But we explicitly zero the IRQ line select anyway. */ 930 /* But we explicitly zero the IRQ line select anyway. */
932 outw(0x0f00, ioaddr + WN0_IRQ); 931 outw(0x0f00, ioaddr + WN0_IRQ);
933 932
934 /* Check if the card still exists */ 933 /* Check if the card still exists */
935 if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000) 934 if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
936 update_stats(dev); 935 update_stats(dev);
@@ -939,7 +938,7 @@ static int el3_close(struct net_device *dev)
939 link->open--; 938 link->open--;
940 netif_stop_queue(dev); 939 netif_stop_queue(dev);
941 del_timer_sync(&lp->media); 940 del_timer_sync(&lp->media);
942 941
943 return 0; 942 return 0;
944} 943}
945 944
@@ -961,7 +960,7 @@ static struct pcmcia_driver tc589_driver = {
961 }, 960 },
962 .probe = tc589_probe, 961 .probe = tc589_probe,
963 .remove = tc589_detach, 962 .remove = tc589_detach,
964 .id_table = tc589_ids, 963 .id_table = tc589_ids,
965 .suspend = tc589_suspend, 964 .suspend = tc589_suspend,
966 .resume = tc589_resume, 965 .resume = tc589_resume,
967}; 966};
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 9f3d593f14ed..5b3dfb4ab279 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -113,7 +113,6 @@ static irqreturn_t ax_interrupt(int irq, void *dev_id);
113 113
114typedef struct axnet_dev_t { 114typedef struct axnet_dev_t {
115 struct pcmcia_device *p_dev; 115 struct pcmcia_device *p_dev;
116 dev_node_t node;
117 caddr_t base; 116 caddr_t base;
118 struct timer_list watchdog; 117 struct timer_list watchdog;
119 int stale, fast_poll; 118 int stale, fast_poll;
@@ -168,7 +167,6 @@ static int axnet_probe(struct pcmcia_device *link)
168 info = PRIV(dev); 167 info = PRIV(dev);
169 info->p_dev = link; 168 info->p_dev = link;
170 link->priv = dev; 169 link->priv = dev;
171 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
172 link->conf.Attributes = CONF_ENABLE_IRQ; 170 link->conf.Attributes = CONF_ENABLE_IRQ;
173 link->conf.IntType = INT_MEMORY_AND_IO; 171 link->conf.IntType = INT_MEMORY_AND_IO;
174 172
@@ -195,8 +193,7 @@ static void axnet_detach(struct pcmcia_device *link)
195 193
196 dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link); 194 dev_dbg(&link->dev, "axnet_detach(0x%p)\n", link);
197 195
198 if (link->dev_node) 196 unregister_netdev(dev);
199 unregister_netdev(dev);
200 197
201 axnet_release(link); 198 axnet_release(link);
202 199
@@ -265,12 +262,9 @@ static int try_io_port(struct pcmcia_device *link)
265 int j, ret; 262 int j, ret;
266 if (link->io.NumPorts1 == 32) { 263 if (link->io.NumPorts1 == 32) {
267 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 264 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
268 if (link->io.NumPorts2 > 0) { 265 /* for master/slave multifunction cards */
269 /* for master/slave multifunction cards */ 266 if (link->io.NumPorts2 > 0)
270 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 267 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
271 link->irq.Attributes =
272 IRQ_TYPE_DYNAMIC_SHARING;
273 }
274 } else { 268 } else {
275 /* This should be two 16-port windows */ 269 /* This should be two 16-port windows */
276 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 270 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
@@ -336,8 +330,7 @@ static int axnet_config(struct pcmcia_device *link)
336 if (ret != 0) 330 if (ret != 0)
337 goto failed; 331 goto failed;
338 332
339 ret = pcmcia_request_irq(link, &link->irq); 333 if (!link->irq)
340 if (ret)
341 goto failed; 334 goto failed;
342 335
343 if (link->io.NumPorts2 == 8) { 336 if (link->io.NumPorts2 == 8) {
@@ -349,7 +342,7 @@ static int axnet_config(struct pcmcia_device *link)
349 if (ret) 342 if (ret)
350 goto failed; 343 goto failed;
351 344
352 dev->irq = link->irq.AssignedIRQ; 345 dev->irq = link->irq;
353 dev->base_addr = link->io.BasePort1; 346 dev->base_addr = link->io.BasePort1;
354 347
355 if (!get_prom(link)) { 348 if (!get_prom(link)) {
@@ -397,17 +390,13 @@ static int axnet_config(struct pcmcia_device *link)
397 } 390 }
398 391
399 info->phy_id = (i < 32) ? i : -1; 392 info->phy_id = (i < 32) ? i : -1;
400 link->dev_node = &info->node;
401 SET_NETDEV_DEV(dev, &link->dev); 393 SET_NETDEV_DEV(dev, &link->dev);
402 394
403 if (register_netdev(dev) != 0) { 395 if (register_netdev(dev) != 0) {
404 printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n"); 396 printk(KERN_NOTICE "axnet_cs: register_netdev() failed\n");
405 link->dev_node = NULL;
406 goto failed; 397 goto failed;
407 } 398 }
408 399
409 strcpy(info->node.dev_name, dev->name);
410
411 printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, " 400 printk(KERN_INFO "%s: Asix AX88%d90: io %#3lx, irq %d, "
412 "hw_addr %pM\n", 401 "hw_addr %pM\n",
413 dev->name, ((info->flags & IS_AX88790) ? 7 : 1), 402 dev->name, ((info->flags & IS_AX88790) ? 7 : 1),
@@ -1005,7 +994,7 @@ static void axnet_tx_timeout(struct net_device *dev)
1005{ 994{
1006 long e8390_base = dev->base_addr; 995 long e8390_base = dev->base_addr;
1007 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 996 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
1008 int txsr, isr, tickssofar = jiffies - dev->trans_start; 997 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
1009 unsigned long flags; 998 unsigned long flags;
1010 999
1011 dev->stats.tx_errors++; 1000 dev->stats.tx_errors++;
@@ -1510,8 +1499,6 @@ static void ei_receive(struct net_device *dev)
1510 ei_local->current_page = next_frame; 1499 ei_local->current_page = next_frame;
1511 outb_p(next_frame-1, e8390_base+EN0_BOUNDARY); 1500 outb_p(next_frame-1, e8390_base+EN0_BOUNDARY);
1512 } 1501 }
1513
1514 return;
1515} 1502}
1516 1503
1517/** 1504/**
@@ -1622,11 +1609,11 @@ static struct net_device_stats *get_stats(struct net_device *dev)
1622 1609
1623static inline void make_mc_bits(u8 *bits, struct net_device *dev) 1610static inline void make_mc_bits(u8 *bits, struct net_device *dev)
1624{ 1611{
1625 struct dev_mc_list *dmi; 1612 struct netdev_hw_addr *ha;
1626 u32 crc; 1613 u32 crc;
1627 1614
1628 netdev_for_each_mc_addr(dmi, dev) { 1615 netdev_for_each_mc_addr(ha, dev) {
1629 crc = ether_crc(ETH_ALEN, dmi->dmi_addr); 1616 crc = ether_crc(ETH_ALEN, ha->addr);
1630 /* 1617 /*
1631 * The 8390 uses the 6 most significant bits of the 1618 * The 8390 uses the 6 most significant bits of the
1632 * CRC to index the multicast table. 1619 * CRC to index the multicast table.
diff --git a/drivers/net/pcmcia/com20020_cs.c b/drivers/net/pcmcia/com20020_cs.c
index 21d9c9d815d1..5643f94541bc 100644
--- a/drivers/net/pcmcia/com20020_cs.c
+++ b/drivers/net/pcmcia/com20020_cs.c
@@ -122,7 +122,6 @@ static void com20020_detach(struct pcmcia_device *p_dev);
122 122
123typedef struct com20020_dev_t { 123typedef struct com20020_dev_t {
124 struct net_device *dev; 124 struct net_device *dev;
125 dev_node_t node;
126} com20020_dev_t; 125} com20020_dev_t;
127 126
128/*====================================================================== 127/*======================================================================
@@ -163,7 +162,6 @@ static int com20020_probe(struct pcmcia_device *p_dev)
163 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 162 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
164 p_dev->io.NumPorts1 = 16; 163 p_dev->io.NumPorts1 = 16;
165 p_dev->io.IOAddrLines = 16; 164 p_dev->io.IOAddrLines = 16;
166 p_dev->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
167 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 165 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
168 p_dev->conf.IntType = INT_MEMORY_AND_IO; 166 p_dev->conf.IntType = INT_MEMORY_AND_IO;
169 167
@@ -196,18 +194,16 @@ static void com20020_detach(struct pcmcia_device *link)
196 194
197 dev_dbg(&link->dev, "com20020_detach\n"); 195 dev_dbg(&link->dev, "com20020_detach\n");
198 196
199 if (link->dev_node) { 197 dev_dbg(&link->dev, "unregister...\n");
200 dev_dbg(&link->dev, "unregister...\n");
201 198
202 unregister_netdev(dev); 199 unregister_netdev(dev);
203 200
204 /* 201 /*
205 * this is necessary because we register our IRQ separately 202 * this is necessary because we register our IRQ separately
206 * from card services. 203 * from card services.
207 */ 204 */
208 if (dev->irq) 205 if (dev->irq)
209 free_irq(dev->irq, dev); 206 free_irq(dev->irq, dev);
210 }
211 207
212 com20020_release(link); 208 com20020_release(link);
213 209
@@ -275,15 +271,14 @@ static int com20020_config(struct pcmcia_device *link)
275 dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr); 271 dev_dbg(&link->dev, "got ioaddr %Xh\n", ioaddr);
276 272
277 dev_dbg(&link->dev, "request IRQ %d\n", 273 dev_dbg(&link->dev, "request IRQ %d\n",
278 link->irq.AssignedIRQ); 274 link->irq);
279 i = pcmcia_request_irq(link, &link->irq); 275 if (!link->irq)
280 if (i != 0)
281 { 276 {
282 dev_dbg(&link->dev, "requestIRQ failed totally!\n"); 277 dev_dbg(&link->dev, "requestIRQ failed totally!\n");
283 goto failed; 278 goto failed;
284 } 279 }
285 280
286 dev->irq = link->irq.AssignedIRQ; 281 dev->irq = link->irq;
287 282
288 ret = pcmcia_request_configuration(link, &link->conf); 283 ret = pcmcia_request_configuration(link, &link->conf);
289 if (ret) 284 if (ret)
@@ -299,7 +294,6 @@ static int com20020_config(struct pcmcia_device *link)
299 lp->card_name = "PCMCIA COM20020"; 294 lp->card_name = "PCMCIA COM20020";
300 lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */ 295 lp->card_flags = ARC_CAN_10MBIT; /* pretend all of them can 10Mbit */
301 296
302 link->dev_node = &info->node;
303 SET_NETDEV_DEV(dev, &link->dev); 297 SET_NETDEV_DEV(dev, &link->dev);
304 298
305 i = com20020_found(dev, 0); /* calls register_netdev */ 299 i = com20020_found(dev, 0); /* calls register_netdev */
@@ -307,12 +301,9 @@ static int com20020_config(struct pcmcia_device *link)
307 if (i != 0) { 301 if (i != 0) {
308 dev_printk(KERN_NOTICE, &link->dev, 302 dev_printk(KERN_NOTICE, &link->dev,
309 "com20020_cs: com20020_found() failed\n"); 303 "com20020_cs: com20020_found() failed\n");
310 link->dev_node = NULL;
311 goto failed; 304 goto failed;
312 } 305 }
313 306
314 strcpy(info->node.dev_name, dev->name);
315
316 dev_dbg(&link->dev,KERN_INFO "%s: port %#3lx, irq %d\n", 307 dev_dbg(&link->dev,KERN_INFO "%s: port %#3lx, irq %d\n",
317 dev->name, dev->base_addr, dev->irq); 308 dev->name, dev->base_addr, dev->irq);
318 return 0; 309 return 0;
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index b9dc80b9d04a..7c27c50211a5 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -110,7 +110,6 @@ typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN,
110*/ 110*/
111typedef struct local_info_t { 111typedef struct local_info_t {
112 struct pcmcia_device *p_dev; 112 struct pcmcia_device *p_dev;
113 dev_node_t node;
114 long open_time; 113 long open_time;
115 uint tx_started:1; 114 uint tx_started:1;
116 uint tx_queue; 115 uint tx_queue;
@@ -254,10 +253,6 @@ static int fmvj18x_probe(struct pcmcia_device *link)
254 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 253 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
255 link->io.IOAddrLines = 5; 254 link->io.IOAddrLines = 5;
256 255
257 /* Interrupt setup */
258 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
259 link->irq.Handler = fjn_interrupt;
260
261 /* General socket configuration */ 256 /* General socket configuration */
262 link->conf.Attributes = CONF_ENABLE_IRQ; 257 link->conf.Attributes = CONF_ENABLE_IRQ;
263 link->conf.IntType = INT_MEMORY_AND_IO; 258 link->conf.IntType = INT_MEMORY_AND_IO;
@@ -278,8 +273,7 @@ static void fmvj18x_detach(struct pcmcia_device *link)
278 273
279 dev_dbg(&link->dev, "fmvj18x_detach\n"); 274 dev_dbg(&link->dev, "fmvj18x_detach\n");
280 275
281 if (link->dev_node) 276 unregister_netdev(dev);
282 unregister_netdev(dev);
283 277
284 fmvj18x_release(link); 278 fmvj18x_release(link);
285 279
@@ -425,8 +419,6 @@ static int fmvj18x_config(struct pcmcia_device *link)
425 } 419 }
426 420
427 if (link->io.NumPorts2 != 0) { 421 if (link->io.NumPorts2 != 0) {
428 link->irq.Attributes =
429 IRQ_TYPE_DYNAMIC_SHARING;
430 ret = mfc_try_io_port(link); 422 ret = mfc_try_io_port(link);
431 if (ret != 0) goto failed; 423 if (ret != 0) goto failed;
432 } else if (cardtype == UNGERMANN) { 424 } else if (cardtype == UNGERMANN) {
@@ -437,14 +429,14 @@ static int fmvj18x_config(struct pcmcia_device *link)
437 if (ret) 429 if (ret)
438 goto failed; 430 goto failed;
439 } 431 }
440 ret = pcmcia_request_irq(link, &link->irq); 432 ret = pcmcia_request_irq(link, fjn_interrupt);
441 if (ret) 433 if (ret)
442 goto failed; 434 goto failed;
443 ret = pcmcia_request_configuration(link, &link->conf); 435 ret = pcmcia_request_configuration(link, &link->conf);
444 if (ret) 436 if (ret)
445 goto failed; 437 goto failed;
446 438
447 dev->irq = link->irq.AssignedIRQ; 439 dev->irq = link->irq;
448 dev->base_addr = link->io.BasePort1; 440 dev->base_addr = link->io.BasePort1;
449 441
450 if (link->io.BasePort2 != 0) { 442 if (link->io.BasePort2 != 0) {
@@ -529,17 +521,13 @@ static int fmvj18x_config(struct pcmcia_device *link)
529 } 521 }
530 522
531 lp->cardtype = cardtype; 523 lp->cardtype = cardtype;
532 link->dev_node = &lp->node;
533 SET_NETDEV_DEV(dev, &link->dev); 524 SET_NETDEV_DEV(dev, &link->dev);
534 525
535 if (register_netdev(dev) != 0) { 526 if (register_netdev(dev) != 0) {
536 printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n"); 527 printk(KERN_NOTICE "fmvj18x_cs: register_netdev() failed\n");
537 link->dev_node = NULL;
538 goto failed; 528 goto failed;
539 } 529 }
540 530
541 strcpy(lp->node.dev_name, dev->name);
542
543 /* print current configuration */ 531 /* print current configuration */
544 printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, " 532 printk(KERN_INFO "%s: %s, sram %s, port %#3lx, irq %d, "
545 "hw_addr %pM\n", 533 "hw_addr %pM\n",
@@ -890,7 +878,6 @@ static netdev_tx_t fjn_start_xmit(struct sk_buff *skb,
890 lp->sent = lp->tx_queue ; 878 lp->sent = lp->tx_queue ;
891 lp->tx_queue = 0; 879 lp->tx_queue = 0;
892 lp->tx_queue_len = 0; 880 lp->tx_queue_len = 0;
893 dev->trans_start = jiffies;
894 lp->tx_started = 1; 881 lp->tx_started = 1;
895 netif_start_queue(dev); 882 netif_start_queue(dev);
896 } else { 883 } else {
@@ -1082,8 +1069,6 @@ static void fjn_rx(struct net_device *dev)
1082 "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i); 1069 "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i);
1083 } 1070 }
1084*/ 1071*/
1085
1086 return;
1087} /* fjn_rx */ 1072} /* fjn_rx */
1088 1073
1089/*====================================================================*/ 1074/*====================================================================*/
@@ -1196,11 +1181,11 @@ static void set_rx_mode(struct net_device *dev)
1196 memset(mc_filter, 0x00, sizeof(mc_filter)); 1181 memset(mc_filter, 0x00, sizeof(mc_filter));
1197 outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ 1182 outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
1198 } else { 1183 } else {
1199 struct dev_mc_list *mclist; 1184 struct netdev_hw_addr *ha;
1200 1185
1201 memset(mc_filter, 0, sizeof(mc_filter)); 1186 memset(mc_filter, 0, sizeof(mc_filter));
1202 netdev_for_each_mc_addr(mclist, dev) { 1187 netdev_for_each_mc_addr(ha, dev) {
1203 unsigned int bit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26; 1188 unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
1204 mc_filter[bit >> 3] |= (1 << (bit & 7)); 1189 mc_filter[bit >> 3] |= (1 << (bit & 7));
1205 } 1190 }
1206 outb(2, ioaddr + RX_MODE); /* Use normal mode. */ 1191 outb(2, ioaddr + RX_MODE); /* Use normal mode. */
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index 37f4a6fdc3ef..67ee9851a8ed 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -104,7 +104,6 @@ static void ibmtr_detach(struct pcmcia_device *p_dev);
104typedef struct ibmtr_dev_t { 104typedef struct ibmtr_dev_t {
105 struct pcmcia_device *p_dev; 105 struct pcmcia_device *p_dev;
106 struct net_device *dev; 106 struct net_device *dev;
107 dev_node_t node;
108 window_handle_t sram_win_handle; 107 window_handle_t sram_win_handle;
109 struct tok_info *ti; 108 struct tok_info *ti;
110} ibmtr_dev_t; 109} ibmtr_dev_t;
@@ -156,8 +155,6 @@ static int __devinit ibmtr_attach(struct pcmcia_device *link)
156 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 155 link->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
157 link->io.NumPorts1 = 4; 156 link->io.NumPorts1 = 4;
158 link->io.IOAddrLines = 16; 157 link->io.IOAddrLines = 16;
159 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
160 link->irq.Handler = ibmtr_interrupt;
161 link->conf.Attributes = CONF_ENABLE_IRQ; 158 link->conf.Attributes = CONF_ENABLE_IRQ;
162 link->conf.IntType = INT_MEMORY_AND_IO; 159 link->conf.IntType = INT_MEMORY_AND_IO;
163 link->conf.Present = PRESENT_OPTION; 160 link->conf.Present = PRESENT_OPTION;
@@ -192,8 +189,7 @@ static void ibmtr_detach(struct pcmcia_device *link)
192 */ 189 */
193 ti->sram_phys |= 1; 190 ti->sram_phys |= 1;
194 191
195 if (link->dev_node) 192 unregister_netdev(dev);
196 unregister_netdev(dev);
197 193
198 del_timer_sync(&(ti->tr_timer)); 194 del_timer_sync(&(ti->tr_timer));
199 195
@@ -238,11 +234,11 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
238 } 234 }
239 dev->base_addr = link->io.BasePort1; 235 dev->base_addr = link->io.BasePort1;
240 236
241 ret = pcmcia_request_irq(link, &link->irq); 237 ret = pcmcia_request_exclusive_irq(link, ibmtr_interrupt);
242 if (ret) 238 if (ret)
243 goto failed; 239 goto failed;
244 dev->irq = link->irq.AssignedIRQ; 240 dev->irq = link->irq;
245 ti->irq = link->irq.AssignedIRQ; 241 ti->irq = link->irq;
246 ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq); 242 ti->global_int_enable=GLOBAL_INT_ENABLE+((dev->irq==9) ? 2 : dev->irq);
247 243
248 /* Allocate the MMIO memory window */ 244 /* Allocate the MMIO memory window */
@@ -291,18 +287,14 @@ static int __devinit ibmtr_config(struct pcmcia_device *link)
291 Adapters Technical Reference" SC30-3585 for this info. */ 287 Adapters Technical Reference" SC30-3585 for this info. */
292 ibmtr_hw_setup(dev, mmiobase); 288 ibmtr_hw_setup(dev, mmiobase);
293 289
294 link->dev_node = &info->node;
295 SET_NETDEV_DEV(dev, &link->dev); 290 SET_NETDEV_DEV(dev, &link->dev);
296 291
297 i = ibmtr_probe_card(dev); 292 i = ibmtr_probe_card(dev);
298 if (i != 0) { 293 if (i != 0) {
299 printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n"); 294 printk(KERN_NOTICE "ibmtr_cs: register_netdev() failed\n");
300 link->dev_node = NULL;
301 goto failed; 295 goto failed;
302 } 296 }
303 297
304 strcpy(info->node.dev_name, dev->name);
305
306 printk(KERN_INFO 298 printk(KERN_INFO
307 "%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n", 299 "%s: port %#3lx, irq %d, mmio %#5lx, sram %#5lx, hwaddr=%pM\n",
308 dev->name, dev->base_addr, dev->irq, 300 dev->name, dev->base_addr, dev->irq,
@@ -402,8 +394,6 @@ static void ibmtr_hw_setup(struct net_device *dev, u_int mmiobase)
402 394
403 /* 0x40 will release the card for use */ 395 /* 0x40 will release the card for use */
404 outb(0x40, dev->base_addr); 396 outb(0x40, dev->base_addr);
405
406 return;
407} 397}
408 398
409static struct pcmcia_device_id ibmtr_ids[] = { 399static struct pcmcia_device_id ibmtr_ids[] = {
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index c717b143f11a..9b63dec549cb 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -363,7 +363,6 @@ typedef struct _mace_statistics {
363 363
364typedef struct _mace_private { 364typedef struct _mace_private {
365 struct pcmcia_device *p_dev; 365 struct pcmcia_device *p_dev;
366 dev_node_t node;
367 struct net_device_stats linux_stats; /* Linux statistics counters */ 366 struct net_device_stats linux_stats; /* Linux statistics counters */
368 mace_statistics mace_stats; /* MACE chip statistics counters */ 367 mace_statistics mace_stats; /* MACE chip statistics counters */
369 368
@@ -463,8 +462,6 @@ static int nmclan_probe(struct pcmcia_device *link)
463 link->io.NumPorts1 = 32; 462 link->io.NumPorts1 = 32;
464 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 463 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
465 link->io.IOAddrLines = 5; 464 link->io.IOAddrLines = 5;
466 link->irq.Attributes = IRQ_TYPE_EXCLUSIVE;
467 link->irq.Handler = mace_interrupt;
468 link->conf.Attributes = CONF_ENABLE_IRQ; 465 link->conf.Attributes = CONF_ENABLE_IRQ;
469 link->conf.IntType = INT_MEMORY_AND_IO; 466 link->conf.IntType = INT_MEMORY_AND_IO;
470 link->conf.ConfigIndex = 1; 467 link->conf.ConfigIndex = 1;
@@ -493,8 +490,7 @@ static void nmclan_detach(struct pcmcia_device *link)
493 490
494 dev_dbg(&link->dev, "nmclan_detach\n"); 491 dev_dbg(&link->dev, "nmclan_detach\n");
495 492
496 if (link->dev_node) 493 unregister_netdev(dev);
497 unregister_netdev(dev);
498 494
499 nmclan_release(link); 495 nmclan_release(link);
500 496
@@ -652,14 +648,14 @@ static int nmclan_config(struct pcmcia_device *link)
652 ret = pcmcia_request_io(link, &link->io); 648 ret = pcmcia_request_io(link, &link->io);
653 if (ret) 649 if (ret)
654 goto failed; 650 goto failed;
655 ret = pcmcia_request_irq(link, &link->irq); 651 ret = pcmcia_request_exclusive_irq(link, mace_interrupt);
656 if (ret) 652 if (ret)
657 goto failed; 653 goto failed;
658 ret = pcmcia_request_configuration(link, &link->conf); 654 ret = pcmcia_request_configuration(link, &link->conf);
659 if (ret) 655 if (ret)
660 goto failed; 656 goto failed;
661 657
662 dev->irq = link->irq.AssignedIRQ; 658 dev->irq = link->irq;
663 dev->base_addr = link->io.BasePort1; 659 dev->base_addr = link->io.BasePort1;
664 660
665 ioaddr = dev->base_addr; 661 ioaddr = dev->base_addr;
@@ -698,18 +694,14 @@ static int nmclan_config(struct pcmcia_device *link)
698 else 694 else
699 printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n"); 695 printk(KERN_NOTICE "nmclan_cs: invalid if_port requested\n");
700 696
701 link->dev_node = &lp->node;
702 SET_NETDEV_DEV(dev, &link->dev); 697 SET_NETDEV_DEV(dev, &link->dev);
703 698
704 i = register_netdev(dev); 699 i = register_netdev(dev);
705 if (i != 0) { 700 if (i != 0) {
706 printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n"); 701 printk(KERN_NOTICE "nmclan_cs: register_netdev() failed\n");
707 link->dev_node = NULL;
708 goto failed; 702 goto failed;
709 } 703 }
710 704
711 strcpy(lp->node.dev_name, dev->name);
712
713 printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port," 705 printk(KERN_INFO "%s: nmclan: port %#3lx, irq %d, %s port,"
714 " hw_addr %pM\n", 706 " hw_addr %pM\n",
715 dev->name, dev->base_addr, dev->irq, if_names[dev->if_port], 707 dev->name, dev->base_addr, dev->irq, if_names[dev->if_port],
@@ -903,7 +895,7 @@ static void mace_tx_timeout(struct net_device *dev)
903#else /* #if RESET_ON_TIMEOUT */ 895#else /* #if RESET_ON_TIMEOUT */
904 printk("NOT resetting card\n"); 896 printk("NOT resetting card\n");
905#endif /* #if RESET_ON_TIMEOUT */ 897#endif /* #if RESET_ON_TIMEOUT */
906 dev->trans_start = jiffies; 898 dev->trans_start = jiffies; /* prevent tx timeout */
907 netif_wake_queue(dev); 899 netif_wake_queue(dev);
908} 900}
909 901
@@ -945,8 +937,6 @@ static netdev_tx_t mace_start_xmit(struct sk_buff *skb,
945 outb(skb->data[skb->len-1], ioaddr + AM2150_XMT); 937 outb(skb->data[skb->len-1], ioaddr + AM2150_XMT);
946 } 938 }
947 939
948 dev->trans_start = jiffies;
949
950#if MULTI_TX 940#if MULTI_TX
951 if (lp->tx_free_frames > 0) 941 if (lp->tx_free_frames > 0)
952 netif_start_queue(dev); 942 netif_start_queue(dev);
@@ -1315,8 +1305,6 @@ static void update_stats(unsigned int ioaddr, struct net_device *dev)
1315 lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo; 1305 lp->linux_stats.tx_fifo_errors = lp->mace_stats.uflo;
1316 lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr; 1306 lp->linux_stats.tx_heartbeat_errors = lp->mace_stats.cerr;
1317 /* lp->linux_stats.tx_window_errors; */ 1307 /* lp->linux_stats.tx_window_errors; */
1318
1319 return;
1320} /* update_stats */ 1308} /* update_stats */
1321 1309
1322/* ---------------------------------------------------------------------------- 1310/* ----------------------------------------------------------------------------
@@ -1475,7 +1463,7 @@ static void set_multicast_list(struct net_device *dev)
1475{ 1463{
1476 mace_private *lp = netdev_priv(dev); 1464 mace_private *lp = netdev_priv(dev);
1477 int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */ 1465 int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
1478 struct dev_mc_list *dmi; 1466 struct netdev_hw_addr *ha;
1479 1467
1480#ifdef PCMCIA_DEBUG 1468#ifdef PCMCIA_DEBUG
1481 { 1469 {
@@ -1495,8 +1483,8 @@ static void set_multicast_list(struct net_device *dev)
1495 if (num_addrs > 0) { 1483 if (num_addrs > 0) {
1496 /* Calculate multicast logical address filter */ 1484 /* Calculate multicast logical address filter */
1497 memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN); 1485 memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
1498 netdev_for_each_mc_addr(dmi, dev) { 1486 netdev_for_each_mc_addr(ha, dev) {
1499 memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN); 1487 memcpy(adr, ha->addr, ETHER_ADDR_LEN);
1500 BuildLAF(lp->multicast_ladrf, adr); 1488 BuildLAF(lp->multicast_ladrf, adr);
1501 } 1489 }
1502 } 1490 }
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 4c0368de1815..6f77a768ba88 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -208,7 +208,6 @@ static hw_info_t dl10022_info = { 0, 0, 0, 0, IS_DL10022|HAS_MII };
208 208
209typedef struct pcnet_dev_t { 209typedef struct pcnet_dev_t {
210 struct pcmcia_device *p_dev; 210 struct pcmcia_device *p_dev;
211 dev_node_t node;
212 u_int flags; 211 u_int flags;
213 void __iomem *base; 212 void __iomem *base;
214 struct timer_list watchdog; 213 struct timer_list watchdog;
@@ -264,7 +263,6 @@ static int pcnet_probe(struct pcmcia_device *link)
264 info->p_dev = link; 263 info->p_dev = link;
265 link->priv = dev; 264 link->priv = dev;
266 265
267 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
268 link->conf.Attributes = CONF_ENABLE_IRQ; 266 link->conf.Attributes = CONF_ENABLE_IRQ;
269 link->conf.IntType = INT_MEMORY_AND_IO; 267 link->conf.IntType = INT_MEMORY_AND_IO;
270 268
@@ -288,8 +286,7 @@ static void pcnet_detach(struct pcmcia_device *link)
288 286
289 dev_dbg(&link->dev, "pcnet_detach\n"); 287 dev_dbg(&link->dev, "pcnet_detach\n");
290 288
291 if (link->dev_node) 289 unregister_netdev(dev);
292 unregister_netdev(dev);
293 290
294 pcnet_release(link); 291 pcnet_release(link);
295 292
@@ -488,8 +485,6 @@ static int try_io_port(struct pcmcia_device *link)
488 if (link->io.NumPorts2 > 0) { 485 if (link->io.NumPorts2 > 0) {
489 /* for master/slave multifunction cards */ 486 /* for master/slave multifunction cards */
490 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 487 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
491 link->irq.Attributes =
492 IRQ_TYPE_DYNAMIC_SHARING;
493 } 488 }
494 } else { 489 } else {
495 /* This should be two 16-port windows */ 490 /* This should be two 16-port windows */
@@ -559,8 +554,7 @@ static int pcnet_config(struct pcmcia_device *link)
559 if (ret) 554 if (ret)
560 goto failed; 555 goto failed;
561 556
562 ret = pcmcia_request_irq(link, &link->irq); 557 if (!link->irq)
563 if (ret)
564 goto failed; 558 goto failed;
565 559
566 if (link->io.NumPorts2 == 8) { 560 if (link->io.NumPorts2 == 8) {
@@ -574,7 +568,7 @@ static int pcnet_config(struct pcmcia_device *link)
574 ret = pcmcia_request_configuration(link, &link->conf); 568 ret = pcmcia_request_configuration(link, &link->conf);
575 if (ret) 569 if (ret)
576 goto failed; 570 goto failed;
577 dev->irq = link->irq.AssignedIRQ; 571 dev->irq = link->irq;
578 dev->base_addr = link->io.BasePort1; 572 dev->base_addr = link->io.BasePort1;
579 if (info->flags & HAS_MISC_REG) { 573 if (info->flags & HAS_MISC_REG) {
580 if ((if_port == 1) || (if_port == 2)) 574 if ((if_port == 1) || (if_port == 2))
@@ -643,17 +637,13 @@ static int pcnet_config(struct pcmcia_device *link)
643 if (info->flags & (IS_DL10019|IS_DL10022)) 637 if (info->flags & (IS_DL10019|IS_DL10022))
644 mii_phy_probe(dev); 638 mii_phy_probe(dev);
645 639
646 link->dev_node = &info->node;
647 SET_NETDEV_DEV(dev, &link->dev); 640 SET_NETDEV_DEV(dev, &link->dev);
648 641
649 if (register_netdev(dev) != 0) { 642 if (register_netdev(dev) != 0) {
650 printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n"); 643 printk(KERN_NOTICE "pcnet_cs: register_netdev() failed\n");
651 link->dev_node = NULL;
652 goto failed; 644 goto failed;
653 } 645 }
654 646
655 strcpy(info->node.dev_name, dev->name);
656
657 if (info->flags & (IS_DL10019|IS_DL10022)) { 647 if (info->flags & (IS_DL10019|IS_DL10022)) {
658 u_char id = inb(dev->base_addr + 0x1a); 648 u_char id = inb(dev->base_addr + 0x1a);
659 printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ", 649 printk(KERN_INFO "%s: NE2000 (DL100%d rev %02x): ",
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index fd9d6e34fda4..7b6fe89f9db0 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -103,7 +103,6 @@ struct smc_private {
103 u_short manfid; 103 u_short manfid;
104 u_short cardid; 104 u_short cardid;
105 105
106 dev_node_t node;
107 struct sk_buff *saved_skb; 106 struct sk_buff *saved_skb;
108 int packets_waiting; 107 int packets_waiting;
109 void __iomem *base; 108 void __iomem *base;
@@ -323,14 +322,11 @@ static int smc91c92_probe(struct pcmcia_device *link)
323 return -ENOMEM; 322 return -ENOMEM;
324 smc = netdev_priv(dev); 323 smc = netdev_priv(dev);
325 smc->p_dev = link; 324 smc->p_dev = link;
326 link->priv = dev;
327 325
328 spin_lock_init(&smc->lock); 326 spin_lock_init(&smc->lock);
329 link->io.NumPorts1 = 16; 327 link->io.NumPorts1 = 16;
330 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; 328 link->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
331 link->io.IOAddrLines = 4; 329 link->io.IOAddrLines = 4;
332 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
333 link->irq.Handler = &smc_interrupt;
334 link->conf.Attributes = CONF_ENABLE_IRQ; 330 link->conf.Attributes = CONF_ENABLE_IRQ;
335 link->conf.IntType = INT_MEMORY_AND_IO; 331 link->conf.IntType = INT_MEMORY_AND_IO;
336 332
@@ -363,8 +359,7 @@ static void smc91c92_detach(struct pcmcia_device *link)
363 359
364 dev_dbg(&link->dev, "smc91c92_detach\n"); 360 dev_dbg(&link->dev, "smc91c92_detach\n");
365 361
366 if (link->dev_node) 362 unregister_netdev(dev);
367 unregister_netdev(dev);
368 363
369 smc91c92_release(link); 364 smc91c92_release(link);
370 365
@@ -453,7 +448,6 @@ static int mhz_mfc_config(struct pcmcia_device *link)
453 448
454 link->conf.Attributes |= CONF_ENABLE_SPKR; 449 link->conf.Attributes |= CONF_ENABLE_SPKR;
455 link->conf.Status = CCSR_AUDIO_ENA; 450 link->conf.Status = CCSR_AUDIO_ENA;
456 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
457 link->io.IOAddrLines = 16; 451 link->io.IOAddrLines = 16;
458 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 452 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
459 link->io.NumPorts2 = 8; 453 link->io.NumPorts2 = 8;
@@ -652,7 +646,6 @@ static int osi_config(struct pcmcia_device *link)
652 646
653 link->conf.Attributes |= CONF_ENABLE_SPKR; 647 link->conf.Attributes |= CONF_ENABLE_SPKR;
654 link->conf.Status = CCSR_AUDIO_ENA; 648 link->conf.Status = CCSR_AUDIO_ENA;
655 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
656 link->io.NumPorts1 = 64; 649 link->io.NumPorts1 = 64;
657 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 650 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
658 link->io.NumPorts2 = 8; 651 link->io.NumPorts2 = 8;
@@ -877,7 +870,7 @@ static int smc91c92_config(struct pcmcia_device *link)
877 if (i) 870 if (i)
878 goto config_failed; 871 goto config_failed;
879 872
880 i = pcmcia_request_irq(link, &link->irq); 873 i = pcmcia_request_irq(link, smc_interrupt);
881 if (i) 874 if (i)
882 goto config_failed; 875 goto config_failed;
883 i = pcmcia_request_configuration(link, &link->conf); 876 i = pcmcia_request_configuration(link, &link->conf);
@@ -887,7 +880,7 @@ static int smc91c92_config(struct pcmcia_device *link)
887 if (smc->manfid == MANFID_MOTOROLA) 880 if (smc->manfid == MANFID_MOTOROLA)
888 mot_config(link); 881 mot_config(link);
889 882
890 dev->irq = link->irq.AssignedIRQ; 883 dev->irq = link->irq;
891 884
892 if ((if_port >= 0) && (if_port <= 2)) 885 if ((if_port >= 0) && (if_port <= 2))
893 dev->if_port = if_port; 886 dev->if_port = if_port;
@@ -960,17 +953,13 @@ static int smc91c92_config(struct pcmcia_device *link)
960 SMC_SELECT_BANK(0); 953 SMC_SELECT_BANK(0);
961 } 954 }
962 955
963 link->dev_node = &smc->node;
964 SET_NETDEV_DEV(dev, &link->dev); 956 SET_NETDEV_DEV(dev, &link->dev);
965 957
966 if (register_netdev(dev) != 0) { 958 if (register_netdev(dev) != 0) {
967 printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n"); 959 printk(KERN_ERR "smc91c92_cs: register_netdev() failed\n");
968 link->dev_node = NULL;
969 goto config_undo; 960 goto config_undo;
970 } 961 }
971 962
972 strcpy(smc->node.dev_name, dev->name);
973
974 printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, " 963 printk(KERN_INFO "%s: smc91c%s rev %d: io %#3lx, irq %d, "
975 "hw_addr %pM\n", 964 "hw_addr %pM\n",
976 dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq, 965 dev->name, name, (rev & 0x0f), dev->base_addr, dev->irq,
@@ -1239,7 +1228,6 @@ static void smc_hardware_send_packet(struct net_device * dev)
1239 dev_kfree_skb_irq(skb); 1228 dev_kfree_skb_irq(skb);
1240 dev->trans_start = jiffies; 1229 dev->trans_start = jiffies;
1241 netif_start_queue(dev); 1230 netif_start_queue(dev);
1242 return;
1243} 1231}
1244 1232
1245/*====================================================================*/ 1233/*====================================================================*/
@@ -1254,7 +1242,7 @@ static void smc_tx_timeout(struct net_device *dev)
1254 dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2)); 1242 dev->name, inw(ioaddr)&0xff, inw(ioaddr + 2));
1255 dev->stats.tx_errors++; 1243 dev->stats.tx_errors++;
1256 smc_reset(dev); 1244 smc_reset(dev);
1257 dev->trans_start = jiffies; 1245 dev->trans_start = jiffies; /* prevent tx timeout */
1258 smc->saved_skb = NULL; 1246 smc->saved_skb = NULL;
1259 netif_wake_queue(dev); 1247 netif_wake_queue(dev);
1260} 1248}
@@ -1369,7 +1357,6 @@ static void smc_tx_err(struct net_device * dev)
1369 smc->packets_waiting--; 1357 smc->packets_waiting--;
1370 1358
1371 outw(saved_packet, ioaddr + PNR_ARR); 1359 outw(saved_packet, ioaddr + PNR_ARR);
1372 return;
1373} 1360}
1374 1361
1375/*====================================================================*/ 1362/*====================================================================*/
@@ -1589,8 +1576,6 @@ static void smc_rx(struct net_device *dev)
1589 } 1576 }
1590 /* Let the MMU free the memory of this packet. */ 1577 /* Let the MMU free the memory of this packet. */
1591 outw(MC_RELEASE, ioaddr + MMU_CMD); 1578 outw(MC_RELEASE, ioaddr + MMU_CMD);
1592
1593 return;
1594} 1579}
1595 1580
1596/*====================================================================== 1581/*======================================================================
@@ -1621,10 +1606,10 @@ static void set_rx_mode(struct net_device *dev)
1621 rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti; 1606 rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
1622 else { 1607 else {
1623 if (!netdev_mc_empty(dev)) { 1608 if (!netdev_mc_empty(dev)) {
1624 struct dev_mc_list *mc_addr; 1609 struct netdev_hw_addr *ha;
1625 1610
1626 netdev_for_each_mc_addr(mc_addr, dev) { 1611 netdev_for_each_mc_addr(ha, dev) {
1627 u_int position = ether_crc(6, mc_addr->dmi_addr); 1612 u_int position = ether_crc(6, ha->addr);
1628 multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); 1613 multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
1629 } 1614 }
1630 } 1615 }
@@ -1640,8 +1625,6 @@ static void set_rx_mode(struct net_device *dev)
1640 outw(rx_cfg_setting, ioaddr + RCR); 1625 outw(rx_cfg_setting, ioaddr + RCR);
1641 SMC_SELECT_BANK(2); 1626 SMC_SELECT_BANK(2);
1642 spin_unlock_irqrestore(&smc->lock, flags); 1627 spin_unlock_irqrestore(&smc->lock, flags);
1643
1644 return;
1645} 1628}
1646 1629
1647/*====================================================================== 1630/*======================================================================
@@ -1804,23 +1787,30 @@ static void media_check(u_long arg)
1804 SMC_SELECT_BANK(1); 1787 SMC_SELECT_BANK(1);
1805 media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1; 1788 media |= (inw(ioaddr + CONFIG) & CFG_AUI_SELECT) ? 2 : 1;
1806 1789
1790 SMC_SELECT_BANK(saved_bank);
1791 spin_unlock_irqrestore(&smc->lock, flags);
1792
1807 /* Check for pending interrupt with watchdog flag set: with 1793 /* Check for pending interrupt with watchdog flag set: with
1808 this, we can limp along even if the interrupt is blocked */ 1794 this, we can limp along even if the interrupt is blocked */
1809 if (smc->watchdog++ && ((i>>8) & i)) { 1795 if (smc->watchdog++ && ((i>>8) & i)) {
1810 if (!smc->fast_poll) 1796 if (!smc->fast_poll)
1811 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name); 1797 printk(KERN_INFO "%s: interrupt(s) dropped!\n", dev->name);
1798 local_irq_save(flags);
1812 smc_interrupt(dev->irq, dev); 1799 smc_interrupt(dev->irq, dev);
1800 local_irq_restore(flags);
1813 smc->fast_poll = HZ; 1801 smc->fast_poll = HZ;
1814 } 1802 }
1815 if (smc->fast_poll) { 1803 if (smc->fast_poll) {
1816 smc->fast_poll--; 1804 smc->fast_poll--;
1817 smc->media.expires = jiffies + HZ/100; 1805 smc->media.expires = jiffies + HZ/100;
1818 add_timer(&smc->media); 1806 add_timer(&smc->media);
1819 SMC_SELECT_BANK(saved_bank);
1820 spin_unlock_irqrestore(&smc->lock, flags);
1821 return; 1807 return;
1822 } 1808 }
1823 1809
1810 spin_lock_irqsave(&smc->lock, flags);
1811
1812 saved_bank = inw(ioaddr + BANK_SELECT);
1813
1824 if (smc->cfg & CFG_MII_SELECT) { 1814 if (smc->cfg & CFG_MII_SELECT) {
1825 if (smc->mii_if.phy_id < 0) 1815 if (smc->mii_if.phy_id < 0)
1826 goto reschedule; 1816 goto reschedule;
@@ -1978,15 +1968,16 @@ static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1978 unsigned int ioaddr = dev->base_addr; 1968 unsigned int ioaddr = dev->base_addr;
1979 u16 saved_bank = inw(ioaddr + BANK_SELECT); 1969 u16 saved_bank = inw(ioaddr + BANK_SELECT);
1980 int ret; 1970 int ret;
1971 unsigned long flags;
1981 1972
1982 spin_lock_irq(&smc->lock); 1973 spin_lock_irqsave(&smc->lock, flags);
1983 SMC_SELECT_BANK(3); 1974 SMC_SELECT_BANK(3);
1984 if (smc->cfg & CFG_MII_SELECT) 1975 if (smc->cfg & CFG_MII_SELECT)
1985 ret = mii_ethtool_gset(&smc->mii_if, ecmd); 1976 ret = mii_ethtool_gset(&smc->mii_if, ecmd);
1986 else 1977 else
1987 ret = smc_netdev_get_ecmd(dev, ecmd); 1978 ret = smc_netdev_get_ecmd(dev, ecmd);
1988 SMC_SELECT_BANK(saved_bank); 1979 SMC_SELECT_BANK(saved_bank);
1989 spin_unlock_irq(&smc->lock); 1980 spin_unlock_irqrestore(&smc->lock, flags);
1990 return ret; 1981 return ret;
1991} 1982}
1992 1983
@@ -1996,15 +1987,16 @@ static int smc_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1996 unsigned int ioaddr = dev->base_addr; 1987 unsigned int ioaddr = dev->base_addr;
1997 u16 saved_bank = inw(ioaddr + BANK_SELECT); 1988 u16 saved_bank = inw(ioaddr + BANK_SELECT);
1998 int ret; 1989 int ret;
1990 unsigned long flags;
1999 1991
2000 spin_lock_irq(&smc->lock); 1992 spin_lock_irqsave(&smc->lock, flags);
2001 SMC_SELECT_BANK(3); 1993 SMC_SELECT_BANK(3);
2002 if (smc->cfg & CFG_MII_SELECT) 1994 if (smc->cfg & CFG_MII_SELECT)
2003 ret = mii_ethtool_sset(&smc->mii_if, ecmd); 1995 ret = mii_ethtool_sset(&smc->mii_if, ecmd);
2004 else 1996 else
2005 ret = smc_netdev_set_ecmd(dev, ecmd); 1997 ret = smc_netdev_set_ecmd(dev, ecmd);
2006 SMC_SELECT_BANK(saved_bank); 1998 SMC_SELECT_BANK(saved_bank);
2007 spin_unlock_irq(&smc->lock); 1999 spin_unlock_irqrestore(&smc->lock, flags);
2008 return ret; 2000 return ret;
2009} 2001}
2010 2002
@@ -2014,12 +2006,13 @@ static u32 smc_get_link(struct net_device *dev)
2014 unsigned int ioaddr = dev->base_addr; 2006 unsigned int ioaddr = dev->base_addr;
2015 u16 saved_bank = inw(ioaddr + BANK_SELECT); 2007 u16 saved_bank = inw(ioaddr + BANK_SELECT);
2016 u32 ret; 2008 u32 ret;
2009 unsigned long flags;
2017 2010
2018 spin_lock_irq(&smc->lock); 2011 spin_lock_irqsave(&smc->lock, flags);
2019 SMC_SELECT_BANK(3); 2012 SMC_SELECT_BANK(3);
2020 ret = smc_link_ok(dev); 2013 ret = smc_link_ok(dev);
2021 SMC_SELECT_BANK(saved_bank); 2014 SMC_SELECT_BANK(saved_bank);
2022 spin_unlock_irq(&smc->lock); 2015 spin_unlock_irqrestore(&smc->lock, flags);
2023 return ret; 2016 return ret;
2024} 2017}
2025 2018
@@ -2056,16 +2049,17 @@ static int smc_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
2056 int rc = 0; 2049 int rc = 0;
2057 u16 saved_bank; 2050 u16 saved_bank;
2058 unsigned int ioaddr = dev->base_addr; 2051 unsigned int ioaddr = dev->base_addr;
2052 unsigned long flags;
2059 2053
2060 if (!netif_running(dev)) 2054 if (!netif_running(dev))
2061 return -EINVAL; 2055 return -EINVAL;
2062 2056
2063 spin_lock_irq(&smc->lock); 2057 spin_lock_irqsave(&smc->lock, flags);
2064 saved_bank = inw(ioaddr + BANK_SELECT); 2058 saved_bank = inw(ioaddr + BANK_SELECT);
2065 SMC_SELECT_BANK(3); 2059 SMC_SELECT_BANK(3);
2066 rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL); 2060 rc = generic_mii_ioctl(&smc->mii_if, mii, cmd, NULL);
2067 SMC_SELECT_BANK(saved_bank); 2061 SMC_SELECT_BANK(saved_bank);
2068 spin_unlock_irq(&smc->lock); 2062 spin_unlock_irqrestore(&smc->lock, flags);
2069 return rc; 2063 return rc;
2070} 2064}
2071 2065
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 4d1802e457be..b6c3644888cd 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -297,31 +297,9 @@ static void xirc2ps_detach(struct pcmcia_device *p_dev);
297 297
298static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id); 298static irqreturn_t xirc2ps_interrupt(int irq, void *dev_id);
299 299
300/****************
301 * A linked list of "instances" of the device. Each actual
302 * PCMCIA card corresponds to one device instance, and is described
303 * by one struct pcmcia_device structure (defined in ds.h).
304 *
305 * You may not want to use a linked list for this -- for example, the
306 * memory card driver uses an array of struct pcmcia_device pointers, where minor
307 * device numbers are used to derive the corresponding array index.
308 */
309
310/****************
311 * A driver needs to provide a dev_node_t structure for each device
312 * on a card. In some cases, there is only one device per card (for
313 * example, ethernet cards, modems). In other cases, there may be
314 * many actual or logical devices (SCSI adapters, memory cards with
315 * multiple partitions). The dev_node_t structures need to be kept
316 * in a linked list starting at the 'dev' field of a struct pcmcia_device
317 * structure. We allocate them in the card's private data structure,
318 * because they generally can't be allocated dynamically.
319 */
320
321typedef struct local_info_t { 300typedef struct local_info_t {
322 struct net_device *dev; 301 struct net_device *dev;
323 struct pcmcia_device *p_dev; 302 struct pcmcia_device *p_dev;
324 dev_node_t node;
325 303
326 int card_type; 304 int card_type;
327 int probe_port; 305 int probe_port;
@@ -555,7 +533,6 @@ xirc2ps_probe(struct pcmcia_device *link)
555 link->conf.Attributes = CONF_ENABLE_IRQ; 533 link->conf.Attributes = CONF_ENABLE_IRQ;
556 link->conf.IntType = INT_MEMORY_AND_IO; 534 link->conf.IntType = INT_MEMORY_AND_IO;
557 link->conf.ConfigIndex = 1; 535 link->conf.ConfigIndex = 1;
558 link->irq.Handler = xirc2ps_interrupt;
559 536
560 /* Fill in card specific entries */ 537 /* Fill in card specific entries */
561 dev->netdev_ops = &netdev_ops; 538 dev->netdev_ops = &netdev_ops;
@@ -580,8 +557,7 @@ xirc2ps_detach(struct pcmcia_device *link)
580 557
581 dev_dbg(&link->dev, "detach\n"); 558 dev_dbg(&link->dev, "detach\n");
582 559
583 if (link->dev_node) 560 unregister_netdev(dev);
584 unregister_netdev(dev);
585 561
586 xirc2ps_release(link); 562 xirc2ps_release(link);
587 563
@@ -841,7 +817,6 @@ xirc2ps_config(struct pcmcia_device * link)
841 link->conf.Attributes |= CONF_ENABLE_SPKR; 817 link->conf.Attributes |= CONF_ENABLE_SPKR;
842 link->conf.Status |= CCSR_AUDIO_ENA; 818 link->conf.Status |= CCSR_AUDIO_ENA;
843 } 819 }
844 link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
845 link->io.NumPorts2 = 8; 820 link->io.NumPorts2 = 8;
846 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8; 821 link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
847 if (local->dingo) { 822 if (local->dingo) {
@@ -866,7 +841,6 @@ xirc2ps_config(struct pcmcia_device * link)
866 } 841 }
867 printk(KNOT_XIRC "no ports available\n"); 842 printk(KNOT_XIRC "no ports available\n");
868 } else { 843 } else {
869 link->irq.Attributes |= IRQ_TYPE_DYNAMIC_SHARING;
870 link->io.NumPorts1 = 16; 844 link->io.NumPorts1 = 16;
871 for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) { 845 for (ioaddr = 0x300; ioaddr < 0x400; ioaddr += 0x10) {
872 link->io.BasePort1 = ioaddr; 846 link->io.BasePort1 = ioaddr;
@@ -885,7 +859,7 @@ xirc2ps_config(struct pcmcia_device * link)
885 * Now allocate an interrupt line. Note that this does not 859 * Now allocate an interrupt line. Note that this does not
886 * actually assign a handler to the interrupt. 860 * actually assign a handler to the interrupt.
887 */ 861 */
888 if ((err=pcmcia_request_irq(link, &link->irq))) 862 if ((err=pcmcia_request_irq(link, xirc2ps_interrupt)))
889 goto config_error; 863 goto config_error;
890 864
891 /**************** 865 /****************
@@ -982,23 +956,19 @@ xirc2ps_config(struct pcmcia_device * link)
982 printk(KNOT_XIRC "invalid if_port requested\n"); 956 printk(KNOT_XIRC "invalid if_port requested\n");
983 957
984 /* we can now register the device with the net subsystem */ 958 /* we can now register the device with the net subsystem */
985 dev->irq = link->irq.AssignedIRQ; 959 dev->irq = link->irq;
986 dev->base_addr = link->io.BasePort1; 960 dev->base_addr = link->io.BasePort1;
987 961
988 if (local->dingo) 962 if (local->dingo)
989 do_reset(dev, 1); /* a kludge to make the cem56 work */ 963 do_reset(dev, 1); /* a kludge to make the cem56 work */
990 964
991 link->dev_node = &local->node;
992 SET_NETDEV_DEV(dev, &link->dev); 965 SET_NETDEV_DEV(dev, &link->dev);
993 966
994 if ((err=register_netdev(dev))) { 967 if ((err=register_netdev(dev))) {
995 printk(KNOT_XIRC "register_netdev() failed\n"); 968 printk(KNOT_XIRC "register_netdev() failed\n");
996 link->dev_node = NULL;
997 goto config_error; 969 goto config_error;
998 } 970 }
999 971
1000 strcpy(local->node.dev_name, dev->name);
1001
1002 /* give some infos about the hardware */ 972 /* give some infos about the hardware */
1003 printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %pM\n", 973 printk(KERN_INFO "%s: %s: port %#3lx, irq %d, hwaddr %pM\n",
1004 dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq, 974 dev->name, local->manf_str,(u_long)dev->base_addr, (int)dev->irq,
@@ -1295,7 +1265,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work)
1295 struct net_device *dev = local->dev; 1265 struct net_device *dev = local->dev;
1296 /* reset the card */ 1266 /* reset the card */
1297 do_reset(dev,1); 1267 do_reset(dev,1);
1298 dev->trans_start = jiffies; 1268 dev->trans_start = jiffies; /* prevent tx timeout */
1299 netif_wake_queue(dev); 1269 netif_wake_queue(dev);
1300} 1270}
1301 1271
@@ -1358,7 +1328,6 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
1358 PutByte(XIRCREG_CR, TransmitPacket|EnableIntr); 1328 PutByte(XIRCREG_CR, TransmitPacket|EnableIntr);
1359 1329
1360 dev_kfree_skb (skb); 1330 dev_kfree_skb (skb);
1361 dev->trans_start = jiffies;
1362 dev->stats.tx_bytes += pktlen; 1331 dev->stats.tx_bytes += pktlen;
1363 netif_start_queue(dev); 1332 netif_start_queue(dev);
1364 return NETDEV_TX_OK; 1333 return NETDEV_TX_OK;
@@ -1398,7 +1367,7 @@ static void set_addresses(struct net_device *dev)
1398{ 1367{
1399 unsigned int ioaddr = dev->base_addr; 1368 unsigned int ioaddr = dev->base_addr;
1400 local_info_t *lp = netdev_priv(dev); 1369 local_info_t *lp = netdev_priv(dev);
1401 struct dev_mc_list *dmi; 1370 struct netdev_hw_addr *ha;
1402 struct set_address_info sa_info; 1371 struct set_address_info sa_info;
1403 int i; 1372 int i;
1404 1373
@@ -1413,10 +1382,10 @@ static void set_addresses(struct net_device *dev)
1413 1382
1414 set_address(&sa_info, dev->dev_addr); 1383 set_address(&sa_info, dev->dev_addr);
1415 i = 0; 1384 i = 0;
1416 netdev_for_each_mc_addr(dmi, dev) { 1385 netdev_for_each_mc_addr(ha, dev) {
1417 if (i++ == 9) 1386 if (i++ == 9)
1418 break; 1387 break;
1419 set_address(&sa_info, dmi->dmi_addr); 1388 set_address(&sa_info, ha->addr);
1420 } 1389 }
1421 while (i++ < 9) 1390 while (i++ < 9)
1422 set_address(&sa_info, dev->dev_addr); 1391 set_address(&sa_info, dev->dev_addr);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 084d78dd1637..c200c2821730 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -448,7 +448,7 @@ static void pcnet32_netif_stop(struct net_device *dev)
448{ 448{
449 struct pcnet32_private *lp = netdev_priv(dev); 449 struct pcnet32_private *lp = netdev_priv(dev);
450 450
451 dev->trans_start = jiffies; 451 dev->trans_start = jiffies; /* prevent tx timeout */
452 napi_disable(&lp->napi); 452 napi_disable(&lp->napi);
453 netif_tx_disable(dev); 453 netif_tx_disable(dev);
454} 454}
@@ -647,7 +647,6 @@ free_new_rx_ring:
647 (1 << size), 647 (1 << size),
648 new_rx_ring, 648 new_rx_ring,
649 new_ring_dma_addr); 649 new_ring_dma_addr);
650 return;
651} 650}
652 651
653static void pcnet32_purge_rx_ring(struct net_device *dev) 652static void pcnet32_purge_rx_ring(struct net_device *dev)
@@ -1215,7 +1214,6 @@ static void pcnet32_rx_entry(struct net_device *dev,
1215 skb->protocol = eth_type_trans(skb, dev); 1214 skb->protocol = eth_type_trans(skb, dev);
1216 netif_receive_skb(skb); 1215 netif_receive_skb(skb);
1217 dev->stats.rx_packets++; 1216 dev->stats.rx_packets++;
1218 return;
1219} 1217}
1220 1218
1221static int pcnet32_rx(struct net_device *dev, int budget) 1219static int pcnet32_rx(struct net_device *dev, int budget)
@@ -2398,7 +2396,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
2398 } 2396 }
2399 pcnet32_restart(dev, CSR0_NORMAL); 2397 pcnet32_restart(dev, CSR0_NORMAL);
2400 2398
2401 dev->trans_start = jiffies; 2399 dev->trans_start = jiffies; /* prevent tx timeout */
2402 netif_wake_queue(dev); 2400 netif_wake_queue(dev);
2403 2401
2404 spin_unlock_irqrestore(&lp->lock, flags); 2402 spin_unlock_irqrestore(&lp->lock, flags);
@@ -2449,8 +2447,6 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
2449 /* Trigger an immediate send poll. */ 2447 /* Trigger an immediate send poll. */
2450 lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL); 2448 lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
2451 2449
2452 dev->trans_start = jiffies;
2453
2454 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) { 2450 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
2455 lp->tx_full = 1; 2451 lp->tx_full = 1;
2456 netif_stop_queue(dev); 2452 netif_stop_queue(dev);
@@ -2590,7 +2586,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
2590 struct pcnet32_private *lp = netdev_priv(dev); 2586 struct pcnet32_private *lp = netdev_priv(dev);
2591 volatile struct pcnet32_init_block *ib = lp->init_block; 2587 volatile struct pcnet32_init_block *ib = lp->init_block;
2592 volatile __le16 *mcast_table = (__le16 *)ib->filter; 2588 volatile __le16 *mcast_table = (__le16 *)ib->filter;
2593 struct dev_mc_list *dmi; 2589 struct netdev_hw_addr *ha;
2594 unsigned long ioaddr = dev->base_addr; 2590 unsigned long ioaddr = dev->base_addr;
2595 char *addrs; 2591 char *addrs;
2596 int i; 2592 int i;
@@ -2611,8 +2607,8 @@ static void pcnet32_load_multicast(struct net_device *dev)
2611 ib->filter[1] = 0; 2607 ib->filter[1] = 0;
2612 2608
2613 /* Add addresses */ 2609 /* Add addresses */
2614 netdev_for_each_mc_addr(dmi, dev) { 2610 netdev_for_each_mc_addr(ha, dev) {
2615 addrs = dmi->dmi_addr; 2611 addrs = ha->addr;
2616 2612
2617 /* multicast address? */ 2613 /* multicast address? */
2618 if (!(*addrs & 1)) 2614 if (!(*addrs & 1))
@@ -2625,7 +2621,6 @@ static void pcnet32_load_multicast(struct net_device *dev)
2625 for (i = 0; i < 4; i++) 2621 for (i = 0; i < 4; i++)
2626 lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i, 2622 lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
2627 le16_to_cpu(mcast_table[i])); 2623 le16_to_cpu(mcast_table[i]));
2628 return;
2629} 2624}
2630 2625
2631/* 2626/*
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index fc5938ba3d78..a527e37728cd 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -88,6 +88,11 @@ config LSI_ET1011C_PHY
88 ---help--- 88 ---help---
89 Supports the LSI ET1011C PHY. 89 Supports the LSI ET1011C PHY.
90 90
91config MICREL_PHY
92 tristate "Driver for Micrel PHYs"
93 ---help---
94 Supports the KSZ9021, VSC8201, KS8001 PHYs.
95
91config FIXED_PHY 96config FIXED_PHY
92 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" 97 bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
93 depends on PHYLIB=y 98 depends on PHYLIB=y
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 1342585af381..13bebab65d02 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -20,4 +20,5 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o 20obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
21obj-$(CONFIG_NATIONAL_PHY) += national.o 21obj-$(CONFIG_NATIONAL_PHY) += national.o
22obj-$(CONFIG_STE10XP) += ste10Xp.o 22obj-$(CONFIG_STE10XP) += ste10Xp.o
23obj-$(CONFIG_MICREL_PHY) += micrel.o
23obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o 24obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 4fed95e8350e..c12815679837 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -130,3 +130,11 @@ static void __exit bcm63xx_phy_exit(void)
130 130
131module_init(bcm63xx_phy_init); 131module_init(bcm63xx_phy_init);
132module_exit(bcm63xx_phy_exit); 132module_exit(bcm63xx_phy_exit);
133
134static struct mdio_device_id bcm63xx_tbl[] = {
135 { 0x00406000, 0xfffffc00 },
136 { 0x002bdc00, 0xfffffc00 },
137 { }
138};
139
140MODULE_DEVICE_TABLE(mdio, bcm63xx_tbl);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f482fc4f8cf1..cecdbbd549ec 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -908,3 +908,19 @@ static void __exit broadcom_exit(void)
908 908
909module_init(broadcom_init); 909module_init(broadcom_init);
910module_exit(broadcom_exit); 910module_exit(broadcom_exit);
911
912static struct mdio_device_id broadcom_tbl[] = {
913 { 0x00206070, 0xfffffff0 },
914 { 0x002060e0, 0xfffffff0 },
915 { 0x002060c0, 0xfffffff0 },
916 { 0x002060b0, 0xfffffff0 },
917 { 0x0143bca0, 0xfffffff0 },
918 { 0x0143bcb0, 0xfffffff0 },
919 { PHY_ID_BCM50610, 0xfffffff0 },
920 { PHY_ID_BCM50610M, 0xfffffff0 },
921 { PHY_ID_BCM57780, 0xfffffff0 },
922 { PHY_ID_BCMAC131, 0xfffffff0 },
923 { }
924};
925
926MODULE_DEVICE_TABLE(mdio, broadcom_tbl);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 92282b31d94b..1a325d63756b 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -158,3 +158,11 @@ static void __exit cicada_exit(void)
158 158
159module_init(cicada_init); 159module_init(cicada_init);
160module_exit(cicada_exit); 160module_exit(cicada_exit);
161
162static struct mdio_device_id cicada_tbl[] = {
163 { 0x000fc410, 0x000ffff0 },
164 { 0x000fc440, 0x000fffc0 },
165 { }
166};
167
168MODULE_DEVICE_TABLE(mdio, cicada_tbl);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index c722e95853ff..29c17617a2ec 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -218,3 +218,12 @@ static void __exit davicom_exit(void)
218 218
219module_init(davicom_init); 219module_init(davicom_init);
220module_exit(davicom_exit); 220module_exit(davicom_exit);
221
222static struct mdio_device_id davicom_tbl[] = {
223 { 0x0181b880, 0x0ffffff0 },
224 { 0x0181b8a0, 0x0ffffff0 },
225 { 0x00181b80, 0x0ffffff0 },
226 { }
227};
228
229MODULE_DEVICE_TABLE(mdio, davicom_tbl);
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 7712ebeba9bf..13995f52d6af 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -110,3 +110,10 @@ static void __exit et1011c_exit(void)
110 110
111module_init(et1011c_init); 111module_init(et1011c_init);
112module_exit(et1011c_exit); 112module_exit(et1011c_exit);
113
114static struct mdio_device_id et1011c_tbl[] = {
115 { 0x0282f014, 0xfffffff0 },
116 { }
117};
118
119MODULE_DEVICE_TABLE(mdio, et1011c_tbl);
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 904208b95d4b..439adafeacb1 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -131,3 +131,10 @@ static void __exit ip175c_exit(void)
131 131
132module_init(ip175c_init); 132module_init(ip175c_init);
133module_exit(ip175c_exit); 133module_exit(ip175c_exit);
134
135static struct mdio_device_id icplus_tbl[] = {
136 { 0x02430d80, 0x0ffffff0 },
137 { }
138};
139
140MODULE_DEVICE_TABLE(mdio, icplus_tbl);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 057ecaacde6b..8ee929b796d8 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -173,3 +173,11 @@ static void __exit lxt_exit(void)
173 173
174module_init(lxt_init); 174module_init(lxt_init);
175module_exit(lxt_exit); 175module_exit(lxt_exit);
176
177static struct mdio_device_id lxt_tbl[] = {
178 { 0x78100000, 0xfffffff0 },
179 { 0x001378e0, 0xfffffff0 },
180 { }
181};
182
183MODULE_DEVICE_TABLE(mdio, lxt_tbl);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 64c7fbe0a8e7..78b74e83ce5d 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -648,3 +648,16 @@ static void __exit marvell_exit(void)
648 648
649module_init(marvell_init); 649module_init(marvell_init);
650module_exit(marvell_exit); 650module_exit(marvell_exit);
651
652static struct mdio_device_id marvell_tbl[] = {
653 { 0x01410c60, 0xfffffff0 },
654 { 0x01410c90, 0xfffffff0 },
655 { 0x01410cc0, 0xfffffff0 },
656 { 0x01410e10, 0xfffffff0 },
657 { 0x01410cb0, 0xfffffff0 },
658 { 0x01410cd0, 0xfffffff0 },
659 { 0x01410e30, 0xfffffff0 },
660 { }
661};
662
663MODULE_DEVICE_TABLE(mdio, marvell_tbl);
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 19e70d7e27ab..65391891d8c4 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -22,8 +22,13 @@
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24 24
25#define MDIO_READ 1 25#define MDIO_READ 2
26#define MDIO_WRITE 0 26#define MDIO_WRITE 1
27
28#define MDIO_C45 (1<<15)
29#define MDIO_C45_ADDR (MDIO_C45 | 0)
30#define MDIO_C45_READ (MDIO_C45 | 3)
31#define MDIO_C45_WRITE (MDIO_C45 | 1)
27 32
28#define MDIO_SETUP_TIME 10 33#define MDIO_SETUP_TIME 10
29#define MDIO_HOLD_TIME 10 34#define MDIO_HOLD_TIME 10
@@ -89,7 +94,7 @@ static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits)
89/* Utility to send the preamble, address, and 94/* Utility to send the preamble, address, and
90 * register (common to read and write). 95 * register (common to read and write).
91 */ 96 */
92static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg) 97static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg)
93{ 98{
94 const struct mdiobb_ops *ops = ctrl->ops; 99 const struct mdiobb_ops *ops = ctrl->ops;
95 int i; 100 int i;
@@ -108,23 +113,56 @@ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg)
108 for (i = 0; i < 32; i++) 113 for (i = 0; i < 32; i++)
109 mdiobb_send_bit(ctrl, 1); 114 mdiobb_send_bit(ctrl, 1);
110 115
111 /* send the start bit (01) and the read opcode (10) or write (10) */ 116 /* send the start bit (01) and the read opcode (10) or write (10).
117 Clause 45 operation uses 00 for the start and 11, 10 for
118 read/write */
112 mdiobb_send_bit(ctrl, 0); 119 mdiobb_send_bit(ctrl, 0);
113 mdiobb_send_bit(ctrl, 1); 120 if (op & MDIO_C45)
114 mdiobb_send_bit(ctrl, read); 121 mdiobb_send_bit(ctrl, 0);
115 mdiobb_send_bit(ctrl, !read); 122 else
123 mdiobb_send_bit(ctrl, 1);
124 mdiobb_send_bit(ctrl, (op >> 1) & 1);
125 mdiobb_send_bit(ctrl, (op >> 0) & 1);
116 126
117 mdiobb_send_num(ctrl, phy, 5); 127 mdiobb_send_num(ctrl, phy, 5);
118 mdiobb_send_num(ctrl, reg, 5); 128 mdiobb_send_num(ctrl, reg, 5);
119} 129}
120 130
131/* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the
132 lower 16 bits of the 21 bit address. This transfer is done identically to a
133 MDIO_WRITE except for a different code. To enable clause 45 mode or
134 MII_ADDR_C45 into the address. Theoretically clause 45 and normal devices
135 can exist on the same bus. Normal devices should ignore the MDIO_ADDR
136 phase. */
137static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
138{
139 unsigned int dev_addr = (addr >> 16) & 0x1F;
140 unsigned int reg = addr & 0xFFFF;
141 mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr);
142
143 /* send the turnaround (10) */
144 mdiobb_send_bit(ctrl, 1);
145 mdiobb_send_bit(ctrl, 0);
146
147 mdiobb_send_num(ctrl, reg, 16);
148
149 ctrl->ops->set_mdio_dir(ctrl, 0);
150 mdiobb_get_bit(ctrl);
151
152 return dev_addr;
153}
121 154
122static int mdiobb_read(struct mii_bus *bus, int phy, int reg) 155static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
123{ 156{
124 struct mdiobb_ctrl *ctrl = bus->priv; 157 struct mdiobb_ctrl *ctrl = bus->priv;
125 int ret, i; 158 int ret, i;
126 159
127 mdiobb_cmd(ctrl, MDIO_READ, phy, reg); 160 if (reg & MII_ADDR_C45) {
161 reg = mdiobb_cmd_addr(ctrl, phy, reg);
162 mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg);
163 } else
164 mdiobb_cmd(ctrl, MDIO_READ, phy, reg);
165
128 ctrl->ops->set_mdio_dir(ctrl, 0); 166 ctrl->ops->set_mdio_dir(ctrl, 0);
129 167
130 /* check the turnaround bit: the PHY should be driving it to zero */ 168 /* check the turnaround bit: the PHY should be driving it to zero */
@@ -147,7 +185,11 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
147{ 185{
148 struct mdiobb_ctrl *ctrl = bus->priv; 186 struct mdiobb_ctrl *ctrl = bus->priv;
149 187
150 mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg); 188 if (reg & MII_ADDR_C45) {
189 reg = mdiobb_cmd_addr(ctrl, phy, reg);
190 mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg);
191 } else
192 mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg);
151 193
152 /* send the turnaround (10) */ 194 /* send the turnaround (10) */
153 mdiobb_send_bit(ctrl, 1); 195 mdiobb_send_bit(ctrl, 1);
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index a872aea4ed74..f443d43edd80 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -88,6 +88,7 @@ static int octeon_mdiobus_write(struct mii_bus *bus, int phy_id,
88static int __init octeon_mdiobus_probe(struct platform_device *pdev) 88static int __init octeon_mdiobus_probe(struct platform_device *pdev)
89{ 89{
90 struct octeon_mdiobus *bus; 90 struct octeon_mdiobus *bus;
91 union cvmx_smix_en smi_en;
91 int i; 92 int i;
92 int err = -ENOENT; 93 int err = -ENOENT;
93 94
@@ -103,6 +104,10 @@ static int __init octeon_mdiobus_probe(struct platform_device *pdev)
103 if (!bus->mii_bus) 104 if (!bus->mii_bus)
104 goto err; 105 goto err;
105 106
107 smi_en.u64 = 0;
108 smi_en.s.en = 1;
109 cvmx_write_csr(CVMX_SMIX_EN(bus->unit), smi_en.u64);
110
106 /* 111 /*
107 * Standard Octeon evaluation boards don't support phy 112 * Standard Octeon evaluation boards don't support phy
108 * interrupts, we need to poll. 113 * interrupts, we need to poll.
@@ -133,17 +138,22 @@ err_register:
133 138
134err: 139err:
135 devm_kfree(&pdev->dev, bus); 140 devm_kfree(&pdev->dev, bus);
141 smi_en.u64 = 0;
142 cvmx_write_csr(CVMX_SMIX_EN(bus->unit), smi_en.u64);
136 return err; 143 return err;
137} 144}
138 145
139static int __exit octeon_mdiobus_remove(struct platform_device *pdev) 146static int __exit octeon_mdiobus_remove(struct platform_device *pdev)
140{ 147{
141 struct octeon_mdiobus *bus; 148 struct octeon_mdiobus *bus;
149 union cvmx_smix_en smi_en;
142 150
143 bus = dev_get_drvdata(&pdev->dev); 151 bus = dev_get_drvdata(&pdev->dev);
144 152
145 mdiobus_unregister(bus->mii_bus); 153 mdiobus_unregister(bus->mii_bus);
146 mdiobus_free(bus->mii_bus); 154 mdiobus_free(bus->mii_bus);
155 smi_en.u64 = 0;
156 cvmx_write_csr(CVMX_SMIX_EN(bus->unit), smi_en.u64);
147 return 0; 157 return 0;
148} 158}
149 159
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index e17b70291bbc..6a6b8199a0d6 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(mdiobus_scan);
208 * because the bus read/write functions may wait for an interrupt 208 * because the bus read/write functions may wait for an interrupt
209 * to conclude the operation. 209 * to conclude the operation.
210 */ 210 */
211int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum) 211int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
212{ 212{
213 int retval; 213 int retval;
214 214
@@ -233,7 +233,7 @@ EXPORT_SYMBOL(mdiobus_read);
233 * because the bus read/write functions may wait for an interrupt 233 * because the bus read/write functions may wait for an interrupt
234 * to conclude the operation. 234 * to conclude the operation.
235 */ 235 */
236int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val) 236int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
237{ 237{
238 int err; 238 int err;
239 239
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
new file mode 100644
index 000000000000..0692f750c404
--- /dev/null
+++ b/drivers/net/phy/micrel.c
@@ -0,0 +1,114 @@
1/*
2 * drivers/net/phy/micrel.c
3 *
4 * Driver for Micrel PHYs
5 *
6 * Author: David J. Choi
7 *
8 * Copyright (c) 2010 Micrel, Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 * Support : ksz9021 , vsc8201, ks8001
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/phy.h>
21
22#define PHY_ID_KSZ9021 0x00221611
23#define PHY_ID_VSC8201 0x000FC413
24#define PHY_ID_KS8001 0x0022161A
25
26
27static int kszphy_config_init(struct phy_device *phydev)
28{
29 return 0;
30}
31
32
33static struct phy_driver ks8001_driver = {
34 .phy_id = PHY_ID_KS8001,
35 .name = "Micrel KS8001",
36 .phy_id_mask = 0x00fffff0,
37 .features = PHY_BASIC_FEATURES,
38 .flags = PHY_POLL,
39 .config_init = kszphy_config_init,
40 .config_aneg = genphy_config_aneg,
41 .read_status = genphy_read_status,
42 .driver = { .owner = THIS_MODULE,},
43};
44
45static struct phy_driver vsc8201_driver = {
46 .phy_id = PHY_ID_VSC8201,
47 .name = "Micrel VSC8201",
48 .phy_id_mask = 0x00fffff0,
49 .features = PHY_BASIC_FEATURES,
50 .flags = PHY_POLL,
51 .config_init = kszphy_config_init,
52 .config_aneg = genphy_config_aneg,
53 .read_status = genphy_read_status,
54 .driver = { .owner = THIS_MODULE,},
55};
56
57static struct phy_driver ksz9021_driver = {
58 .phy_id = PHY_ID_KSZ9021,
59 .phy_id_mask = 0x000fff10,
60 .name = "Micrel KSZ9021 Gigabit PHY",
61 .features = PHY_GBIT_FEATURES | SUPPORTED_Pause,
62 .flags = PHY_POLL,
63 .config_init = kszphy_config_init,
64 .config_aneg = genphy_config_aneg,
65 .read_status = genphy_read_status,
66 .driver = { .owner = THIS_MODULE, },
67};
68
69static int __init ksphy_init(void)
70{
71 int ret;
72
73 ret = phy_driver_register(&ks8001_driver);
74 if (ret)
75 goto err1;
76 ret = phy_driver_register(&vsc8201_driver);
77 if (ret)
78 goto err2;
79
80 ret = phy_driver_register(&ksz9021_driver);
81 if (ret)
82 goto err3;
83 return 0;
84
85err3:
86 phy_driver_unregister(&vsc8201_driver);
87err2:
88 phy_driver_unregister(&ks8001_driver);
89err1:
90 return ret;
91}
92
93static void __exit ksphy_exit(void)
94{
95 phy_driver_unregister(&ks8001_driver);
96 phy_driver_unregister(&vsc8201_driver);
97 phy_driver_unregister(&ksz9021_driver);
98}
99
100module_init(ksphy_init);
101module_exit(ksphy_exit);
102
103MODULE_DESCRIPTION("Micrel PHY driver");
104MODULE_AUTHOR("David J. Choi");
105MODULE_LICENSE("GPL");
106
107static struct mdio_device_id micrel_tbl[] = {
108 { PHY_ID_KSZ9021, 0x000fff10 },
109 { PHY_ID_VSC8201, 0x00fffff0 },
110 { PHY_ID_KS8001, 0x00fffff0 },
111 { }
112};
113
114MODULE_DEVICE_TABLE(mdio, micrel_tbl);
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 6c636eb72089..a73ba0bcc0ce 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -97,7 +97,6 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
97 phy_write(phydev, NS_EXP_MEM_DATA, 0x0008); 97 phy_write(phydev, NS_EXP_MEM_DATA, 0x0008);
98 phy_write(phydev, MII_BMCR, (bmcr & ~BMCR_PDOWN)); 98 phy_write(phydev, MII_BMCR, (bmcr & ~BMCR_PDOWN));
99 phy_write(phydev, LED_CTRL_REG, mode); 99 phy_write(phydev, LED_CTRL_REG, mode);
100 return;
101} 100}
102 101
103static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable) 102static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
@@ -110,8 +109,6 @@ static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
110 109
111 printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n", 110 printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n",
112 (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on"); 111 (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
113
114 return;
115} 112}
116 113
117static int ns_config_init(struct phy_device *phydev) 114static int ns_config_init(struct phy_device *phydev)
@@ -153,3 +150,10 @@ MODULE_LICENSE("GPL");
153 150
154module_init(ns_init); 151module_init(ns_init);
155module_exit(ns_exit); 152module_exit(ns_exit);
153
154static struct mdio_device_id ns_tbl[] = {
155 { DP83865_PHY_ID, 0xfffffff0 },
156 { }
157};
158
159MODULE_DEVICE_TABLE(mdio, ns_tbl);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index db1794546c56..1a99bb244106 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -149,6 +149,7 @@ EXPORT_SYMBOL(phy_scan_fixups);
149struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) 149struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
150{ 150{
151 struct phy_device *dev; 151 struct phy_device *dev;
152
152 /* We allocate the device, and initialize the 153 /* We allocate the device, and initialize the
153 * default values */ 154 * default values */
154 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 155 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -179,6 +180,17 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
179 mutex_init(&dev->lock); 180 mutex_init(&dev->lock);
180 INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); 181 INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
181 182
183 /* Request the appropriate module unconditionally; don't
184 bother trying to do so only if it isn't already loaded,
185 because that gets complicated. A hotplug event would have
186 done an unconditional modprobe anyway.
187 We don't do normal hotplug because it won't work for MDIO
188 -- because it relies on the device staying around for long
189 enough for the driver to get loaded. With MDIO, the NIC
190 driver will get bored and give up as soon as it finds that
191 there's no driver _already_ loaded. */
192 request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
193
182 return dev; 194 return dev;
183} 195}
184EXPORT_SYMBOL(phy_device_create); 196EXPORT_SYMBOL(phy_device_create);
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index f6e190f73c32..6736b23f1b28 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -137,3 +137,10 @@ static void __exit qs6612_exit(void)
137 137
138module_init(qs6612_init); 138module_init(qs6612_init);
139module_exit(qs6612_exit); 139module_exit(qs6612_exit);
140
141static struct mdio_device_id qs6612_tbl[] = {
142 { 0x00181440, 0xfffffff0 },
143 { }
144};
145
146MODULE_DEVICE_TABLE(mdio, qs6612_tbl);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a052a6744a51..f567c0e1aaa1 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -78,3 +78,10 @@ static void __exit realtek_exit(void)
78 78
79module_init(realtek_init); 79module_init(realtek_init);
80module_exit(realtek_exit); 80module_exit(realtek_exit);
81
82static struct mdio_device_id realtek_tbl[] = {
83 { 0x001cc912, 0x001fffff },
84 { }
85};
86
87MODULE_DEVICE_TABLE(mdio, realtek_tbl);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index ed2644a57500..78fa988256fc 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -253,3 +253,14 @@ MODULE_LICENSE("GPL");
253 253
254module_init(smsc_init); 254module_init(smsc_init);
255module_exit(smsc_exit); 255module_exit(smsc_exit);
256
257static struct mdio_device_id smsc_tbl[] = {
258 { 0x0007c0a0, 0xfffffff0 },
259 { 0x0007c0b0, 0xfffffff0 },
260 { 0x0007c0c0, 0xfffffff0 },
261 { 0x0007c0d0, 0xfffffff0 },
262 { 0x0007c0f0, 0xfffffff0 },
263 { }
264};
265
266MODULE_DEVICE_TABLE(mdio, smsc_tbl);
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 6bdb0d53aaf9..72290099e5e1 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -132,6 +132,14 @@ static void __exit ste10Xp_exit(void)
132module_init(ste10Xp_init); 132module_init(ste10Xp_init);
133module_exit(ste10Xp_exit); 133module_exit(ste10Xp_exit);
134 134
135static struct mdio_device_id ste10Xp_tbl[] = {
136 { STE101P_PHY_ID, 0xfffffff0 },
137 { STE100P_PHY_ID, 0xffffffff },
138 { }
139};
140
141MODULE_DEVICE_TABLE(mdio, ste10Xp_tbl);
142
135MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver"); 143MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver");
136MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 144MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
137MODULE_LICENSE("GPL"); 145MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index dd3b2447e85a..45cce50a2799 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -191,3 +191,11 @@ static void __exit vsc82xx_exit(void)
191 191
192module_init(vsc82xx_init); 192module_init(vsc82xx_init);
193module_exit(vsc82xx_exit); 193module_exit(vsc82xx_exit);
194
195static struct mdio_device_id vitesse_tbl[] = {
196 { PHY_ID_VSC8244, 0x000fffc0 },
197 { PHY_ID_VSC8221, 0x000ffff0 },
198 { }
199};
200
201MODULE_DEVICE_TABLE(mdio, vitesse_tbl);
diff --git a/drivers/net/plip.c b/drivers/net/plip.c
index 9a2103a69e17..ec0349e84a8a 100644
--- a/drivers/net/plip.c
+++ b/drivers/net/plip.c
@@ -979,7 +979,6 @@ plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
979 printk(KERN_DEBUG "%s: send request\n", dev->name); 979 printk(KERN_DEBUG "%s: send request\n", dev->name);
980 980
981 spin_lock_irq(&nl->lock); 981 spin_lock_irq(&nl->lock);
982 dev->trans_start = jiffies;
983 snd->skb = skb; 982 snd->skb = skb;
984 snd->length.h = skb->len; 983 snd->length.h = skb->len;
985 snd->state = PLIP_PK_TRIGGER; 984 snd->state = PLIP_PK_TRIGGER;
@@ -1192,8 +1191,6 @@ plip_wakeup(void *handle)
1192 /* Clear the data port. */ 1191 /* Clear the data port. */
1193 write_data (dev, 0x00); 1192 write_data (dev, 0x00);
1194 } 1193 }
1195
1196 return;
1197} 1194}
1198 1195
1199static int 1196static int
@@ -1309,7 +1306,6 @@ err_parport_unregister:
1309 parport_unregister_device(nl->pardev); 1306 parport_unregister_device(nl->pardev);
1310err_free_dev: 1307err_free_dev:
1311 free_netdev(dev); 1308 free_netdev(dev);
1312 return;
1313} 1309}
1314 1310
1315/* plip_detach() is called (by the parport code) when a port is 1311/* plip_detach() is called (by the parport code) when a port is
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6e281bc825e5..5441688daba7 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -405,6 +405,7 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
405 DECLARE_WAITQUEUE(wait, current); 405 DECLARE_WAITQUEUE(wait, current);
406 ssize_t ret; 406 ssize_t ret;
407 struct sk_buff *skb = NULL; 407 struct sk_buff *skb = NULL;
408 struct iovec iov;
408 409
409 ret = count; 410 ret = count;
410 411
@@ -448,7 +449,9 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
448 if (skb->len > count) 449 if (skb->len > count)
449 goto outf; 450 goto outf;
450 ret = -EFAULT; 451 ret = -EFAULT;
451 if (copy_to_user(buf, skb->data, skb->len)) 452 iov.iov_base = buf;
453 iov.iov_len = count;
454 if (skb_copy_datagram_iovec(skb, 0, &iov, skb->len))
452 goto outf; 455 goto outf;
453 ret = skb->len; 456 ret = skb->len;
454 457
@@ -1567,13 +1570,22 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
1567 struct channel *pch = chan->ppp; 1570 struct channel *pch = chan->ppp;
1568 int proto; 1571 int proto;
1569 1572
1570 if (!pch || skb->len == 0) { 1573 if (!pch) {
1571 kfree_skb(skb); 1574 kfree_skb(skb);
1572 return; 1575 return;
1573 } 1576 }
1574 1577
1575 proto = PPP_PROTO(skb);
1576 read_lock_bh(&pch->upl); 1578 read_lock_bh(&pch->upl);
1579 if (!pskb_may_pull(skb, 2)) {
1580 kfree_skb(skb);
1581 if (pch->ppp) {
1582 ++pch->ppp->dev->stats.rx_length_errors;
1583 ppp_receive_error(pch->ppp);
1584 }
1585 goto done;
1586 }
1587
1588 proto = PPP_PROTO(skb);
1577 if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) { 1589 if (!pch->ppp || proto >= 0xc000 || proto == PPP_CCPFRAG) {
1578 /* put it on the channel queue */ 1590 /* put it on the channel queue */
1579 skb_queue_tail(&pch->file.rq, skb); 1591 skb_queue_tail(&pch->file.rq, skb);
@@ -1585,6 +1597,8 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb)
1585 } else { 1597 } else {
1586 ppp_do_recv(pch->ppp, skb, pch); 1598 ppp_do_recv(pch->ppp, skb, pch);
1587 } 1599 }
1600
1601done:
1588 read_unlock_bh(&pch->upl); 1602 read_unlock_bh(&pch->upl);
1589} 1603}
1590 1604
@@ -1617,7 +1631,8 @@ ppp_input_error(struct ppp_channel *chan, int code)
1617static void 1631static void
1618ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) 1632ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1619{ 1633{
1620 if (pskb_may_pull(skb, 2)) { 1634 /* note: a 0-length skb is used as an error indication */
1635 if (skb->len > 0) {
1621#ifdef CONFIG_PPP_MULTILINK 1636#ifdef CONFIG_PPP_MULTILINK
1622 /* XXX do channel-level decompression here */ 1637 /* XXX do channel-level decompression here */
1623 if (PPP_PROTO(skb) == PPP_MP) 1638 if (PPP_PROTO(skb) == PPP_MP)
@@ -1625,15 +1640,10 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
1625 else 1640 else
1626#endif /* CONFIG_PPP_MULTILINK */ 1641#endif /* CONFIG_PPP_MULTILINK */
1627 ppp_receive_nonmp_frame(ppp, skb); 1642 ppp_receive_nonmp_frame(ppp, skb);
1628 return; 1643 } else {
1644 kfree_skb(skb);
1645 ppp_receive_error(ppp);
1629 } 1646 }
1630
1631 if (skb->len > 0)
1632 /* note: a 0-length skb is used as an error indication */
1633 ++ppp->dev->stats.rx_length_errors;
1634
1635 kfree_skb(skb);
1636 ppp_receive_error(ppp);
1637} 1647}
1638 1648
1639static void 1649static void
@@ -2164,6 +2174,24 @@ int ppp_unit_number(struct ppp_channel *chan)
2164} 2174}
2165 2175
2166/* 2176/*
2177 * Return the PPP device interface name of a channel.
2178 */
2179char *ppp_dev_name(struct ppp_channel *chan)
2180{
2181 struct channel *pch = chan->ppp;
2182 char *name = NULL;
2183
2184 if (pch) {
2185 read_lock_bh(&pch->upl);
2186 if (pch->ppp && pch->ppp->dev)
2187 name = pch->ppp->dev->name;
2188 read_unlock_bh(&pch->upl);
2189 }
2190 return name;
2191}
2192
2193
2194/*
2167 * Disconnect a channel from the generic layer. 2195 * Disconnect a channel from the generic layer.
2168 * This must be called in process context. 2196 * This must be called in process context.
2169 */ 2197 */
@@ -2891,6 +2919,7 @@ EXPORT_SYMBOL(ppp_register_channel);
2891EXPORT_SYMBOL(ppp_unregister_channel); 2919EXPORT_SYMBOL(ppp_unregister_channel);
2892EXPORT_SYMBOL(ppp_channel_index); 2920EXPORT_SYMBOL(ppp_channel_index);
2893EXPORT_SYMBOL(ppp_unit_number); 2921EXPORT_SYMBOL(ppp_unit_number);
2922EXPORT_SYMBOL(ppp_dev_name);
2894EXPORT_SYMBOL(ppp_input); 2923EXPORT_SYMBOL(ppp_input);
2895EXPORT_SYMBOL(ppp_input_error); 2924EXPORT_SYMBOL(ppp_input_error);
2896EXPORT_SYMBOL(ppp_output_wakeup); 2925EXPORT_SYMBOL(ppp_output_wakeup);
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index cdd11ba100ea..b1b93ff2351f 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -89,7 +89,6 @@
89#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS) 89#define PPPOE_HASH_SIZE (1 << PPPOE_HASH_BITS)
90#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1) 90#define PPPOE_HASH_MASK (PPPOE_HASH_SIZE - 1)
91 91
92static int pppoe_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
93static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb); 92static int pppoe_xmit(struct ppp_channel *chan, struct sk_buff *skb);
94static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb); 93static int __pppoe_xmit(struct sock *sk, struct sk_buff *skb);
95 94
@@ -258,7 +257,7 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
258 dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev); 257 dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
259 if (dev) { 258 if (dev) {
260 ifindex = dev->ifindex; 259 ifindex = dev->ifindex;
261 pn = net_generic(net, pppoe_net_id); 260 pn = pppoe_pernet(net);
262 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid, 261 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
263 sp->sa_addr.pppoe.remote, ifindex); 262 sp->sa_addr.pppoe.remote, ifindex);
264 } 263 }
@@ -290,12 +289,6 @@ static void pppoe_flush_dev(struct net_device *dev)
290 struct pppoe_net *pn; 289 struct pppoe_net *pn;
291 int i; 290 int i;
292 291
293 BUG_ON(dev == NULL);
294
295 pn = pppoe_pernet(dev_net(dev));
296 if (!pn) /* already freed */
297 return;
298
299 write_lock_bh(&pn->hash_lock); 292 write_lock_bh(&pn->hash_lock);
300 for (i = 0; i < PPPOE_HASH_SIZE; i++) { 293 for (i = 0; i < PPPOE_HASH_SIZE; i++) {
301 struct pppox_sock *po = pn->hash_table[i]; 294 struct pppox_sock *po = pn->hash_table[i];
@@ -368,7 +361,7 @@ static int pppoe_device_event(struct notifier_block *this,
368 361
369 default: 362 default:
370 break; 363 break;
371 }; 364 }
372 365
373 return NOTIFY_DONE; 366 return NOTIFY_DONE;
374} 367}
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
deleted file mode 100644
index 449a9825200d..000000000000
--- a/drivers/net/pppol2tp.c
+++ /dev/null
@@ -1,2680 +0,0 @@
1/*****************************************************************************
2 * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
3 *
4 * PPPoX --- Generic PPP encapsulation socket family
5 * PPPoL2TP --- PPP over L2TP (RFC 2661)
6 *
7 * Version: 1.0.0
8 *
9 * Authors: Martijn van Oosterhout <kleptog@svana.org>
10 * James Chapman (jchapman@katalix.com)
11 * Contributors:
12 * Michal Ostrowski <mostrows@speakeasy.net>
13 * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14 * David S. Miller (davem@redhat.com)
15 *
16 * License:
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 */
23
24/* This driver handles only L2TP data frames; control frames are handled by a
25 * userspace application.
26 *
27 * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
28 * attaches it to a bound UDP socket with local tunnel_id / session_id and
29 * peer tunnel_id / session_id set. Data can then be sent or received using
30 * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
31 * can be read or modified using ioctl() or [gs]etsockopt() calls.
32 *
33 * When a PPPoL2TP socket is connected with local and peer session_id values
34 * zero, the socket is treated as a special tunnel management socket.
35 *
36 * Here's example userspace code to create a socket for sending/receiving data
37 * over an L2TP session:-
38 *
39 * struct sockaddr_pppol2tp sax;
40 * int fd;
41 * int session_fd;
42 *
43 * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
44 *
45 * sax.sa_family = AF_PPPOX;
46 * sax.sa_protocol = PX_PROTO_OL2TP;
47 * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
48 * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
49 * sax.pppol2tp.addr.sin_port = addr->sin_port;
50 * sax.pppol2tp.addr.sin_family = AF_INET;
51 * sax.pppol2tp.s_tunnel = tunnel_id;
52 * sax.pppol2tp.s_session = session_id;
53 * sax.pppol2tp.d_tunnel = peer_tunnel_id;
54 * sax.pppol2tp.d_session = peer_session_id;
55 *
56 * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
57 *
58 * A pppd plugin that allows PPP traffic to be carried over L2TP using
59 * this driver is available from the OpenL2TP project at
60 * http://openl2tp.sourceforge.net.
61 */
62
63#include <linux/module.h>
64#include <linux/string.h>
65#include <linux/list.h>
66#include <asm/uaccess.h>
67
68#include <linux/kernel.h>
69#include <linux/spinlock.h>
70#include <linux/kthread.h>
71#include <linux/sched.h>
72#include <linux/slab.h>
73#include <linux/errno.h>
74#include <linux/jiffies.h>
75
76#include <linux/netdevice.h>
77#include <linux/net.h>
78#include <linux/inetdevice.h>
79#include <linux/skbuff.h>
80#include <linux/init.h>
81#include <linux/ip.h>
82#include <linux/udp.h>
83#include <linux/if_pppox.h>
84#include <linux/if_pppol2tp.h>
85#include <net/sock.h>
86#include <linux/ppp_channel.h>
87#include <linux/ppp_defs.h>
88#include <linux/if_ppp.h>
89#include <linux/file.h>
90#include <linux/hash.h>
91#include <linux/sort.h>
92#include <linux/proc_fs.h>
93#include <linux/nsproxy.h>
94#include <net/net_namespace.h>
95#include <net/netns/generic.h>
96#include <net/dst.h>
97#include <net/ip.h>
98#include <net/udp.h>
99#include <net/xfrm.h>
100
101#include <asm/byteorder.h>
102#include <asm/atomic.h>
103
104
105#define PPPOL2TP_DRV_VERSION "V1.0"
106
107/* L2TP header constants */
108#define L2TP_HDRFLAG_T 0x8000
109#define L2TP_HDRFLAG_L 0x4000
110#define L2TP_HDRFLAG_S 0x0800
111#define L2TP_HDRFLAG_O 0x0200
112#define L2TP_HDRFLAG_P 0x0100
113
114#define L2TP_HDR_VER_MASK 0x000F
115#define L2TP_HDR_VER 0x0002
116
117/* Space for UDP, L2TP and PPP headers */
118#define PPPOL2TP_HEADER_OVERHEAD 40
119
120/* Just some random numbers */
121#define L2TP_TUNNEL_MAGIC 0x42114DDA
122#define L2TP_SESSION_MAGIC 0x0C04EB7D
123
124#define PPPOL2TP_HASH_BITS 4
125#define PPPOL2TP_HASH_SIZE (1 << PPPOL2TP_HASH_BITS)
126
127/* Default trace flags */
128#define PPPOL2TP_DEFAULT_DEBUG_FLAGS 0
129
130#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
131 do { \
132 if ((_mask) & (_type)) \
133 printk(_lvl "PPPOL2TP: " _fmt, ##args); \
134 } while(0)
135
136/* Number of bytes to build transmit L2TP headers.
137 * Unfortunately the size is different depending on whether sequence numbers
138 * are enabled.
139 */
140#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
141#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
142
143struct pppol2tp_tunnel;
144
145/* Describes a session. It is the sk_user_data field in the PPPoL2TP
146 * socket. Contains information to determine incoming packets and transmit
147 * outgoing ones.
148 */
149struct pppol2tp_session
150{
151 int magic; /* should be
152 * L2TP_SESSION_MAGIC */
153 int owner; /* pid that opened the socket */
154
155 struct sock *sock; /* Pointer to the session
156 * PPPoX socket */
157 struct sock *tunnel_sock; /* Pointer to the tunnel UDP
158 * socket */
159
160 struct pppol2tp_addr tunnel_addr; /* Description of tunnel */
161
162 struct pppol2tp_tunnel *tunnel; /* back pointer to tunnel
163 * context */
164
165 char name[20]; /* "sess xxxxx/yyyyy", where
166 * x=tunnel_id, y=session_id */
167 int mtu;
168 int mru;
169 int flags; /* accessed by PPPIOCGFLAGS.
170 * Unused. */
171 unsigned recv_seq:1; /* expect receive packets with
172 * sequence numbers? */
173 unsigned send_seq:1; /* send packets with sequence
174 * numbers? */
175 unsigned lns_mode:1; /* behave as LNS? LAC enables
176 * sequence numbers under
177 * control of LNS. */
178 int debug; /* bitmask of debug message
179 * categories */
180 int reorder_timeout; /* configured reorder timeout
181 * (in jiffies) */
182 u16 nr; /* session NR state (receive) */
183 u16 ns; /* session NR state (send) */
184 struct sk_buff_head reorder_q; /* receive reorder queue */
185 struct pppol2tp_ioc_stats stats;
186 struct hlist_node hlist; /* Hash list node */
187};
188
189/* The sk_user_data field of the tunnel's UDP socket. It contains info to track
190 * all the associated sessions so incoming packets can be sorted out
191 */
192struct pppol2tp_tunnel
193{
194 int magic; /* Should be L2TP_TUNNEL_MAGIC */
195 rwlock_t hlist_lock; /* protect session_hlist */
196 struct hlist_head session_hlist[PPPOL2TP_HASH_SIZE];
197 /* hashed list of sessions,
198 * hashed by id */
199 int debug; /* bitmask of debug message
200 * categories */
201 char name[12]; /* "tunl xxxxx" */
202 struct pppol2tp_ioc_stats stats;
203
204 void (*old_sk_destruct)(struct sock *);
205
206 struct sock *sock; /* Parent socket */
207 struct list_head list; /* Keep a list of all open
208 * prepared sockets */
209 struct net *pppol2tp_net; /* the net we belong to */
210
211 atomic_t ref_count;
212};
213
214/* Private data stored for received packets in the skb.
215 */
216struct pppol2tp_skb_cb {
217 u16 ns;
218 u16 nr;
219 u16 has_seq;
220 u16 length;
221 unsigned long expires;
222};
223
224#define PPPOL2TP_SKB_CB(skb) ((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
225
226static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
227static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel);
228
229static atomic_t pppol2tp_tunnel_count;
230static atomic_t pppol2tp_session_count;
231static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit , NULL };
232static const struct proto_ops pppol2tp_ops;
233
234/* per-net private data for this module */
235static int pppol2tp_net_id __read_mostly;
236struct pppol2tp_net {
237 struct list_head pppol2tp_tunnel_list;
238 rwlock_t pppol2tp_tunnel_list_lock;
239};
240
241static inline struct pppol2tp_net *pppol2tp_pernet(struct net *net)
242{
243 BUG_ON(!net);
244
245 return net_generic(net, pppol2tp_net_id);
246}
247
248/* Helpers to obtain tunnel/session contexts from sockets.
249 */
250static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
251{
252 struct pppol2tp_session *session;
253
254 if (sk == NULL)
255 return NULL;
256
257 sock_hold(sk);
258 session = (struct pppol2tp_session *)(sk->sk_user_data);
259 if (session == NULL) {
260 sock_put(sk);
261 goto out;
262 }
263
264 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
265out:
266 return session;
267}
268
269static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
270{
271 struct pppol2tp_tunnel *tunnel;
272
273 if (sk == NULL)
274 return NULL;
275
276 sock_hold(sk);
277 tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
278 if (tunnel == NULL) {
279 sock_put(sk);
280 goto out;
281 }
282
283 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
284out:
285 return tunnel;
286}
287
288/* Tunnel reference counts. Incremented per session that is added to
289 * the tunnel.
290 */
291static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel)
292{
293 atomic_inc(&tunnel->ref_count);
294}
295
296static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel)
297{
298 if (atomic_dec_and_test(&tunnel->ref_count))
299 pppol2tp_tunnel_free(tunnel);
300}
301
302/* Session hash list.
303 * The session_id SHOULD be random according to RFC2661, but several
304 * L2TP implementations (Cisco and Microsoft) use incrementing
305 * session_ids. So we do a real hash on the session_id, rather than a
306 * simple bitmask.
307 */
308static inline struct hlist_head *
309pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id)
310{
311 unsigned long hash_val = (unsigned long) session_id;
312 return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)];
313}
314
315/* Lookup a session by id
316 */
317static struct pppol2tp_session *
318pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
319{
320 struct hlist_head *session_list =
321 pppol2tp_session_id_hash(tunnel, session_id);
322 struct pppol2tp_session *session;
323 struct hlist_node *walk;
324
325 read_lock_bh(&tunnel->hlist_lock);
326 hlist_for_each_entry(session, walk, session_list, hlist) {
327 if (session->tunnel_addr.s_session == session_id) {
328 read_unlock_bh(&tunnel->hlist_lock);
329 return session;
330 }
331 }
332 read_unlock_bh(&tunnel->hlist_lock);
333
334 return NULL;
335}
336
337/* Lookup a tunnel by id
338 */
339static struct pppol2tp_tunnel *pppol2tp_tunnel_find(struct net *net, u16 tunnel_id)
340{
341 struct pppol2tp_tunnel *tunnel;
342 struct pppol2tp_net *pn = pppol2tp_pernet(net);
343
344 read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
345 list_for_each_entry(tunnel, &pn->pppol2tp_tunnel_list, list) {
346 if (tunnel->stats.tunnel_id == tunnel_id) {
347 read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
348 return tunnel;
349 }
350 }
351 read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
352
353 return NULL;
354}
355
356/*****************************************************************************
357 * Receive data handling
358 *****************************************************************************/
359
360/* Queue a skb in order. We come here only if the skb has an L2TP sequence
361 * number.
362 */
363static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
364{
365 struct sk_buff *skbp;
366 struct sk_buff *tmp;
367 u16 ns = PPPOL2TP_SKB_CB(skb)->ns;
368
369 spin_lock_bh(&session->reorder_q.lock);
370 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
371 if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
372 __skb_queue_before(&session->reorder_q, skbp, skb);
373 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
374 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
375 session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
376 skb_queue_len(&session->reorder_q));
377 session->stats.rx_oos_packets++;
378 goto out;
379 }
380 }
381
382 __skb_queue_tail(&session->reorder_q, skb);
383
384out:
385 spin_unlock_bh(&session->reorder_q.lock);
386}
387
388/* Dequeue a single skb.
389 */
390static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
391{
392 struct pppol2tp_tunnel *tunnel = session->tunnel;
393 int length = PPPOL2TP_SKB_CB(skb)->length;
394 struct sock *session_sock = NULL;
395
396 /* We're about to requeue the skb, so return resources
397 * to its current owner (a socket receive buffer).
398 */
399 skb_orphan(skb);
400
401 tunnel->stats.rx_packets++;
402 tunnel->stats.rx_bytes += length;
403 session->stats.rx_packets++;
404 session->stats.rx_bytes += length;
405
406 if (PPPOL2TP_SKB_CB(skb)->has_seq) {
407 /* Bump our Nr */
408 session->nr++;
409 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
410 "%s: updated nr to %hu\n", session->name, session->nr);
411 }
412
413 /* If the socket is bound, send it in to PPP's input queue. Otherwise
414 * queue it on the session socket.
415 */
416 session_sock = session->sock;
417 if (session_sock->sk_state & PPPOX_BOUND) {
418 struct pppox_sock *po;
419 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
420 "%s: recv %d byte data frame, passing to ppp\n",
421 session->name, length);
422
423 /* We need to forget all info related to the L2TP packet
424 * gathered in the skb as we are going to reuse the same
425 * skb for the inner packet.
426 * Namely we need to:
427 * - reset xfrm (IPSec) information as it applies to
428 * the outer L2TP packet and not to the inner one
429 * - release the dst to force a route lookup on the inner
430 * IP packet since skb->dst currently points to the dst
431 * of the UDP tunnel
432 * - reset netfilter information as it doesn't apply
433 * to the inner packet either
434 */
435 secpath_reset(skb);
436 skb_dst_drop(skb);
437 nf_reset(skb);
438
439 po = pppox_sk(session_sock);
440 ppp_input(&po->chan, skb);
441 } else {
442 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
443 "%s: socket not bound\n", session->name);
444
445 /* Not bound. Nothing we can do, so discard. */
446 session->stats.rx_errors++;
447 kfree_skb(skb);
448 }
449
450 sock_put(session->sock);
451}
452
453/* Dequeue skbs from the session's reorder_q, subject to packet order.
454 * Skbs that have been in the queue for too long are simply discarded.
455 */
456static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
457{
458 struct sk_buff *skb;
459 struct sk_buff *tmp;
460
461 /* If the pkt at the head of the queue has the nr that we
462 * expect to send up next, dequeue it and any other
463 * in-sequence packets behind it.
464 */
465 spin_lock_bh(&session->reorder_q.lock);
466 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
467 if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
468 session->stats.rx_seq_discards++;
469 session->stats.rx_errors++;
470 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
471 "%s: oos pkt %hu len %d discarded (too old), "
472 "waiting for %hu, reorder_q_len=%d\n",
473 session->name, PPPOL2TP_SKB_CB(skb)->ns,
474 PPPOL2TP_SKB_CB(skb)->length, session->nr,
475 skb_queue_len(&session->reorder_q));
476 __skb_unlink(skb, &session->reorder_q);
477 kfree_skb(skb);
478 sock_put(session->sock);
479 continue;
480 }
481
482 if (PPPOL2TP_SKB_CB(skb)->has_seq) {
483 if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
484 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
485 "%s: holding oos pkt %hu len %d, "
486 "waiting for %hu, reorder_q_len=%d\n",
487 session->name, PPPOL2TP_SKB_CB(skb)->ns,
488 PPPOL2TP_SKB_CB(skb)->length, session->nr,
489 skb_queue_len(&session->reorder_q));
490 goto out;
491 }
492 }
493 __skb_unlink(skb, &session->reorder_q);
494
495 /* Process the skb. We release the queue lock while we
496 * do so to let other contexts process the queue.
497 */
498 spin_unlock_bh(&session->reorder_q.lock);
499 pppol2tp_recv_dequeue_skb(session, skb);
500 spin_lock_bh(&session->reorder_q.lock);
501 }
502
503out:
504 spin_unlock_bh(&session->reorder_q.lock);
505}
506
507static inline int pppol2tp_verify_udp_checksum(struct sock *sk,
508 struct sk_buff *skb)
509{
510 struct udphdr *uh = udp_hdr(skb);
511 u16 ulen = ntohs(uh->len);
512 struct inet_sock *inet;
513 __wsum psum;
514
515 if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
516 return 0;
517
518 inet = inet_sk(sk);
519 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
520 IPPROTO_UDP, 0);
521
522 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
523 !csum_fold(csum_add(psum, skb->csum)))
524 return 0;
525
526 skb->csum = psum;
527
528 return __skb_checksum_complete(skb);
529}
530
531/* Internal receive frame. Do the real work of receiving an L2TP data frame
532 * here. The skb is not on a list when we get here.
533 * Returns 0 if the packet was a data packet and was successfully passed on.
534 * Returns 1 if the packet was not a good data packet and could not be
535 * forwarded. All such packets are passed up to userspace to deal with.
536 */
537static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
538{
539 struct pppol2tp_session *session = NULL;
540 struct pppol2tp_tunnel *tunnel;
541 unsigned char *ptr, *optr;
542 u16 hdrflags;
543 u16 tunnel_id, session_id;
544 int length;
545 int offset;
546
547 tunnel = pppol2tp_sock_to_tunnel(sock);
548 if (tunnel == NULL)
549 goto no_tunnel;
550
551 if (tunnel->sock && pppol2tp_verify_udp_checksum(tunnel->sock, skb))
552 goto discard_bad_csum;
553
554 /* UDP always verifies the packet length. */
555 __skb_pull(skb, sizeof(struct udphdr));
556
557 /* Short packet? */
558 if (!pskb_may_pull(skb, 12)) {
559 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
560 "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
561 goto error;
562 }
563
564 /* Point to L2TP header */
565 optr = ptr = skb->data;
566
567 /* Get L2TP header flags */
568 hdrflags = ntohs(*(__be16*)ptr);
569
570 /* Trace packet contents, if enabled */
571 if (tunnel->debug & PPPOL2TP_MSG_DATA) {
572 length = min(16u, skb->len);
573 if (!pskb_may_pull(skb, length))
574 goto error;
575
576 printk(KERN_DEBUG "%s: recv: ", tunnel->name);
577
578 offset = 0;
579 do {
580 printk(" %02X", ptr[offset]);
581 } while (++offset < length);
582
583 printk("\n");
584 }
585
586 /* Get length of L2TP packet */
587 length = skb->len;
588
589 /* If type is control packet, it is handled by userspace. */
590 if (hdrflags & L2TP_HDRFLAG_T) {
591 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
592 "%s: recv control packet, len=%d\n", tunnel->name, length);
593 goto error;
594 }
595
596 /* Skip flags */
597 ptr += 2;
598
599 /* If length is present, skip it */
600 if (hdrflags & L2TP_HDRFLAG_L)
601 ptr += 2;
602
603 /* Extract tunnel and session ID */
604 tunnel_id = ntohs(*(__be16 *) ptr);
605 ptr += 2;
606 session_id = ntohs(*(__be16 *) ptr);
607 ptr += 2;
608
609 /* Find the session context */
610 session = pppol2tp_session_find(tunnel, session_id);
611 if (!session) {
612 /* Not found? Pass to userspace to deal with */
613 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
614 "%s: no socket found (%hu/%hu). Passing up.\n",
615 tunnel->name, tunnel_id, session_id);
616 goto error;
617 }
618 sock_hold(session->sock);
619
620 /* The ref count on the socket was increased by the above call since
621 * we now hold a pointer to the session. Take care to do sock_put()
622 * when exiting this function from now on...
623 */
624
625 /* Handle the optional sequence numbers. If we are the LAC,
626 * enable/disable sequence numbers under the control of the LNS. If
627 * no sequence numbers present but we were expecting them, discard
628 * frame.
629 */
630 if (hdrflags & L2TP_HDRFLAG_S) {
631 u16 ns, nr;
632 ns = ntohs(*(__be16 *) ptr);
633 ptr += 2;
634 nr = ntohs(*(__be16 *) ptr);
635 ptr += 2;
636
637 /* Received a packet with sequence numbers. If we're the LNS,
638 * check if we sre sending sequence numbers and if not,
639 * configure it so.
640 */
641 if ((!session->lns_mode) && (!session->send_seq)) {
642 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
643 "%s: requested to enable seq numbers by LNS\n",
644 session->name);
645 session->send_seq = -1;
646 }
647
648 /* Store L2TP info in the skb */
649 PPPOL2TP_SKB_CB(skb)->ns = ns;
650 PPPOL2TP_SKB_CB(skb)->nr = nr;
651 PPPOL2TP_SKB_CB(skb)->has_seq = 1;
652
653 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
654 "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n",
655 session->name, ns, nr, session->nr);
656 } else {
657 /* No sequence numbers.
658 * If user has configured mandatory sequence numbers, discard.
659 */
660 if (session->recv_seq) {
661 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
662 "%s: recv data has no seq numbers when required. "
663 "Discarding\n", session->name);
664 session->stats.rx_seq_discards++;
665 goto discard;
666 }
667
668 /* If we're the LAC and we're sending sequence numbers, the
669 * LNS has requested that we no longer send sequence numbers.
670 * If we're the LNS and we're sending sequence numbers, the
671 * LAC is broken. Discard the frame.
672 */
673 if ((!session->lns_mode) && (session->send_seq)) {
674 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
675 "%s: requested to disable seq numbers by LNS\n",
676 session->name);
677 session->send_seq = 0;
678 } else if (session->send_seq) {
679 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
680 "%s: recv data has no seq numbers when required. "
681 "Discarding\n", session->name);
682 session->stats.rx_seq_discards++;
683 goto discard;
684 }
685
686 /* Store L2TP info in the skb */
687 PPPOL2TP_SKB_CB(skb)->has_seq = 0;
688 }
689
690 /* If offset bit set, skip it. */
691 if (hdrflags & L2TP_HDRFLAG_O) {
692 offset = ntohs(*(__be16 *)ptr);
693 ptr += 2 + offset;
694 }
695
696 offset = ptr - optr;
697 if (!pskb_may_pull(skb, offset))
698 goto discard;
699
700 __skb_pull(skb, offset);
701
702 /* Skip PPP header, if present. In testing, Microsoft L2TP clients
703 * don't send the PPP header (PPP header compression enabled), but
704 * other clients can include the header. So we cope with both cases
705 * here. The PPP header is always FF03 when using L2TP.
706 *
707 * Note that skb->data[] isn't dereferenced from a u16 ptr here since
708 * the field may be unaligned.
709 */
710 if (!pskb_may_pull(skb, 2))
711 goto discard;
712
713 if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
714 skb_pull(skb, 2);
715
716 /* Prepare skb for adding to the session's reorder_q. Hold
717 * packets for max reorder_timeout or 1 second if not
718 * reordering.
719 */
720 PPPOL2TP_SKB_CB(skb)->length = length;
721 PPPOL2TP_SKB_CB(skb)->expires = jiffies +
722 (session->reorder_timeout ? session->reorder_timeout : HZ);
723
724 /* Add packet to the session's receive queue. Reordering is done here, if
725 * enabled. Saved L2TP protocol info is stored in skb->sb[].
726 */
727 if (PPPOL2TP_SKB_CB(skb)->has_seq) {
728 if (session->reorder_timeout != 0) {
729 /* Packet reordering enabled. Add skb to session's
730 * reorder queue, in order of ns.
731 */
732 pppol2tp_recv_queue_skb(session, skb);
733 } else {
734 /* Packet reordering disabled. Discard out-of-sequence
735 * packets
736 */
737 if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
738 session->stats.rx_seq_discards++;
739 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
740 "%s: oos pkt %hu len %d discarded, "
741 "waiting for %hu, reorder_q_len=%d\n",
742 session->name, PPPOL2TP_SKB_CB(skb)->ns,
743 PPPOL2TP_SKB_CB(skb)->length, session->nr,
744 skb_queue_len(&session->reorder_q));
745 goto discard;
746 }
747 skb_queue_tail(&session->reorder_q, skb);
748 }
749 } else {
750 /* No sequence numbers. Add the skb to the tail of the
751 * reorder queue. This ensures that it will be
752 * delivered after all previous sequenced skbs.
753 */
754 skb_queue_tail(&session->reorder_q, skb);
755 }
756
757 /* Try to dequeue as many skbs from reorder_q as we can. */
758 pppol2tp_recv_dequeue(session);
759 sock_put(sock);
760
761 return 0;
762
763discard:
764 session->stats.rx_errors++;
765 kfree_skb(skb);
766 sock_put(session->sock);
767 sock_put(sock);
768
769 return 0;
770
771discard_bad_csum:
772 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
773 UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
774 tunnel->stats.rx_errors++;
775 kfree_skb(skb);
776 sock_put(sock);
777
778 return 0;
779
780error:
781 /* Put UDP header back */
782 __skb_push(skb, sizeof(struct udphdr));
783 sock_put(sock);
784
785no_tunnel:
786 return 1;
787}
788
789/* UDP encapsulation receive handler. See net/ipv4/udp.c.
790 * Return codes:
791 * 0 : success.
792 * <0: error
793 * >0: skb should be passed up to userspace as UDP.
794 */
795static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
796{
797 struct pppol2tp_tunnel *tunnel;
798
799 tunnel = pppol2tp_sock_to_tunnel(sk);
800 if (tunnel == NULL)
801 goto pass_up;
802
803 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
804 "%s: received %d bytes\n", tunnel->name, skb->len);
805
806 if (pppol2tp_recv_core(sk, skb))
807 goto pass_up_put;
808
809 sock_put(sk);
810 return 0;
811
812pass_up_put:
813 sock_put(sk);
814pass_up:
815 return 1;
816}
817
818/* Receive message. This is the recvmsg for the PPPoL2TP socket.
819 */
820static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
821 struct msghdr *msg, size_t len,
822 int flags)
823{
824 int err;
825 struct sk_buff *skb;
826 struct sock *sk = sock->sk;
827
828 err = -EIO;
829 if (sk->sk_state & PPPOX_BOUND)
830 goto end;
831
832 msg->msg_namelen = 0;
833
834 err = 0;
835 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
836 flags & MSG_DONTWAIT, &err);
837 if (!skb)
838 goto end;
839
840 if (len > skb->len)
841 len = skb->len;
842 else if (len < skb->len)
843 msg->msg_flags |= MSG_TRUNC;
844
845 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
846 if (likely(err == 0))
847 err = len;
848
849 kfree_skb(skb);
850end:
851 return err;
852}
853
854/************************************************************************
855 * Transmit handling
856 ***********************************************************************/
857
858/* Tell how big L2TP headers are for a particular session. This
859 * depends on whether sequence numbers are being used.
860 */
861static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session)
862{
863 if (session->send_seq)
864 return PPPOL2TP_L2TP_HDR_SIZE_SEQ;
865
866 return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
867}
868
869/* Build an L2TP header for the session into the buffer provided.
870 */
871static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session,
872 void *buf)
873{
874 __be16 *bufp = buf;
875 u16 flags = L2TP_HDR_VER;
876
877 if (session->send_seq)
878 flags |= L2TP_HDRFLAG_S;
879
880 /* Setup L2TP header.
881 * FIXME: Can this ever be unaligned? Is direct dereferencing of
882 * 16-bit header fields safe here for all architectures?
883 */
884 *bufp++ = htons(flags);
885 *bufp++ = htons(session->tunnel_addr.d_tunnel);
886 *bufp++ = htons(session->tunnel_addr.d_session);
887 if (session->send_seq) {
888 *bufp++ = htons(session->ns);
889 *bufp++ = 0;
890 session->ns++;
891 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
892 "%s: updated ns to %hu\n", session->name, session->ns);
893 }
894}
895
896/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here
897 * when a user application does a sendmsg() on the session socket. L2TP and
898 * PPP headers must be inserted into the user's data.
899 */
900static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
901 size_t total_len)
902{
903 static const unsigned char ppph[2] = { 0xff, 0x03 };
904 struct sock *sk = sock->sk;
905 struct inet_sock *inet;
906 __wsum csum;
907 struct sk_buff *skb;
908 int error;
909 int hdr_len;
910 struct pppol2tp_session *session;
911 struct pppol2tp_tunnel *tunnel;
912 struct udphdr *uh;
913 unsigned int len;
914 struct sock *sk_tun;
915 u16 udp_len;
916
917 error = -ENOTCONN;
918 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
919 goto error;
920
921 /* Get session and tunnel contexts */
922 error = -EBADF;
923 session = pppol2tp_sock_to_session(sk);
924 if (session == NULL)
925 goto error;
926
927 sk_tun = session->tunnel_sock;
928 tunnel = pppol2tp_sock_to_tunnel(sk_tun);
929 if (tunnel == NULL)
930 goto error_put_sess;
931
932 /* What header length is configured for this session? */
933 hdr_len = pppol2tp_l2tp_header_len(session);
934
935 /* Allocate a socket buffer */
936 error = -ENOMEM;
937 skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
938 sizeof(struct udphdr) + hdr_len +
939 sizeof(ppph) + total_len,
940 0, GFP_KERNEL);
941 if (!skb)
942 goto error_put_sess_tun;
943
944 /* Reserve space for headers. */
945 skb_reserve(skb, NET_SKB_PAD);
946 skb_reset_network_header(skb);
947 skb_reserve(skb, sizeof(struct iphdr));
948 skb_reset_transport_header(skb);
949
950 /* Build UDP header */
951 inet = inet_sk(sk_tun);
952 udp_len = hdr_len + sizeof(ppph) + total_len;
953 uh = (struct udphdr *) skb->data;
954 uh->source = inet->inet_sport;
955 uh->dest = inet->inet_dport;
956 uh->len = htons(udp_len);
957 uh->check = 0;
958 skb_put(skb, sizeof(struct udphdr));
959
960 /* Build L2TP header */
961 pppol2tp_build_l2tp_header(session, skb->data);
962 skb_put(skb, hdr_len);
963
964 /* Add PPP header */
965 skb->data[0] = ppph[0];
966 skb->data[1] = ppph[1];
967 skb_put(skb, 2);
968
969 /* Copy user data into skb */
970 error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
971 if (error < 0) {
972 kfree_skb(skb);
973 goto error_put_sess_tun;
974 }
975 skb_put(skb, total_len);
976
977 /* Calculate UDP checksum if configured to do so */
978 if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
979 skb->ip_summed = CHECKSUM_NONE;
980 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
981 skb->ip_summed = CHECKSUM_COMPLETE;
982 csum = skb_checksum(skb, 0, udp_len, 0);
983 uh->check = csum_tcpudp_magic(inet->inet_saddr,
984 inet->inet_daddr,
985 udp_len, IPPROTO_UDP, csum);
986 if (uh->check == 0)
987 uh->check = CSUM_MANGLED_0;
988 } else {
989 skb->ip_summed = CHECKSUM_PARTIAL;
990 skb->csum_start = skb_transport_header(skb) - skb->head;
991 skb->csum_offset = offsetof(struct udphdr, check);
992 uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
993 inet->inet_daddr,
994 udp_len, IPPROTO_UDP, 0);
995 }
996
997 /* Debug */
998 if (session->send_seq)
999 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
1000 "%s: send %Zd bytes, ns=%hu\n", session->name,
1001 total_len, session->ns - 1);
1002 else
1003 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
1004 "%s: send %Zd bytes\n", session->name, total_len);
1005
1006 if (session->debug & PPPOL2TP_MSG_DATA) {
1007 int i;
1008 unsigned char *datap = skb->data;
1009
1010 printk(KERN_DEBUG "%s: xmit:", session->name);
1011 for (i = 0; i < total_len; i++) {
1012 printk(" %02X", *datap++);
1013 if (i == 15) {
1014 printk(" ...");
1015 break;
1016 }
1017 }
1018 printk("\n");
1019 }
1020
1021 /* Queue the packet to IP for output */
1022 len = skb->len;
1023 error = ip_queue_xmit(skb, 1);
1024
1025 /* Update stats */
1026 if (error >= 0) {
1027 tunnel->stats.tx_packets++;
1028 tunnel->stats.tx_bytes += len;
1029 session->stats.tx_packets++;
1030 session->stats.tx_bytes += len;
1031 } else {
1032 tunnel->stats.tx_errors++;
1033 session->stats.tx_errors++;
1034 }
1035
1036 return error;
1037
1038error_put_sess_tun:
1039 sock_put(session->tunnel_sock);
1040error_put_sess:
1041 sock_put(sk);
1042error:
1043 return error;
1044}
1045
1046/* Automatically called when the skb is freed.
1047 */
1048static void pppol2tp_sock_wfree(struct sk_buff *skb)
1049{
1050 sock_put(skb->sk);
1051}
1052
1053/* For data skbs that we transmit, we associate with the tunnel socket
1054 * but don't do accounting.
1055 */
1056static inline void pppol2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1057{
1058 sock_hold(sk);
1059 skb->sk = sk;
1060 skb->destructor = pppol2tp_sock_wfree;
1061}
1062
/* Transmit function called by generic PPP driver. Sends PPP frame
 * over PPPoL2TP socket.
 *
 * This is almost the same as pppol2tp_sendmsg(), but rather than
 * being called with a msghdr from userspace, it is called with a skb
 * from the kernel.
 *
 * The supplied skb from ppp doesn't have enough headroom for the
 * insertion of L2TP, UDP and IP headers so we need to allocate more
 * headroom in the skb. This will create a cloned skb. But we must be
 * careful in the error case because the caller will expect to free
 * the skb it supplied, not our cloned skb. So we take care to always
 * leave the original skb unfreed if we return an error.
 */
static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
{
	/* PPP address (0xff) and control (0x03) bytes */
	static const u8 ppph[2] = { 0xff, 0x03 };
	struct sock *sk = (struct sock *) chan->private;
	struct sock *sk_tun;
	int hdr_len;
	u16 udp_len;
	struct pppol2tp_session *session;
	struct pppol2tp_tunnel *tunnel;
	int rc;
	int headroom;
	int data_len = skb->len;
	struct inet_sock *inet;
	__wsum csum;
	struct udphdr *uh;
	unsigned int len;
	int old_headroom;
	int new_headroom;

	if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
		goto abort;

	/* Get session and tunnel contexts from the socket. Both
	 * lookups take a socket reference (the abort labels below
	 * release them), dropped again before returning.
	 */
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto abort;

	sk_tun = session->tunnel_sock;
	if (sk_tun == NULL)
		goto abort_put_sess;
	tunnel = pppol2tp_sock_to_tunnel(sk_tun);
	if (tunnel == NULL)
		goto abort_put_sess;

	/* What header length is configured for this session? */
	hdr_len = pppol2tp_l2tp_header_len(session);

	/* Check that there's enough headroom in the skb to insert IP,
	 * UDP and L2TP and PPP headers. If not enough, expand it to
	 * make room. Adjust truesize.
	 */
	headroom = NET_SKB_PAD + sizeof(struct iphdr) +
		sizeof(struct udphdr) + hdr_len + sizeof(ppph);
	old_headroom = skb_headroom(skb);
	if (skb_cow_head(skb, headroom))
		goto abort_put_sess_tun;

	new_headroom = skb_headroom(skb);
	/* Detach from the PPP side's owner; the skb is re-owned by the
	 * tunnel socket further down via pppol2tp_skb_set_owner_w().
	 */
	skb_orphan(skb);
	skb->truesize += new_headroom - old_headroom;

	/* Setup PPP header */
	__skb_push(skb, sizeof(ppph));
	skb->data[0] = ppph[0];
	skb->data[1] = ppph[1];

	/* Setup L2TP header */
	pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len));

	udp_len = sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len;

	/* Setup UDP header, addressed via the tunnel socket */
	inet = inet_sk(sk_tun);
	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);
	uh->source = inet->inet_sport;
	uh->dest = inet->inet_dport;
	uh->len = htons(udp_len);
	uh->check = 0;

	/* Debug */
	if (session->send_seq)
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
		       "%s: send %d bytes, ns=%hu\n", session->name,
		       data_len, session->ns - 1);
	else
		PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
		       "%s: send %d bytes\n", session->name, data_len);

	if (session->debug & PPPOL2TP_MSG_DATA) {
		int i;
		unsigned char *datap = skb->data;

		printk(KERN_DEBUG "%s: xmit:", session->name);
		for (i = 0; i < data_len; i++) {
			printk(" %02X", *datap++);
			if (i == 31) {
				printk(" ...");
				break;
			}
		}
		printk("\n");
	}

	/* Clear IP options and netfilter state carried over from the
	 * PPP side before handing the skb to IP.
	 */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	nf_reset(skb);

	/* Get routing info from the tunnel socket */
	skb_dst_drop(skb);
	skb_dst_set(skb, dst_clone(__sk_dst_get(sk_tun)));
	pppol2tp_skb_set_owner_w(skb, sk_tun);

	/* Calculate UDP checksum if configured to do so */
	if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
		skb->ip_summed = CHECKSUM_NONE;
	else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
		 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
		/* Device can't checksum: compute the full UDP checksum now */
		skb->ip_summed = CHECKSUM_COMPLETE;
		csum = skb_checksum(skb, 0, udp_len, 0);
		uh->check = csum_tcpudp_magic(inet->inet_saddr,
					      inet->inet_daddr,
					      udp_len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	} else {
		/* Leave the payload checksum to the device */
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
					       inet->inet_daddr,
					       udp_len, IPPROTO_UDP, 0);
	}

	/* Queue the packet to IP for output */
	len = skb->len;
	rc = ip_queue_xmit(skb, 1);

	/* Update stats */
	if (rc >= 0) {
		tunnel->stats.tx_packets++;
		tunnel->stats.tx_bytes += len;
		session->stats.tx_packets++;
		session->stats.tx_bytes += len;
	} else {
		tunnel->stats.tx_errors++;
		session->stats.tx_errors++;
	}

	/* Drop the lookup references; always report success to ppp */
	sock_put(sk_tun);
	sock_put(sk);
	return 1;

abort_put_sess_tun:
	sock_put(sk_tun);
abort_put_sess:
	sock_put(sk);
abort:
	/* Free the original skb */
	kfree_skb(skb);
	return 1;
}
1231
1232/*****************************************************************************
1233 * Session (and tunnel control) socket create/destroy.
1234 *****************************************************************************/
1235
/* When the tunnel UDP socket is closed, all the attached sockets need to go
 * too.
 */
static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
{
	int hash;
	struct hlist_node *walk;
	struct hlist_node *tmp;
	struct pppol2tp_session *session;
	struct sock *sk;

	BUG_ON(tunnel == NULL);

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing all sessions...\n", tunnel->name);

	write_lock_bh(&tunnel->hlist_lock);
	for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
again:
		hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
			struct sk_buff *skb;

			session = hlist_entry(walk, struct pppol2tp_session, hlist);

			sk = session->sock;

			PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
			       "%s: closing session\n", session->name);

			hlist_del_init(&session->hlist);

			/* Since we should hold the sock lock while
			 * doing any unbinding, we need to release the
			 * lock we're holding before taking that lock.
			 * Hold a reference to the sock so it doesn't
			 * disappear as we're jumping between locks.
			 */
			sock_hold(sk);
			write_unlock_bh(&tunnel->hlist_lock);
			lock_sock(sk);

			if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
				pppox_unbind_sock(sk);
				sk->sk_state = PPPOX_DEAD;
				sk->sk_state_change(sk);
			}

			/* Purge any queued data */
			skb_queue_purge(&sk->sk_receive_queue);
			skb_queue_purge(&sk->sk_write_queue);
			/* Each skb queued on reorder_q holds a reference
			 * on the session socket (taken on the receive
			 * path — see the discard handling there), so
			 * drop one reference per skb freed.
			 */
			while ((skb = skb_dequeue(&session->reorder_q))) {
				kfree_skb(skb);
				sock_put(sk);
			}

			release_sock(sk);
			sock_put(sk);

			/* Now restart from the beginning of this hash
			 * chain. We always remove a session from the
			 * list so we are guaranteed to make forward
			 * progress.
			 */
			write_lock_bh(&tunnel->hlist_lock);
			goto again;
		}
	}
	write_unlock_bh(&tunnel->hlist_lock);
}
1305
/* Really kill the tunnel.
 * Come here only when all sessions have been cleared from the tunnel.
 */
static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
{
	struct pppol2tp_net *pn = pppol2tp_pernet(tunnel->pppol2tp_net);

	/* Remove from the per-net tunnel list */
	write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
	list_del_init(&tunnel->list);
	write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);

	/* Keep the global tunnel count in step, then free the context */
	atomic_dec(&pppol2tp_tunnel_count);
	kfree(tunnel);
}
1321
/* Tunnel UDP socket destruct hook.
 * The tunnel context is deleted only when all session sockets have been
 * closed.
 */
static void pppol2tp_tunnel_destruct(struct sock *sk)
{
	struct pppol2tp_tunnel *tunnel;

	tunnel = sk->sk_user_data;
	if (tunnel == NULL)
		goto end;

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: closing...\n", tunnel->name);

	/* Close all sessions */
	pppol2tp_tunnel_closeall(tunnel);

	/* No longer an encapsulation socket. See net/ipv4/udp.c */
	(udp_sk(sk))->encap_type = 0;
	(udp_sk(sk))->encap_rcv = NULL;

	/* Remove hooks into tunnel socket: restore the destructor saved
	 * in pppol2tp_prepare_tunnel_socket() before chaining to it.
	 */
	tunnel->sock = NULL;
	sk->sk_destruct = tunnel->old_sk_destruct;
	sk->sk_user_data = NULL;

	/* Call original (UDP) socket destructor */
	if (sk->sk_destruct != NULL)
		(*sk->sk_destruct)(sk);

	/* Drops the ref taken at prepare time; may free the tunnel */
	pppol2tp_tunnel_dec_refcount(tunnel);

end:
	return;
}
1358
/* Really kill the session socket. (Called from sock_put() if
 * refcnt == 0.)
 */
static void pppol2tp_session_destruct(struct sock *sk)
{
	struct pppol2tp_session *session = NULL;

	if (sk->sk_user_data != NULL) {
		struct pppol2tp_tunnel *tunnel;

		session = sk->sk_user_data;
		if (session == NULL)
			goto out;

		BUG_ON(session->magic != L2TP_SESSION_MAGIC);

		/* Don't use pppol2tp_sock_to_tunnel() here to
		 * get the tunnel context because the tunnel
		 * socket might have already been closed (its
		 * sk->sk_user_data will be NULL) so use the
		 * session's private tunnel ptr instead.
		 */
		tunnel = session->tunnel;
		if (tunnel != NULL) {
			BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);

			/* If session_id is zero, this is a null
			 * session context, which was created for a
			 * socket that is being used only to manage
			 * tunnels.
			 */
			if (session->tunnel_addr.s_session != 0) {
				/* Delete the session socket from the
				 * hash
				 */
				write_lock_bh(&tunnel->hlist_lock);
				hlist_del_init(&session->hlist);
				write_unlock_bh(&tunnel->hlist_lock);

				atomic_dec(&pppol2tp_session_count);
			}

			/* This will delete the tunnel context if this
			 * is the last session on the tunnel.
			 */
			session->tunnel = NULL;
			session->tunnel_sock = NULL;
			pppol2tp_tunnel_dec_refcount(tunnel);
		}
	}

	/* kfree(NULL) is a no-op when no session was ever attached */
	kfree(session);
out:
	return;
}
1414
/* Called when the PPPoX socket (session) is closed.
 */
static int pppol2tp_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct pppol2tp_session *session;
	int error;

	if (!sk)
		return 0;

	error = -EBADF;
	lock_sock(sk);
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto error;

	pppox_unbind_sock(sk);

	/* Signal the death of the socket. */
	sk->sk_state = PPPOX_DEAD;
	sock_orphan(sk);
	sock->sk = NULL;

	/* Takes a reference on sk when a session is attached */
	session = pppol2tp_sock_to_session(sk);

	/* Purge any queued data */
	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
	if (session != NULL) {
		struct sk_buff *skb;
		/* Each reorder_q skb holds a session-socket reference
		 * (taken on the receive path); drop one per skb freed,
		 * then drop the lookup reference taken above.
		 */
		while ((skb = skb_dequeue(&session->reorder_q))) {
			kfree_skb(skb);
			sock_put(sk);
		}
		sock_put(sk);
	}

	release_sock(sk);

	/* This will delete the session context via
	 * pppol2tp_session_destruct() if the socket's refcnt drops to
	 * zero.
	 */
	sock_put(sk);

	return 0;

error:
	release_sock(sk);
	return error;
}
1466
1467/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
1468 * sockets attached to it.
1469 */
1470static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
1471 int fd, u16 tunnel_id, int *error)
1472{
1473 int err;
1474 struct socket *sock = NULL;
1475 struct sock *sk;
1476 struct pppol2tp_tunnel *tunnel;
1477 struct pppol2tp_net *pn;
1478 struct sock *ret = NULL;
1479
1480 /* Get the tunnel UDP socket from the fd, which was opened by
1481 * the userspace L2TP daemon.
1482 */
1483 err = -EBADF;
1484 sock = sockfd_lookup(fd, &err);
1485 if (!sock) {
1486 PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
1487 "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
1488 tunnel_id, fd, err);
1489 goto err;
1490 }
1491
1492 sk = sock->sk;
1493
1494 /* Quick sanity checks */
1495 err = -EPROTONOSUPPORT;
1496 if (sk->sk_protocol != IPPROTO_UDP) {
1497 PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
1498 "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1499 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
1500 goto err;
1501 }
1502 err = -EAFNOSUPPORT;
1503 if (sock->ops->family != AF_INET) {
1504 PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
1505 "tunl %hu: fd %d wrong family, got %d, expected %d\n",
1506 tunnel_id, fd, sock->ops->family, AF_INET);
1507 goto err;
1508 }
1509
1510 err = -ENOTCONN;
1511
1512 /* Check if this socket has already been prepped */
1513 tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;
1514 if (tunnel != NULL) {
1515 /* User-data field already set */
1516 err = -EBUSY;
1517 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1518
1519 /* This socket has already been prepped */
1520 ret = tunnel->sock;
1521 goto out;
1522 }
1523
1524 /* This socket is available and needs prepping. Create a new tunnel
1525 * context and init it.
1526 */
1527 sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL);
1528 if (sk->sk_user_data == NULL) {
1529 err = -ENOMEM;
1530 goto err;
1531 }
1532
1533 tunnel->magic = L2TP_TUNNEL_MAGIC;
1534 sprintf(&tunnel->name[0], "tunl %hu", tunnel_id);
1535
1536 tunnel->stats.tunnel_id = tunnel_id;
1537 tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS;
1538
1539 /* Hook on the tunnel socket destructor so that we can cleanup
1540 * if the tunnel socket goes away.
1541 */
1542 tunnel->old_sk_destruct = sk->sk_destruct;
1543 sk->sk_destruct = pppol2tp_tunnel_destruct;
1544
1545 tunnel->sock = sk;
1546 sk->sk_allocation = GFP_ATOMIC;
1547
1548 /* Misc init */
1549 rwlock_init(&tunnel->hlist_lock);
1550
1551 /* The net we belong to */
1552 tunnel->pppol2tp_net = net;
1553 pn = pppol2tp_pernet(net);
1554
1555 /* Add tunnel to our list */
1556 INIT_LIST_HEAD(&tunnel->list);
1557 write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
1558 list_add(&tunnel->list, &pn->pppol2tp_tunnel_list);
1559 write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
1560 atomic_inc(&pppol2tp_tunnel_count);
1561
1562 /* Bump the reference count. The tunnel context is deleted
1563 * only when this drops to zero.
1564 */
1565 pppol2tp_tunnel_inc_refcount(tunnel);
1566
1567 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1568 (udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP;
1569 (udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv;
1570
1571 ret = tunnel->sock;
1572
1573 *error = 0;
1574out:
1575 if (sock)
1576 sockfd_put(sock);
1577
1578 return ret;
1579
1580err:
1581 *error = err;
1582 goto out;
1583}
1584
/* Proto descriptor used by pppol2tp_create()'s sk_alloc(); obj_size
 * sizes each socket as a struct pppox_sock.
 */
static struct proto pppol2tp_sk_proto = {
	.name = "PPPOL2TP",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct pppox_sock),
};
1590
1591/* socket() handler. Initialize a new struct sock.
1592 */
1593static int pppol2tp_create(struct net *net, struct socket *sock)
1594{
1595 int error = -ENOMEM;
1596 struct sock *sk;
1597
1598 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
1599 if (!sk)
1600 goto out;
1601
1602 sock_init_data(sock, sk);
1603
1604 sock->state = SS_UNCONNECTED;
1605 sock->ops = &pppol2tp_ops;
1606
1607 sk->sk_backlog_rcv = pppol2tp_recv_core;
1608 sk->sk_protocol = PX_PROTO_OL2TP;
1609 sk->sk_family = PF_PPPOX;
1610 sk->sk_state = PPPOX_NONE;
1611 sk->sk_type = SOCK_STREAM;
1612 sk->sk_destruct = pppol2tp_session_destruct;
1613
1614 error = 0;
1615
1616out:
1617 return error;
1618}
1619
/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
 */
static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
			    int sockaddr_len, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
	struct pppox_sock *po = pppox_sk(sk);
	struct sock *tunnel_sock = NULL;
	struct pppol2tp_session *session = NULL;
	struct pppol2tp_tunnel *tunnel;
	struct dst_entry *dst;
	int error = 0;

	lock_sock(sk);

	error = -EINVAL;
	if (sp->sa_protocol != PX_PROTO_OL2TP)
		goto end;

	/* Check for already bound sockets */
	error = -EBUSY;
	if (sk->sk_state & PPPOX_CONNECTED)
		goto end;

	/* We don't supporting rebinding anyway */
	error = -EALREADY;
	if (sk->sk_user_data)
		goto end; /* socket is already attached */

	/* Don't bind if s_tunnel is 0 */
	error = -EINVAL;
	if (sp->pppol2tp.s_tunnel == 0)
		goto end;

	/* Special case: prepare tunnel socket if s_session and
	 * d_session is 0. Otherwise look up tunnel using supplied
	 * tunnel id.
	 */
	if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
		tunnel_sock = pppol2tp_prepare_tunnel_socket(sock_net(sk),
							     sp->pppol2tp.fd,
							     sp->pppol2tp.s_tunnel,
							     &error);
		if (tunnel_sock == NULL)
			goto end;

		/* NOTE(review): this hold is only balanced by the
		 * sock_put() at end_put_tun. Failure exits between
		 * here and out_no_ppp use "goto end" and appear to
		 * leak the reference — verify.
		 */
		sock_hold(tunnel_sock);
		tunnel = tunnel_sock->sk_user_data;
	} else {
		tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);

		/* Error if we can't find the tunnel */
		error = -ENOENT;
		if (tunnel == NULL)
			goto end;

		tunnel_sock = tunnel->sock;
	}

	/* Check that this session doesn't already exist */
	error = -EEXIST;
	session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session);
	if (session != NULL)
		goto end;

	/* Allocate and initialize a new session context. */
	session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL);
	if (session == NULL) {
		error = -ENOMEM;
		goto end;
	}

	skb_queue_head_init(&session->reorder_q);

	session->magic = L2TP_SESSION_MAGIC;
	session->owner = current->pid;
	session->sock = sk;
	session->tunnel = tunnel;
	session->tunnel_sock = tunnel_sock;
	session->tunnel_addr = sp->pppol2tp;
	sprintf(&session->name[0], "sess %hu/%hu",
		session->tunnel_addr.s_tunnel,
		session->tunnel_addr.s_session);

	session->stats.tunnel_id = session->tunnel_addr.s_tunnel;
	session->stats.session_id = session->tunnel_addr.s_session;

	INIT_HLIST_NODE(&session->hlist);

	/* Inherit debug options from tunnel */
	session->debug = tunnel->debug;

	/* Default MTU must allow space for UDP/L2TP/PPP
	 * headers.
	 */
	session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;

	/* If PMTU discovery was enabled, use the MTU that was discovered */
	dst = sk_dst_get(sk);
	if (dst != NULL) {
		u32 pmtu = dst_mtu(__sk_dst_get(sk));
		if (pmtu != 0)
			session->mtu = session->mru = pmtu -
				PPPOL2TP_HEADER_OVERHEAD;
		dst_release(dst);
	}

	/* Special case: if source & dest session_id == 0x0000, this socket is
	 * being created to manage the tunnel. Don't add the session to the
	 * session hash list, just set up the internal context for use by
	 * ioctl() and sockopt() handlers.
	 */
	if ((session->tunnel_addr.s_session == 0) &&
	    (session->tunnel_addr.d_session == 0)) {
		error = 0;
		sk->sk_user_data = session;
		goto out_no_ppp;
	}

	/* Get tunnel context from the tunnel socket; takes a reference
	 * on tunnel_sock that end_put_tun drops.
	 */
	tunnel = pppol2tp_sock_to_tunnel(tunnel_sock);
	if (tunnel == NULL) {
		error = -EBADF;
		goto end;
	}

	/* Right now, because we don't have a way to push the incoming skb's
	 * straight through the UDP layer, the only header we need to worry
	 * about is the L2TP header. This size is different depending on
	 * whether sequence numbers are enabled for the data channel.
	 */
	po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;

	po->chan.private = sk;
	po->chan.ops = &pppol2tp_chan_ops;
	po->chan.mtu = session->mtu;

	error = ppp_register_net_channel(sock_net(sk), &po->chan);
	if (error)
		goto end_put_tun;

	/* This is how we get the session context from the socket. */
	sk->sk_user_data = session;

	/* Add session to the tunnel's hash list */
	write_lock_bh(&tunnel->hlist_lock);
	hlist_add_head(&session->hlist,
		       pppol2tp_session_id_hash(tunnel,
						session->tunnel_addr.s_session));
	write_unlock_bh(&tunnel->hlist_lock);

	atomic_inc(&pppol2tp_session_count);

out_no_ppp:
	/* Session holds a ref on its tunnel until destruct time */
	pppol2tp_tunnel_inc_refcount(tunnel);
	sk->sk_state = PPPOX_CONNECTED;
	PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
	       "%s: created\n", session->name);

end_put_tun:
	sock_put(tunnel_sock);
end:
	release_sock(sk);

	if (error != 0) {
		if (session)
			PRINTK(session->debug,
			       PPPOL2TP_MSG_CONTROL, KERN_WARNING,
			       "%s: connect failed: %d\n",
			       session->name, error);
		else
			PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
			       "connect failed: %d\n", error);
	}

	return error;
}
1798
1799/* getname() support.
1800 */
1801static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
1802 int *usockaddr_len, int peer)
1803{
1804 int len = sizeof(struct sockaddr_pppol2tp);
1805 struct sockaddr_pppol2tp sp;
1806 int error = 0;
1807 struct pppol2tp_session *session;
1808
1809 error = -ENOTCONN;
1810 if (sock->sk->sk_state != PPPOX_CONNECTED)
1811 goto end;
1812
1813 session = pppol2tp_sock_to_session(sock->sk);
1814 if (session == NULL) {
1815 error = -EBADF;
1816 goto end;
1817 }
1818
1819 sp.sa_family = AF_PPPOX;
1820 sp.sa_protocol = PX_PROTO_OL2TP;
1821 memcpy(&sp.pppol2tp, &session->tunnel_addr,
1822 sizeof(struct pppol2tp_addr));
1823
1824 memcpy(uaddr, &sp, len);
1825
1826 *usockaddr_len = len;
1827
1828 error = 0;
1829 sock_put(sock->sk);
1830
1831end:
1832 return error;
1833}
1834
1835/****************************************************************************
1836 * ioctl() handlers.
1837 *
1838 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
1839 * sockets. However, in order to control kernel tunnel features, we allow
1840 * userspace to create a special "tunnel" PPPoX socket which is used for
1841 * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
1842 * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
1843 * calls.
1844 ****************************************************************************/
1845
1846/* Session ioctl helper.
1847 */
1848static int pppol2tp_session_ioctl(struct pppol2tp_session *session,
1849 unsigned int cmd, unsigned long arg)
1850{
1851 struct ifreq ifr;
1852 int err = 0;
1853 struct sock *sk = session->sock;
1854 int val = (int) arg;
1855
1856 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
1857 "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
1858 session->name, cmd, arg);
1859
1860 sock_hold(sk);
1861
1862 switch (cmd) {
1863 case SIOCGIFMTU:
1864 err = -ENXIO;
1865 if (!(sk->sk_state & PPPOX_CONNECTED))
1866 break;
1867
1868 err = -EFAULT;
1869 if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
1870 break;
1871 ifr.ifr_mtu = session->mtu;
1872 if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
1873 break;
1874
1875 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1876 "%s: get mtu=%d\n", session->name, session->mtu);
1877 err = 0;
1878 break;
1879
1880 case SIOCSIFMTU:
1881 err = -ENXIO;
1882 if (!(sk->sk_state & PPPOX_CONNECTED))
1883 break;
1884
1885 err = -EFAULT;
1886 if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
1887 break;
1888
1889 session->mtu = ifr.ifr_mtu;
1890
1891 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1892 "%s: set mtu=%d\n", session->name, session->mtu);
1893 err = 0;
1894 break;
1895
1896 case PPPIOCGMRU:
1897 err = -ENXIO;
1898 if (!(sk->sk_state & PPPOX_CONNECTED))
1899 break;
1900
1901 err = -EFAULT;
1902 if (put_user(session->mru, (int __user *) arg))
1903 break;
1904
1905 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1906 "%s: get mru=%d\n", session->name, session->mru);
1907 err = 0;
1908 break;
1909
1910 case PPPIOCSMRU:
1911 err = -ENXIO;
1912 if (!(sk->sk_state & PPPOX_CONNECTED))
1913 break;
1914
1915 err = -EFAULT;
1916 if (get_user(val,(int __user *) arg))
1917 break;
1918
1919 session->mru = val;
1920 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1921 "%s: set mru=%d\n", session->name, session->mru);
1922 err = 0;
1923 break;
1924
1925 case PPPIOCGFLAGS:
1926 err = -EFAULT;
1927 if (put_user(session->flags, (int __user *) arg))
1928 break;
1929
1930 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1931 "%s: get flags=%d\n", session->name, session->flags);
1932 err = 0;
1933 break;
1934
1935 case PPPIOCSFLAGS:
1936 err = -EFAULT;
1937 if (get_user(val, (int __user *) arg))
1938 break;
1939 session->flags = val;
1940 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1941 "%s: set flags=%d\n", session->name, session->flags);
1942 err = 0;
1943 break;
1944
1945 case PPPIOCGL2TPSTATS:
1946 err = -ENXIO;
1947 if (!(sk->sk_state & PPPOX_CONNECTED))
1948 break;
1949
1950 if (copy_to_user((void __user *) arg, &session->stats,
1951 sizeof(session->stats)))
1952 break;
1953 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1954 "%s: get L2TP stats\n", session->name);
1955 err = 0;
1956 break;
1957
1958 default:
1959 err = -ENOSYS;
1960 break;
1961 }
1962
1963 sock_put(sk);
1964
1965 return err;
1966}
1967
/* Tunnel ioctl helper.
 *
 * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
 * specifies a session_id, the session ioctl handler is called. This allows an
 * application to retrieve session stats via a tunnel socket.
 *
 * Returns 0 on success, -ENOSYS for unknown commands, -ENXIO when the
 * tunnel socket is not connected, -EFAULT on user-copy failure, and
 * -EBADR when the requested session_id does not exist in this tunnel.
 */
static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel,
				 unsigned int cmd, unsigned long arg)
{
	int err = 0;
	struct sock *sk = tunnel->sock;
	struct pppol2tp_ioc_stats stats_req;

	PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
	       "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name,
	       cmd, arg);

	/* Pin the tunnel socket for the duration of the call. */
	sock_hold(sk);

	switch (cmd) {
	case PPPIOCGL2TPSTATS:
		err = -ENXIO;
		if (!(sk->sk_state & PPPOX_CONNECTED))
			break;

		if (copy_from_user(&stats_req, (void __user *) arg,
				   sizeof(stats_req))) {
			err = -EFAULT;
			break;
		}
		if (stats_req.session_id != 0) {
			/* resend to session ioctl handler */
			struct pppol2tp_session *session =
				pppol2tp_session_find(tunnel, stats_req.session_id);
			if (session != NULL)
				err = pppol2tp_session_ioctl(session, cmd, arg);
			else
				err = -EBADR;
			break;
		}
#ifdef CONFIG_XFRM
		/* Report whether either direction of the tunnel socket has an
		 * IPsec policy attached. */
		tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
#endif
		if (copy_to_user((void __user *) arg, &tunnel->stats,
				 sizeof(tunnel->stats))) {
			err = -EFAULT;
			break;
		}
		PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
		       "%s: get L2TP stats\n", tunnel->name);
		err = 0;
		break;

	default:
		err = -ENOSYS;
		break;
	}

	sock_put(sk);

	return err;
}
2030
/* Main ioctl() handler.
 * Dispatch to tunnel or session helpers depending on the socket.
 *
 * Returns 0 for a NULL socket, -EBADF for a dead socket or failed
 * session/tunnel lookup, -ENOTCONN when the socket is neither connected
 * nor bound, otherwise the helper's return value.
 */
static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct pppol2tp_session *session;
	struct pppol2tp_tunnel *tunnel;
	int err;

	if (!sk)
		return 0;

	err = -EBADF;
	if (sock_flag(sk, SOCK_DEAD) != 0)
		goto end;

	err = -ENOTCONN;
	if ((sk->sk_user_data == NULL) ||
	    (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
		goto end;

	/* Get session context from the socket.
	 * NOTE(review): pppol2tp_sock_to_session() appears to take a
	 * reference on sk that is dropped at end_put_sess below — confirm
	 * against its definition. */
	err = -EBADF;
	session = pppol2tp_sock_to_session(sk);
	if (session == NULL)
		goto end;

	/* Special case: if session's session_id is zero, treat ioctl as a
	 * tunnel ioctl
	 */
	if ((session->tunnel_addr.s_session == 0) &&
	    (session->tunnel_addr.d_session == 0)) {
		err = -EBADF;
		tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
		if (tunnel == NULL)
			goto end_put_sess;

		err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
		/* Drop the ref taken by pppol2tp_sock_to_tunnel(). */
		sock_put(session->tunnel_sock);
		goto end_put_sess;
	}

	err = pppol2tp_session_ioctl(session, cmd, arg);

end_put_sess:
	sock_put(sk);
end:
	return err;
}
2082
2083/*****************************************************************************
2084 * setsockopt() / getsockopt() support.
2085 *
2086 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
2087 * sockets. In order to control kernel tunnel features, we allow userspace to
2088 * create a special "tunnel" PPPoX socket which is used for control only.
2089 * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
2090 * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
2091 *****************************************************************************/
2092
2093/* Tunnel setsockopt() helper.
2094 */
2095static int pppol2tp_tunnel_setsockopt(struct sock *sk,
2096 struct pppol2tp_tunnel *tunnel,
2097 int optname, int val)
2098{
2099 int err = 0;
2100
2101 switch (optname) {
2102 case PPPOL2TP_SO_DEBUG:
2103 tunnel->debug = val;
2104 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2105 "%s: set debug=%x\n", tunnel->name, tunnel->debug);
2106 break;
2107
2108 default:
2109 err = -ENOPROTOOPT;
2110 break;
2111 }
2112
2113 return err;
2114}
2115
2116/* Session setsockopt helper.
2117 */
2118static int pppol2tp_session_setsockopt(struct sock *sk,
2119 struct pppol2tp_session *session,
2120 int optname, int val)
2121{
2122 int err = 0;
2123
2124 switch (optname) {
2125 case PPPOL2TP_SO_RECVSEQ:
2126 if ((val != 0) && (val != 1)) {
2127 err = -EINVAL;
2128 break;
2129 }
2130 session->recv_seq = val ? -1 : 0;
2131 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2132 "%s: set recv_seq=%d\n", session->name,
2133 session->recv_seq);
2134 break;
2135
2136 case PPPOL2TP_SO_SENDSEQ:
2137 if ((val != 0) && (val != 1)) {
2138 err = -EINVAL;
2139 break;
2140 }
2141 session->send_seq = val ? -1 : 0;
2142 {
2143 struct sock *ssk = session->sock;
2144 struct pppox_sock *po = pppox_sk(ssk);
2145 po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
2146 PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
2147 }
2148 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2149 "%s: set send_seq=%d\n", session->name, session->send_seq);
2150 break;
2151
2152 case PPPOL2TP_SO_LNSMODE:
2153 if ((val != 0) && (val != 1)) {
2154 err = -EINVAL;
2155 break;
2156 }
2157 session->lns_mode = val ? -1 : 0;
2158 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2159 "%s: set lns_mode=%d\n", session->name,
2160 session->lns_mode);
2161 break;
2162
2163 case PPPOL2TP_SO_DEBUG:
2164 session->debug = val;
2165 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2166 "%s: set debug=%x\n", session->name, session->debug);
2167 break;
2168
2169 case PPPOL2TP_SO_REORDERTO:
2170 session->reorder_timeout = msecs_to_jiffies(val);
2171 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2172 "%s: set reorder_timeout=%d\n", session->name,
2173 session->reorder_timeout);
2174 break;
2175
2176 default:
2177 err = -ENOPROTOOPT;
2178 break;
2179 }
2180
2181 return err;
2182}
2183
2184/* Main setsockopt() entry point.
2185 * Does API checks, then calls either the tunnel or session setsockopt
2186 * handler, according to whether the PPPoL2TP socket is a for a regular
2187 * session or the special tunnel type.
2188 */
2189static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
2190 char __user *optval, unsigned int optlen)
2191{
2192 struct sock *sk = sock->sk;
2193 struct pppol2tp_session *session = sk->sk_user_data;
2194 struct pppol2tp_tunnel *tunnel;
2195 int val;
2196 int err;
2197
2198 if (level != SOL_PPPOL2TP)
2199 return udp_prot.setsockopt(sk, level, optname, optval, optlen);
2200
2201 if (optlen < sizeof(int))
2202 return -EINVAL;
2203
2204 if (get_user(val, (int __user *)optval))
2205 return -EFAULT;
2206
2207 err = -ENOTCONN;
2208 if (sk->sk_user_data == NULL)
2209 goto end;
2210
2211 /* Get session context from the socket */
2212 err = -EBADF;
2213 session = pppol2tp_sock_to_session(sk);
2214 if (session == NULL)
2215 goto end;
2216
2217 /* Special case: if session_id == 0x0000, treat as operation on tunnel
2218 */
2219 if ((session->tunnel_addr.s_session == 0) &&
2220 (session->tunnel_addr.d_session == 0)) {
2221 err = -EBADF;
2222 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
2223 if (tunnel == NULL)
2224 goto end_put_sess;
2225
2226 err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
2227 sock_put(session->tunnel_sock);
2228 } else
2229 err = pppol2tp_session_setsockopt(sk, session, optname, val);
2230
2231 err = 0;
2232
2233end_put_sess:
2234 sock_put(sk);
2235end:
2236 return err;
2237}
2238
2239/* Tunnel getsockopt helper. Called with sock locked.
2240 */
2241static int pppol2tp_tunnel_getsockopt(struct sock *sk,
2242 struct pppol2tp_tunnel *tunnel,
2243 int optname, int *val)
2244{
2245 int err = 0;
2246
2247 switch (optname) {
2248 case PPPOL2TP_SO_DEBUG:
2249 *val = tunnel->debug;
2250 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2251 "%s: get debug=%x\n", tunnel->name, tunnel->debug);
2252 break;
2253
2254 default:
2255 err = -ENOPROTOOPT;
2256 break;
2257 }
2258
2259 return err;
2260}
2261
2262/* Session getsockopt helper. Called with sock locked.
2263 */
2264static int pppol2tp_session_getsockopt(struct sock *sk,
2265 struct pppol2tp_session *session,
2266 int optname, int *val)
2267{
2268 int err = 0;
2269
2270 switch (optname) {
2271 case PPPOL2TP_SO_RECVSEQ:
2272 *val = session->recv_seq;
2273 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2274 "%s: get recv_seq=%d\n", session->name, *val);
2275 break;
2276
2277 case PPPOL2TP_SO_SENDSEQ:
2278 *val = session->send_seq;
2279 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2280 "%s: get send_seq=%d\n", session->name, *val);
2281 break;
2282
2283 case PPPOL2TP_SO_LNSMODE:
2284 *val = session->lns_mode;
2285 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2286 "%s: get lns_mode=%d\n", session->name, *val);
2287 break;
2288
2289 case PPPOL2TP_SO_DEBUG:
2290 *val = session->debug;
2291 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2292 "%s: get debug=%d\n", session->name, *val);
2293 break;
2294
2295 case PPPOL2TP_SO_REORDERTO:
2296 *val = (int) jiffies_to_msecs(session->reorder_timeout);
2297 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2298 "%s: get reorder_timeout=%d\n", session->name, *val);
2299 break;
2300
2301 default:
2302 err = -ENOPROTOOPT;
2303 }
2304
2305 return err;
2306}
2307
2308/* Main getsockopt() entry point.
2309 * Does API checks, then calls either the tunnel or session getsockopt
2310 * handler, according to whether the PPPoX socket is a for a regular session
2311 * or the special tunnel type.
2312 */
2313static int pppol2tp_getsockopt(struct socket *sock, int level,
2314 int optname, char __user *optval, int __user *optlen)
2315{
2316 struct sock *sk = sock->sk;
2317 struct pppol2tp_session *session = sk->sk_user_data;
2318 struct pppol2tp_tunnel *tunnel;
2319 int val, len;
2320 int err;
2321
2322 if (level != SOL_PPPOL2TP)
2323 return udp_prot.getsockopt(sk, level, optname, optval, optlen);
2324
2325 if (get_user(len, (int __user *) optlen))
2326 return -EFAULT;
2327
2328 len = min_t(unsigned int, len, sizeof(int));
2329
2330 if (len < 0)
2331 return -EINVAL;
2332
2333 err = -ENOTCONN;
2334 if (sk->sk_user_data == NULL)
2335 goto end;
2336
2337 /* Get the session context */
2338 err = -EBADF;
2339 session = pppol2tp_sock_to_session(sk);
2340 if (session == NULL)
2341 goto end;
2342
2343 /* Special case: if session_id == 0x0000, treat as operation on tunnel */
2344 if ((session->tunnel_addr.s_session == 0) &&
2345 (session->tunnel_addr.d_session == 0)) {
2346 err = -EBADF;
2347 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
2348 if (tunnel == NULL)
2349 goto end_put_sess;
2350
2351 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
2352 sock_put(session->tunnel_sock);
2353 } else
2354 err = pppol2tp_session_getsockopt(sk, session, optname, &val);
2355
2356 err = -EFAULT;
2357 if (put_user(len, (int __user *) optlen))
2358 goto end_put_sess;
2359
2360 if (copy_to_user((void __user *) optval, &val, len))
2361 goto end_put_sess;
2362
2363 err = 0;
2364
2365end_put_sess:
2366 sock_put(sk);
2367end:
2368 return err;
2369}
2370
2371/*****************************************************************************
2372 * /proc filesystem for debug
2373 *****************************************************************************/
2374
2375#ifdef CONFIG_PROC_FS
2376
2377#include <linux/seq_file.h>
2378
/* Per-open iterator state for the /proc/net/pppol2tp seq_file. */
struct pppol2tp_seq_data {
	struct seq_net_private p;	/* must be first: seq_open_net() expects it */
	struct pppol2tp_tunnel *tunnel;		/* current tunnel */
	struct pppol2tp_session *session;	/* NULL means get first session in tunnel */
};
2384
/* Return the session following 'curr' in 'tunnel's hash table, or the
 * first session when curr == NULL. Returns NULL when curr was the last
 * session (or was not found).
 *
 * NOTE(review): the returned session pointer is used after hlist_lock is
 * dropped; presumably the caller's context keeps it alive — confirm.
 */
static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
{
	struct pppol2tp_session *session = NULL;
	struct hlist_node *walk;
	int found = 0;	/* set when 'session' holds the entry to return */
	int next = 0;	/* set once 'curr' has been seen; next entry wins */
	int i;

	read_lock_bh(&tunnel->hlist_lock);
	for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
		hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
			if (curr == NULL) {
				/* No current session: the very first entry wins. */
				found = 1;
				goto out;
			}
			if (session == curr) {
				next = 1;
				continue;
			}
			if (next) {
				found = 1;
				goto out;
			}
		}
	}
out:
	read_unlock_bh(&tunnel->hlist_lock);
	/* 'session' is left dangling by the loop macro when nothing matched. */
	if (!found)
		session = NULL;

	return session;
}
2417
2418static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_net *pn,
2419 struct pppol2tp_tunnel *curr)
2420{
2421 struct pppol2tp_tunnel *tunnel = NULL;
2422
2423 read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
2424 if (list_is_last(&curr->list, &pn->pppol2tp_tunnel_list)) {
2425 goto out;
2426 }
2427 tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
2428out:
2429 read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
2430
2431 return tunnel;
2432}
2433
/* seq_file ->start handler. All iteration state lives in the seq_file's
 * private pppol2tp_seq_data; ->next (below) only bumps the position, so
 * this function performs the actual advance on every call after the
 * first. Returns SEQ_START_TOKEN for the header line (pos == 0), the
 * private data while there is something to show, or NULL at end of list.
 */
static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
{
	struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
	struct pppol2tp_net *pn;
	loff_t pos = *offs;

	/* pos 0 is the header: just hand back the start token. */
	if (!pos)
		goto out;

	BUG_ON(m->private == NULL);
	pd = m->private;
	pn = pppol2tp_pernet(seq_file_net(m));

	if (pd->tunnel == NULL) {
		/* First real entry: start at the first tunnel, if any. */
		if (!list_empty(&pn->pppol2tp_tunnel_list))
			pd->tunnel = list_entry(pn->pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
	} else {
		/* Advance within the current tunnel; on exhaustion move to
		 * the next tunnel (session NULL => show the tunnel line). */
		pd->session = next_session(pd->tunnel, pd->session);
		if (pd->session == NULL) {
			pd->tunnel = next_tunnel(pn, pd->tunnel);
		}
	}

	/* NULL tunnel and session indicates end of list */
	if ((pd->tunnel == NULL) && (pd->session == NULL))
		pd = NULL;

out:
	return pd;
}
2464
2465static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
2466{
2467 (*pos)++;
2468 return NULL;
2469}
2470
/* seq_file ->stop handler. No per-iteration resources are held between
 * calls, so there is nothing to release. */
static void pppol2tp_seq_stop(struct seq_file *p, void *v)
{
	/* nothing to do */
}
2475
/* Emit one tunnel's two /proc lines: identity line (name, whether
 * sk_user_data still points back at the tunnel, session count) and a
 * stats line (debug mask, tx and rx packet/byte/error counters). */
static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
{
	struct pppol2tp_tunnel *tunnel = v;

	seq_printf(m, "\nTUNNEL '%s', %c %d\n",
		   tunnel->name,
		   (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N',
		   atomic_read(&tunnel->ref_count) - 1);
	seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
		   tunnel->debug,
		   (unsigned long long)tunnel->stats.tx_packets,
		   (unsigned long long)tunnel->stats.tx_bytes,
		   (unsigned long long)tunnel->stats.tx_errors,
		   (unsigned long long)tunnel->stats.rx_packets,
		   (unsigned long long)tunnel->stats.rx_bytes,
		   (unsigned long long)tunnel->stats.rx_errors);
}
2493
/* Emit one session's three /proc lines: addressing (peer addr/port,
 * src/dest tunnel+session ids, socket state), configuration (mtu/mru,
 * sequencing flags, LNS/LAC mode, debug mask, reorder timeout in ms)
 * and traffic counters (nr/ns plus tx/rx packet/byte/error stats). */
static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
{
	struct pppol2tp_session *session = v;

	seq_printf(m, "  SESSION '%s' %08X/%d %04X/%04X -> "
		   "%04X/%04X %d %c\n",
		   session->name,
		   ntohl(session->tunnel_addr.addr.sin_addr.s_addr),
		   ntohs(session->tunnel_addr.addr.sin_port),
		   session->tunnel_addr.s_tunnel,
		   session->tunnel_addr.s_session,
		   session->tunnel_addr.d_tunnel,
		   session->tunnel_addr.d_session,
		   session->sock->sk_state,
		   (session == session->sock->sk_user_data) ?
		   'Y' : 'N');
	seq_printf(m, "   %d/%d/%c/%c/%s %08x %u\n",
		   session->mtu, session->mru,
		   session->recv_seq ? 'R' : '-',
		   session->send_seq ? 'S' : '-',
		   session->lns_mode ? "LNS" : "LAC",
		   session->debug,
		   jiffies_to_msecs(session->reorder_timeout));
	seq_printf(m, "   %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
		   session->nr, session->ns,
		   (unsigned long long)session->stats.tx_packets,
		   (unsigned long long)session->stats.tx_bytes,
		   (unsigned long long)session->stats.tx_errors,
		   (unsigned long long)session->stats.rx_packets,
		   (unsigned long long)session->stats.rx_bytes,
		   (unsigned long long)session->stats.rx_errors);
}
2526
/* seq_file ->show handler. Prints the column-legend header for the start
 * token, otherwise delegates to the tunnel or session formatter depending
 * on which cursor is active in the iterator state. Always returns 0. */
static int pppol2tp_seq_show(struct seq_file *m, void *v)
{
	struct pppol2tp_seq_data *pd = v;

	/* display header on line 1 */
	if (v == SEQ_START_TOKEN) {
		seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
		seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
		seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		seq_puts(m, "  SESSION name, addr/port src-tid/sid "
			 "dest-tid/sid state user-data-ok\n");
		seq_puts(m, "   mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
		seq_puts(m, "   nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
		goto out;
	}

	/* Show the tunnel or session context.
	 */
	if (pd->session == NULL)
		pppol2tp_seq_tunnel_show(m, pd->tunnel);
	else
		pppol2tp_seq_session_show(m, pd->session);

out:
	return 0;
}
2553
/* seq_file iterator for /proc/net/pppol2tp. All walking happens in
 * ->start; ->next only advances the position counter. */
static const struct seq_operations pppol2tp_seq_ops = {
	.start		= pppol2tp_seq_start,
	.next		= pppol2tp_seq_next,
	.stop		= pppol2tp_seq_stop,
	.show		= pppol2tp_seq_show,
};
2560
/* Called when our /proc file is opened. We allocate data for use when
 * iterating our tunnel / session contexts and store it in the private
 * data of the seq_file.
 */
static int pppol2tp_proc_open(struct inode *inode, struct file *file)
{
	/* seq_open_net() zero-allocates the pppol2tp_seq_data and makes it
	 * available as m->private in the seq callbacks above. */
	return seq_open_net(inode, file, &pppol2tp_seq_ops,
			    sizeof(struct pppol2tp_seq_data));
}
2570
/* file_operations for /proc/net/pppol2tp; read side is the generic
 * seq_file machinery. */
static const struct file_operations pppol2tp_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= pppol2tp_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
2578
2579#endif /* CONFIG_PROC_FS */
2580
2581/*****************************************************************************
2582 * Init and cleanup
2583 *****************************************************************************/
2584
/* proto_ops for PPPoL2TP sockets. Unsupported operations are stubbed with
 * the sock_no_* helpers; ioctl goes through pppox_ioctl, which dispatches
 * to pppol2tp_ioctl via pppol2tp_proto below. */
static const struct proto_ops pppol2tp_ops = {
	.family		= AF_PPPOX,
	.owner		= THIS_MODULE,
	.release	= pppol2tp_release,
	.bind		= sock_no_bind,
	.connect	= pppol2tp_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= pppol2tp_getname,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= pppol2tp_setsockopt,
	.getsockopt	= pppol2tp_getsockopt,
	.sendmsg	= pppol2tp_sendmsg,
	.recvmsg	= pppol2tp_recvmsg,
	.mmap		= sock_no_mmap,
	.ioctl		= pppox_ioctl,
};
2604
/* PPPoX protocol hooks registered for PX_PROTO_OL2TP. */
static struct pppox_proto pppol2tp_proto = {
	.create		= pppol2tp_create,
	.ioctl		= pppol2tp_ioctl
};
2609
/* Per-network-namespace init: set up the tunnel list and its lock, and
 * create the /proc/net/pppol2tp entry. Returns 0 or -ENOMEM. */
static __net_init int pppol2tp_init_net(struct net *net)
{
	struct pppol2tp_net *pn = pppol2tp_pernet(net);
	struct proc_dir_entry *pde;

	INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list);
	rwlock_init(&pn->pppol2tp_tunnel_list_lock);

	pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
#ifdef CONFIG_PROC_FS
	/* A NULL pde is only fatal when procfs is compiled in; without
	 * CONFIG_PROC_FS the create call presumably returns NULL from a
	 * stub — TODO confirm against proc_fs.h. */
	if (!pde)
		return -ENOMEM;
#endif

	return 0;
}
2626
/* Per-network-namespace teardown: remove the /proc entry created above. */
static __net_exit void pppol2tp_exit_net(struct net *net)
{
	proc_net_remove(net, "pppol2tp");
}
2631
/* Per-net registration: .id/.size make the core allocate one
 * struct pppol2tp_net per namespace, retrievable via pppol2tp_pernet(). */
static struct pernet_operations pppol2tp_net_ops = {
	.init = pppol2tp_init_net,
	.exit = pppol2tp_exit_net,
	.id   = &pppol2tp_net_id,
	.size = sizeof(struct pppol2tp_net),
};
2638
2639static int __init pppol2tp_init(void)
2640{
2641 int err;
2642
2643 err = proto_register(&pppol2tp_sk_proto, 0);
2644 if (err)
2645 goto out;
2646 err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
2647 if (err)
2648 goto out_unregister_pppol2tp_proto;
2649
2650 err = register_pernet_device(&pppol2tp_net_ops);
2651 if (err)
2652 goto out_unregister_pppox_proto;
2653
2654 printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
2655 PPPOL2TP_DRV_VERSION);
2656
2657out:
2658 return err;
2659out_unregister_pppox_proto:
2660 unregister_pppox_proto(PX_PROTO_OL2TP);
2661out_unregister_pppol2tp_proto:
2662 proto_unregister(&pppol2tp_sk_proto);
2663 goto out;
2664}
2665
/* Module exit. The PPPoX hook is removed first so no new PPPoL2TP sockets
 * can be created while the per-net state and proto are being torn down. */
static void __exit pppol2tp_exit(void)
{
	unregister_pppox_proto(PX_PROTO_OL2TP);
	unregister_pernet_device(&pppol2tp_net_ops);
	proto_unregister(&pppol2tp_sk_proto);
}
2672
/* Module entry points and metadata. */
module_init(pppol2tp_init);
module_exit(pppol2tp_exit);

MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>, "
	      "James Chapman <jchapman@katalix.com>");
MODULE_DESCRIPTION("PPP over L2TP over UDP");
MODULE_LICENSE("GPL");
MODULE_VERSION(PPPOL2TP_DRV_VERSION);
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 5bf229bb34c2..87d6b8f36304 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -327,7 +327,7 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
327 unsigned int bufsize; 327 unsigned int bufsize;
328 328
329 if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE) 329 if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
330 dev_info(ctodev(card), "%s: ERROR status \n", __func__); 330 dev_info(ctodev(card), "%s: ERROR status\n", __func__);
331 /* we need to round up the buffer size to a multiple of 128 */ 331 /* we need to round up the buffer size to a multiple of 128 */
332 bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN); 332 bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
333 333
@@ -547,7 +547,7 @@ out:
547void gelic_net_set_multi(struct net_device *netdev) 547void gelic_net_set_multi(struct net_device *netdev)
548{ 548{
549 struct gelic_card *card = netdev_card(netdev); 549 struct gelic_card *card = netdev_card(netdev);
550 struct dev_mc_list *mc; 550 struct netdev_hw_addr *ha;
551 unsigned int i; 551 unsigned int i;
552 uint8_t *p; 552 uint8_t *p;
553 u64 addr; 553 u64 addr;
@@ -581,9 +581,9 @@ void gelic_net_set_multi(struct net_device *netdev)
581 } 581 }
582 582
583 /* set multicast addresses */ 583 /* set multicast addresses */
584 netdev_for_each_mc_addr(mc, netdev) { 584 netdev_for_each_mc_addr(ha, netdev) {
585 addr = 0; 585 addr = 0;
586 p = mc->dmi_addr; 586 p = ha->addr;
587 for (i = 0; i < ETH_ALEN; i++) { 587 for (i = 0; i < ETH_ALEN; i++) {
588 addr <<= 8; 588 addr <<= 8;
589 addr |= *p++; 589 addr |= *p++;
@@ -903,9 +903,6 @@ int gelic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
903 gelic_descr_release_tx(card, descr->next); 903 gelic_descr_release_tx(card, descr->next);
904 card->tx_chain.tail = descr->next->next; 904 card->tx_chain.tail = descr->next->next;
905 dev_info(ctodev(card), "%s: kick failure\n", __func__); 905 dev_info(ctodev(card), "%s: kick failure\n", __func__);
906 } else {
907 /* OK, DMA started/reserved */
908 netdev->trans_start = jiffies;
909 } 906 }
910 907
911 spin_unlock_irqrestore(&card->tx_lock, flags); 908 spin_unlock_irqrestore(&card->tx_lock, flags);
@@ -1435,7 +1432,7 @@ static void gelic_net_tx_timeout_task(struct work_struct *work)
1435 container_of(work, struct gelic_card, tx_timeout_task); 1432 container_of(work, struct gelic_card, tx_timeout_task);
1436 struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0]; 1433 struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0];
1437 1434
1438 dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__); 1435 dev_info(ctodev(card), "%s:Timed out. Restarting...\n", __func__);
1439 1436
1440 if (!(netdev->flags & IFF_UP)) 1437 if (!(netdev->flags & IFF_UP))
1441 goto out; 1438 goto out;
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index f0be507e5324..43b8d7797f0a 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -96,7 +96,7 @@ static inline int precise_ie(void)
96 * post_eurus_cmd helpers 96 * post_eurus_cmd helpers
97 */ 97 */
98struct eurus_cmd_arg_info { 98struct eurus_cmd_arg_info {
99 int pre_arg; /* command requres arg1, arg2 at POST COMMAND */ 99 int pre_arg; /* command requires arg1, arg2 at POST COMMAND */
100 int post_arg; /* command requires arg1, arg2 at GET_RESULT */ 100 int post_arg; /* command requires arg1, arg2 at GET_RESULT */
101}; 101};
102 102
@@ -301,7 +301,6 @@ static void gelic_wl_get_ch_info(struct gelic_wl_info *wl)
301 /* 16 bits of MSB has available channels */ 301 /* 16 bits of MSB has available channels */
302 wl->ch_info = ch_info_raw >> 48; 302 wl->ch_info = ch_info_raw >> 48;
303 } 303 }
304 return;
305} 304}
306 305
307/* SIOGIWRANGE */ 306/* SIOGIWRANGE */
@@ -528,7 +527,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
528 u8 item_len; 527 u8 item_len;
529 u8 item_id; 528 u8 item_id;
530 529
531 pr_debug("%s: data=%p len=%ld \n", __func__, 530 pr_debug("%s: data=%p len=%ld\n", __func__,
532 data, len); 531 data, len);
533 memset(ie_info, 0, sizeof(struct ie_info)); 532 memset(ie_info, 0, sizeof(struct ie_info));
534 533
@@ -897,7 +896,7 @@ static int gelic_wl_set_auth(struct net_device *netdev,
897 default: 896 default:
898 ret = -EOPNOTSUPP; 897 ret = -EOPNOTSUPP;
899 break; 898 break;
900 }; 899 }
901 900
902 if (!ret) 901 if (!ret)
903 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); 902 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
@@ -979,7 +978,7 @@ static int gelic_wl_set_essid(struct net_device *netdev,
979 pr_debug("%s: essid = '%s'\n", __func__, extra); 978 pr_debug("%s: essid = '%s'\n", __func__, extra);
980 set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat); 979 set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
981 } else { 980 } else {
982 pr_debug("%s: ESSID any \n", __func__); 981 pr_debug("%s: ESSID any\n", __func__);
983 clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat); 982 clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
984 } 983 }
985 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); 984 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
@@ -987,7 +986,7 @@ static int gelic_wl_set_essid(struct net_device *netdev,
987 986
988 987
989 gelic_wl_try_associate(netdev); /* FIXME */ 988 gelic_wl_try_associate(netdev); /* FIXME */
990 pr_debug("%s: -> \n", __func__); 989 pr_debug("%s: ->\n", __func__);
991 return 0; 990 return 0;
992} 991}
993 992
@@ -998,7 +997,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
998 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev)); 997 struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
999 unsigned long irqflag; 998 unsigned long irqflag;
1000 999
1001 pr_debug("%s: <- \n", __func__); 1000 pr_debug("%s: <-\n", __func__);
1002 mutex_lock(&wl->assoc_stat_lock); 1001 mutex_lock(&wl->assoc_stat_lock);
1003 spin_lock_irqsave(&wl->lock, irqflag); 1002 spin_lock_irqsave(&wl->lock, irqflag);
1004 if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) || 1003 if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) ||
@@ -1011,7 +1010,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
1011 1010
1012 mutex_unlock(&wl->assoc_stat_lock); 1011 mutex_unlock(&wl->assoc_stat_lock);
1013 spin_unlock_irqrestore(&wl->lock, irqflag); 1012 spin_unlock_irqrestore(&wl->lock, irqflag);
1014 pr_debug("%s: -> len=%d \n", __func__, data->essid.length); 1013 pr_debug("%s: -> len=%d\n", __func__, data->essid.length);
1015 1014
1016 return 0; 1015 return 0;
1017} 1016}
@@ -1028,7 +1027,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
1028 int key_index, index_specified; 1027 int key_index, index_specified;
1029 int ret = 0; 1028 int ret = 0;
1030 1029
1031 pr_debug("%s: <- \n", __func__); 1030 pr_debug("%s: <-\n", __func__);
1032 flags = enc->flags & IW_ENCODE_FLAGS; 1031 flags = enc->flags & IW_ENCODE_FLAGS;
1033 key_index = enc->flags & IW_ENCODE_INDEX; 1032 key_index = enc->flags & IW_ENCODE_INDEX;
1034 1033
@@ -1087,7 +1086,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
1087 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat); 1086 set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
1088done: 1087done:
1089 spin_unlock_irqrestore(&wl->lock, irqflag); 1088 spin_unlock_irqrestore(&wl->lock, irqflag);
1090 pr_debug("%s: -> \n", __func__); 1089 pr_debug("%s: ->\n", __func__);
1091 return ret; 1090 return ret;
1092} 1091}
1093 1092
@@ -1101,7 +1100,7 @@ static int gelic_wl_get_encode(struct net_device *netdev,
1101 unsigned int key_index, index_specified; 1100 unsigned int key_index, index_specified;
1102 int ret = 0; 1101 int ret = 0;
1103 1102
1104 pr_debug("%s: <- \n", __func__); 1103 pr_debug("%s: <-\n", __func__);
1105 key_index = enc->flags & IW_ENCODE_INDEX; 1104 key_index = enc->flags & IW_ENCODE_INDEX;
1106 pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__, 1105 pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__,
1107 enc->flags, enc->pointer, enc->length, extra); 1106 enc->flags, enc->pointer, enc->length, extra);
@@ -1215,7 +1214,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
1215 int key_index; 1214 int key_index;
1216 int ret = 0; 1215 int ret = 0;
1217 1216
1218 pr_debug("%s: <- \n", __func__); 1217 pr_debug("%s: <-\n", __func__);
1219 flags = enc->flags & IW_ENCODE_FLAGS; 1218 flags = enc->flags & IW_ENCODE_FLAGS;
1220 alg = ext->alg; 1219 alg = ext->alg;
1221 key_index = enc->flags & IW_ENCODE_INDEX; 1220 key_index = enc->flags & IW_ENCODE_INDEX;
@@ -1288,7 +1287,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
1288 } 1287 }
1289done: 1288done:
1290 spin_unlock_irqrestore(&wl->lock, irqflag); 1289 spin_unlock_irqrestore(&wl->lock, irqflag);
1291 pr_debug("%s: -> \n", __func__); 1290 pr_debug("%s: ->\n", __func__);
1292 return ret; 1291 return ret;
1293} 1292}
1294 1293
@@ -1304,7 +1303,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
1304 int ret = 0; 1303 int ret = 0;
1305 int max_key_len; 1304 int max_key_len;
1306 1305
1307 pr_debug("%s: <- \n", __func__); 1306 pr_debug("%s: <-\n", __func__);
1308 1307
1309 max_key_len = enc->length - sizeof(struct iw_encode_ext); 1308 max_key_len = enc->length - sizeof(struct iw_encode_ext);
1310 if (max_key_len < 0) 1309 if (max_key_len < 0)
@@ -1359,7 +1358,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
1359 } 1358 }
1360out: 1359out:
1361 spin_unlock_irqrestore(&wl->lock, irqflag); 1360 spin_unlock_irqrestore(&wl->lock, irqflag);
1362 pr_debug("%s: -> \n", __func__); 1361 pr_debug("%s: ->\n", __func__);
1363 return ret; 1362 return ret;
1364} 1363}
1365/* SIOC{S,G}IWMODE */ 1364/* SIOC{S,G}IWMODE */
@@ -1370,7 +1369,7 @@ static int gelic_wl_set_mode(struct net_device *netdev,
1370 __u32 mode = data->mode; 1369 __u32 mode = data->mode;
1371 int ret; 1370 int ret;
1372 1371
1373 pr_debug("%s: <- \n", __func__); 1372 pr_debug("%s: <-\n", __func__);
1374 if (mode == IW_MODE_INFRA) 1373 if (mode == IW_MODE_INFRA)
1375 ret = 0; 1374 ret = 0;
1376 else 1375 else
@@ -1384,7 +1383,7 @@ static int gelic_wl_get_mode(struct net_device *netdev,
1384 union iwreq_data *data, char *extra) 1383 union iwreq_data *data, char *extra)
1385{ 1384{
1386 __u32 *mode = &data->mode; 1385 __u32 *mode = &data->mode;
1387 pr_debug("%s: <- \n", __func__); 1386 pr_debug("%s: <-\n", __func__);
1388 *mode = IW_MODE_INFRA; 1387 *mode = IW_MODE_INFRA;
1389 pr_debug("%s: ->\n", __func__); 1388 pr_debug("%s: ->\n", __func__);
1390 return 0; 1389 return 0;
@@ -1992,7 +1991,7 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
1992 case GELIC_WL_WPA_LEVEL_WPA2: 1991 case GELIC_WL_WPA_LEVEL_WPA2:
1993 ret = gelic_wl_do_wpa_setup(wl); 1992 ret = gelic_wl_do_wpa_setup(wl);
1994 break; 1993 break;
1995 }; 1994 }
1996 1995
1997 if (ret) { 1996 if (ret) {
1998 pr_debug("%s: WEP/WPA setup failed %d\n", __func__, 1997 pr_debug("%s: WEP/WPA setup failed %d\n", __func__,
@@ -2022,7 +2021,7 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
2022 2021
2023 if (!rc) { 2022 if (!rc) {
2024 /* timeouted. Maybe key or cyrpt mode is wrong */ 2023 /* timeouted. Maybe key or cyrpt mode is wrong */
2025 pr_info("%s: connect timeout \n", __func__); 2024 pr_info("%s: connect timeout\n", __func__);
2026 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC, 2025 cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC,
2027 NULL, 0); 2026 NULL, 0);
2028 kfree(cmd); 2027 kfree(cmd);
@@ -2063,7 +2062,7 @@ static void gelic_wl_connected_event(struct gelic_wl_info *wl,
2063 } 2062 }
2064 2063
2065 if (desired_event == event) { 2064 if (desired_event == event) {
2066 pr_debug("%s: completed \n", __func__); 2065 pr_debug("%s: completed\n", __func__);
2067 complete(&wl->assoc_done); 2066 complete(&wl->assoc_done);
2068 netif_carrier_on(port_to_netdev(wl_port(wl))); 2067 netif_carrier_on(port_to_netdev(wl_port(wl)));
2069 } else 2068 } else
@@ -2280,26 +2279,25 @@ void gelic_wl_interrupt(struct net_device *netdev, u64 status)
2280/* 2279/*
2281 * driver helpers 2280 * driver helpers
2282 */ 2281 */
2283#define IW_IOCTL(n) [(n) - SIOCSIWCOMMIT]
2284static const iw_handler gelic_wl_wext_handler[] = 2282static const iw_handler gelic_wl_wext_handler[] =
2285{ 2283{
2286 IW_IOCTL(SIOCGIWNAME) = gelic_wl_get_name, 2284 IW_HANDLER(SIOCGIWNAME, gelic_wl_get_name),
2287 IW_IOCTL(SIOCGIWRANGE) = gelic_wl_get_range, 2285 IW_HANDLER(SIOCGIWRANGE, gelic_wl_get_range),
2288 IW_IOCTL(SIOCSIWSCAN) = gelic_wl_set_scan, 2286 IW_HANDLER(SIOCSIWSCAN, gelic_wl_set_scan),
2289 IW_IOCTL(SIOCGIWSCAN) = gelic_wl_get_scan, 2287 IW_HANDLER(SIOCGIWSCAN, gelic_wl_get_scan),
2290 IW_IOCTL(SIOCSIWAUTH) = gelic_wl_set_auth, 2288 IW_HANDLER(SIOCSIWAUTH, gelic_wl_set_auth),
2291 IW_IOCTL(SIOCGIWAUTH) = gelic_wl_get_auth, 2289 IW_HANDLER(SIOCGIWAUTH, gelic_wl_get_auth),
2292 IW_IOCTL(SIOCSIWESSID) = gelic_wl_set_essid, 2290 IW_HANDLER(SIOCSIWESSID, gelic_wl_set_essid),
2293 IW_IOCTL(SIOCGIWESSID) = gelic_wl_get_essid, 2291 IW_HANDLER(SIOCGIWESSID, gelic_wl_get_essid),
2294 IW_IOCTL(SIOCSIWENCODE) = gelic_wl_set_encode, 2292 IW_HANDLER(SIOCSIWENCODE, gelic_wl_set_encode),
2295 IW_IOCTL(SIOCGIWENCODE) = gelic_wl_get_encode, 2293 IW_HANDLER(SIOCGIWENCODE, gelic_wl_get_encode),
2296 IW_IOCTL(SIOCSIWAP) = gelic_wl_set_ap, 2294 IW_HANDLER(SIOCSIWAP, gelic_wl_set_ap),
2297 IW_IOCTL(SIOCGIWAP) = gelic_wl_get_ap, 2295 IW_HANDLER(SIOCGIWAP, gelic_wl_get_ap),
2298 IW_IOCTL(SIOCSIWENCODEEXT) = gelic_wl_set_encodeext, 2296 IW_HANDLER(SIOCSIWENCODEEXT, gelic_wl_set_encodeext),
2299 IW_IOCTL(SIOCGIWENCODEEXT) = gelic_wl_get_encodeext, 2297 IW_HANDLER(SIOCGIWENCODEEXT, gelic_wl_get_encodeext),
2300 IW_IOCTL(SIOCSIWMODE) = gelic_wl_set_mode, 2298 IW_HANDLER(SIOCSIWMODE, gelic_wl_set_mode),
2301 IW_IOCTL(SIOCGIWMODE) = gelic_wl_get_mode, 2299 IW_HANDLER(SIOCGIWMODE, gelic_wl_get_mode),
2302 IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick, 2300 IW_HANDLER(SIOCGIWNICKN, gelic_wl_get_nick),
2303}; 2301};
2304 2302
2305static const struct iw_handler_def gelic_wl_wext_handler_def = { 2303static const struct iw_handler_def gelic_wl_wext_handler_def = {
@@ -2318,7 +2316,7 @@ static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
2318 pr_debug("%s:start\n", __func__); 2316 pr_debug("%s:start\n", __func__);
2319 netdev = alloc_etherdev(sizeof(struct gelic_port) + 2317 netdev = alloc_etherdev(sizeof(struct gelic_port) +
2320 sizeof(struct gelic_wl_info)); 2318 sizeof(struct gelic_wl_info));
2321 pr_debug("%s: netdev =%p card=%p \np", __func__, netdev, card); 2319 pr_debug("%s: netdev =%p card=%p\n", __func__, netdev, card);
2322 if (!netdev) 2320 if (!netdev)
2323 return NULL; 2321 return NULL;
2324 2322
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 4ef0afbcbe1b..54ebb65ada18 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -222,7 +222,6 @@ static void ql_write_common_reg_l(struct ql3_adapter *qdev,
222 writel(value, reg); 222 writel(value, reg);
223 readl(reg); 223 readl(reg);
224 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags); 224 spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
225 return;
226} 225}
227 226
228static void ql_write_common_reg(struct ql3_adapter *qdev, 227static void ql_write_common_reg(struct ql3_adapter *qdev,
@@ -230,7 +229,6 @@ static void ql_write_common_reg(struct ql3_adapter *qdev,
230{ 229{
231 writel(value, reg); 230 writel(value, reg);
232 readl(reg); 231 readl(reg);
233 return;
234} 232}
235 233
236static void ql_write_nvram_reg(struct ql3_adapter *qdev, 234static void ql_write_nvram_reg(struct ql3_adapter *qdev,
@@ -239,7 +237,6 @@ static void ql_write_nvram_reg(struct ql3_adapter *qdev,
239 writel(value, reg); 237 writel(value, reg);
240 readl(reg); 238 readl(reg);
241 udelay(1); 239 udelay(1);
242 return;
243} 240}
244 241
245static void ql_write_page0_reg(struct ql3_adapter *qdev, 242static void ql_write_page0_reg(struct ql3_adapter *qdev,
@@ -249,7 +246,6 @@ static void ql_write_page0_reg(struct ql3_adapter *qdev,
249 ql_set_register_page(qdev,0); 246 ql_set_register_page(qdev,0);
250 writel(value, reg); 247 writel(value, reg);
251 readl(reg); 248 readl(reg);
252 return;
253} 249}
254 250
255/* 251/*
@@ -262,7 +258,6 @@ static void ql_write_page1_reg(struct ql3_adapter *qdev,
262 ql_set_register_page(qdev,1); 258 ql_set_register_page(qdev,1);
263 writel(value, reg); 259 writel(value, reg);
264 readl(reg); 260 readl(reg);
265 return;
266} 261}
267 262
268/* 263/*
@@ -275,7 +270,6 @@ static void ql_write_page2_reg(struct ql3_adapter *qdev,
275 ql_set_register_page(qdev,2); 270 ql_set_register_page(qdev,2);
276 writel(value, reg); 271 writel(value, reg);
277 readl(reg); 272 readl(reg);
278 return;
279} 273}
280 274
281static void ql_disable_interrupts(struct ql3_adapter *qdev) 275static void ql_disable_interrupts(struct ql3_adapter *qdev)
@@ -343,8 +337,8 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
343 cpu_to_le32(LS_64BITS(map)); 337 cpu_to_le32(LS_64BITS(map));
344 lrg_buf_cb->buf_phy_addr_high = 338 lrg_buf_cb->buf_phy_addr_high =
345 cpu_to_le32(MS_64BITS(map)); 339 cpu_to_le32(MS_64BITS(map));
346 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); 340 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
347 pci_unmap_len_set(lrg_buf_cb, maplen, 341 dma_unmap_len_set(lrg_buf_cb, maplen,
348 qdev->lrg_buffer_len - 342 qdev->lrg_buffer_len -
349 QL_HEADER_SPACE); 343 QL_HEADER_SPACE);
350 } 344 }
@@ -1924,8 +1918,8 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
1924 cpu_to_le32(LS_64BITS(map)); 1918 cpu_to_le32(LS_64BITS(map));
1925 lrg_buf_cb->buf_phy_addr_high = 1919 lrg_buf_cb->buf_phy_addr_high =
1926 cpu_to_le32(MS_64BITS(map)); 1920 cpu_to_le32(MS_64BITS(map));
1927 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); 1921 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
1928 pci_unmap_len_set(lrg_buf_cb, maplen, 1922 dma_unmap_len_set(lrg_buf_cb, maplen,
1929 qdev->lrg_buffer_len - 1923 qdev->lrg_buffer_len -
1930 QL_HEADER_SPACE); 1924 QL_HEADER_SPACE);
1931 --qdev->lrg_buf_skb_check; 1925 --qdev->lrg_buf_skb_check;
@@ -2041,16 +2035,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
2041 } 2035 }
2042 2036
2043 pci_unmap_single(qdev->pdev, 2037 pci_unmap_single(qdev->pdev,
2044 pci_unmap_addr(&tx_cb->map[0], mapaddr), 2038 dma_unmap_addr(&tx_cb->map[0], mapaddr),
2045 pci_unmap_len(&tx_cb->map[0], maplen), 2039 dma_unmap_len(&tx_cb->map[0], maplen),
2046 PCI_DMA_TODEVICE); 2040 PCI_DMA_TODEVICE);
2047 tx_cb->seg_count--; 2041 tx_cb->seg_count--;
2048 if (tx_cb->seg_count) { 2042 if (tx_cb->seg_count) {
2049 for (i = 1; i < tx_cb->seg_count; i++) { 2043 for (i = 1; i < tx_cb->seg_count; i++) {
2050 pci_unmap_page(qdev->pdev, 2044 pci_unmap_page(qdev->pdev,
2051 pci_unmap_addr(&tx_cb->map[i], 2045 dma_unmap_addr(&tx_cb->map[i],
2052 mapaddr), 2046 mapaddr),
2053 pci_unmap_len(&tx_cb->map[i], maplen), 2047 dma_unmap_len(&tx_cb->map[i], maplen),
2054 PCI_DMA_TODEVICE); 2048 PCI_DMA_TODEVICE);
2055 } 2049 }
2056 } 2050 }
@@ -2119,8 +2113,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
2119 2113
2120 skb_put(skb, length); 2114 skb_put(skb, length);
2121 pci_unmap_single(qdev->pdev, 2115 pci_unmap_single(qdev->pdev,
2122 pci_unmap_addr(lrg_buf_cb2, mapaddr), 2116 dma_unmap_addr(lrg_buf_cb2, mapaddr),
2123 pci_unmap_len(lrg_buf_cb2, maplen), 2117 dma_unmap_len(lrg_buf_cb2, maplen),
2124 PCI_DMA_FROMDEVICE); 2118 PCI_DMA_FROMDEVICE);
2125 prefetch(skb->data); 2119 prefetch(skb->data);
2126 skb->ip_summed = CHECKSUM_NONE; 2120 skb->ip_summed = CHECKSUM_NONE;
@@ -2165,8 +2159,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
2165 2159
2166 skb_put(skb2, length); /* Just the second buffer length here. */ 2160 skb_put(skb2, length); /* Just the second buffer length here. */
2167 pci_unmap_single(qdev->pdev, 2161 pci_unmap_single(qdev->pdev,
2168 pci_unmap_addr(lrg_buf_cb2, mapaddr), 2162 dma_unmap_addr(lrg_buf_cb2, mapaddr),
2169 pci_unmap_len(lrg_buf_cb2, maplen), 2163 dma_unmap_len(lrg_buf_cb2, maplen),
2170 PCI_DMA_FROMDEVICE); 2164 PCI_DMA_FROMDEVICE);
2171 prefetch(skb2->data); 2165 prefetch(skb2->data);
2172 2166
@@ -2258,7 +2252,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
2258 "%x.\n", 2252 "%x.\n",
2259 ndev->name, net_rsp->opcode); 2253 ndev->name, net_rsp->opcode);
2260 printk(KERN_ERR PFX 2254 printk(KERN_ERR PFX
2261 "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n", 2255 "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
2262 (unsigned long int)tmp[0], 2256 (unsigned long int)tmp[0],
2263 (unsigned long int)tmp[1], 2257 (unsigned long int)tmp[1],
2264 (unsigned long int)tmp[2], 2258 (unsigned long int)tmp[2],
@@ -2454,8 +2448,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
2454 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2448 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2455 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2449 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2456 oal_entry->len = cpu_to_le32(len); 2450 oal_entry->len = cpu_to_le32(len);
2457 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2451 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2458 pci_unmap_len_set(&tx_cb->map[seg], maplen, len); 2452 dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
2459 seg++; 2453 seg++;
2460 2454
2461 if (seg_cnt == 1) { 2455 if (seg_cnt == 1) {
@@ -2488,9 +2482,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
2488 oal_entry->len = 2482 oal_entry->len =
2489 cpu_to_le32(sizeof(struct oal) | 2483 cpu_to_le32(sizeof(struct oal) |
2490 OAL_CONT_ENTRY); 2484 OAL_CONT_ENTRY);
2491 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, 2485 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2492 map); 2486 map);
2493 pci_unmap_len_set(&tx_cb->map[seg], maplen, 2487 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2494 sizeof(struct oal)); 2488 sizeof(struct oal));
2495 oal_entry = (struct oal_entry *)oal; 2489 oal_entry = (struct oal_entry *)oal;
2496 oal++; 2490 oal++;
@@ -2512,8 +2506,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
2512 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2506 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2513 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2507 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2514 oal_entry->len = cpu_to_le32(frag->size); 2508 oal_entry->len = cpu_to_le32(frag->size);
2515 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2509 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2516 pci_unmap_len_set(&tx_cb->map[seg], maplen, 2510 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2517 frag->size); 2511 frag->size);
2518 } 2512 }
2519 /* Terminate the last segment. */ 2513 /* Terminate the last segment. */
@@ -2539,22 +2533,22 @@ map_error:
2539 (seg == 12 && seg_cnt > 13) || /* but necessary. */ 2533 (seg == 12 && seg_cnt > 13) || /* but necessary. */
2540 (seg == 17 && seg_cnt > 18)) { 2534 (seg == 17 && seg_cnt > 18)) {
2541 pci_unmap_single(qdev->pdev, 2535 pci_unmap_single(qdev->pdev,
2542 pci_unmap_addr(&tx_cb->map[seg], mapaddr), 2536 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2543 pci_unmap_len(&tx_cb->map[seg], maplen), 2537 dma_unmap_len(&tx_cb->map[seg], maplen),
2544 PCI_DMA_TODEVICE); 2538 PCI_DMA_TODEVICE);
2545 oal++; 2539 oal++;
2546 seg++; 2540 seg++;
2547 } 2541 }
2548 2542
2549 pci_unmap_page(qdev->pdev, 2543 pci_unmap_page(qdev->pdev,
2550 pci_unmap_addr(&tx_cb->map[seg], mapaddr), 2544 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2551 pci_unmap_len(&tx_cb->map[seg], maplen), 2545 dma_unmap_len(&tx_cb->map[seg], maplen),
2552 PCI_DMA_TODEVICE); 2546 PCI_DMA_TODEVICE);
2553 } 2547 }
2554 2548
2555 pci_unmap_single(qdev->pdev, 2549 pci_unmap_single(qdev->pdev,
2556 pci_unmap_addr(&tx_cb->map[0], mapaddr), 2550 dma_unmap_addr(&tx_cb->map[0], mapaddr),
2557 pci_unmap_addr(&tx_cb->map[0], maplen), 2551 dma_unmap_addr(&tx_cb->map[0], maplen),
2558 PCI_DMA_TODEVICE); 2552 PCI_DMA_TODEVICE);
2559 2553
2560 return NETDEV_TX_BUSY; 2554 return NETDEV_TX_BUSY;
@@ -2841,8 +2835,8 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
2841 if (lrg_buf_cb->skb) { 2835 if (lrg_buf_cb->skb) {
2842 dev_kfree_skb(lrg_buf_cb->skb); 2836 dev_kfree_skb(lrg_buf_cb->skb);
2843 pci_unmap_single(qdev->pdev, 2837 pci_unmap_single(qdev->pdev,
2844 pci_unmap_addr(lrg_buf_cb, mapaddr), 2838 dma_unmap_addr(lrg_buf_cb, mapaddr),
2845 pci_unmap_len(lrg_buf_cb, maplen), 2839 dma_unmap_len(lrg_buf_cb, maplen),
2846 PCI_DMA_FROMDEVICE); 2840 PCI_DMA_FROMDEVICE);
2847 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2841 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2848 } else { 2842 } else {
@@ -2912,8 +2906,8 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2912 return -ENOMEM; 2906 return -ENOMEM;
2913 } 2907 }
2914 2908
2915 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2909 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2916 pci_unmap_len_set(lrg_buf_cb, maplen, 2910 dma_unmap_len_set(lrg_buf_cb, maplen,
2917 qdev->lrg_buffer_len - 2911 qdev->lrg_buffer_len -
2918 QL_HEADER_SPACE); 2912 QL_HEADER_SPACE);
2919 lrg_buf_cb->buf_phy_addr_low = 2913 lrg_buf_cb->buf_phy_addr_low =
@@ -3793,13 +3787,13 @@ static void ql_reset_work(struct work_struct *work)
3793 "%s: Freeing lost SKB.\n", 3787 "%s: Freeing lost SKB.\n",
3794 qdev->ndev->name); 3788 qdev->ndev->name);
3795 pci_unmap_single(qdev->pdev, 3789 pci_unmap_single(qdev->pdev,
3796 pci_unmap_addr(&tx_cb->map[0], mapaddr), 3790 dma_unmap_addr(&tx_cb->map[0], mapaddr),
3797 pci_unmap_len(&tx_cb->map[0], maplen), 3791 dma_unmap_len(&tx_cb->map[0], maplen),
3798 PCI_DMA_TODEVICE); 3792 PCI_DMA_TODEVICE);
3799 for(j=1;j<tx_cb->seg_count;j++) { 3793 for(j=1;j<tx_cb->seg_count;j++) {
3800 pci_unmap_page(qdev->pdev, 3794 pci_unmap_page(qdev->pdev,
3801 pci_unmap_addr(&tx_cb->map[j],mapaddr), 3795 dma_unmap_addr(&tx_cb->map[j],mapaddr),
3802 pci_unmap_len(&tx_cb->map[j],maplen), 3796 dma_unmap_len(&tx_cb->map[j],maplen),
3803 PCI_DMA_TODEVICE); 3797 PCI_DMA_TODEVICE);
3804 } 3798 }
3805 dev_kfree_skb(tx_cb->skb); 3799 dev_kfree_skb(tx_cb->skb);
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 7113e71b15a1..3362a661248c 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -998,8 +998,8 @@ enum link_state_t {
998struct ql_rcv_buf_cb { 998struct ql_rcv_buf_cb {
999 struct ql_rcv_buf_cb *next; 999 struct ql_rcv_buf_cb *next;
1000 struct sk_buff *skb; 1000 struct sk_buff *skb;
1001 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1001 DEFINE_DMA_UNMAP_ADDR(mapaddr);
1002 DECLARE_PCI_UNMAP_LEN(maplen); 1002 DEFINE_DMA_UNMAP_LEN(maplen);
1003 __le32 buf_phy_addr_low; 1003 __le32 buf_phy_addr_low;
1004 __le32 buf_phy_addr_high; 1004 __le32 buf_phy_addr_high;
1005 int index; 1005 int index;
@@ -1029,8 +1029,8 @@ struct oal {
1029}; 1029};
1030 1030
1031struct map_list { 1031struct map_list {
1032 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1032 DEFINE_DMA_UNMAP_ADDR(mapaddr);
1033 DECLARE_PCI_UNMAP_LEN(maplen); 1033 DEFINE_DMA_UNMAP_LEN(maplen);
1034}; 1034};
1035 1035
1036struct ql_tx_buf_cb { 1036struct ql_tx_buf_cb {
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 0da94b208db1..896d40df9a13 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,9 @@
51 51
52#define _QLCNIC_LINUX_MAJOR 5 52#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0 53#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 0 54#define _QLCNIC_LINUX_SUBVERSION 2
55#define QLCNIC_LINUX_VERSIONID "5.0.0" 55#define QLCNIC_LINUX_VERSIONID "5.0.2"
56#define QLCNIC_DRV_IDC_VER 0x01
56 57
57#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 58#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
58#define _major(v) (((v) >> 24) & 0xff) 59#define _major(v) (((v) >> 24) & 0xff)
@@ -98,8 +99,6 @@
98#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048 99#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
99#define QLCNIC_LRO_BUFFER_EXTRA 2048 100#define QLCNIC_LRO_BUFFER_EXTRA 2048
100 101
101#define QLCNIC_RX_LRO_BUFFER_LENGTH (8060)
102
103/* Opcodes to be used with the commands */ 102/* Opcodes to be used with the commands */
104#define TX_ETHER_PKT 0x01 103#define TX_ETHER_PKT 0x01
105#define TX_TCP_PKT 0x02 104#define TX_TCP_PKT 0x02
@@ -133,7 +132,6 @@
133 132
134#define RCV_RING_NORMAL 0 133#define RCV_RING_NORMAL 0
135#define RCV_RING_JUMBO 1 134#define RCV_RING_JUMBO 1
136#define RCV_RING_LRO 2
137 135
138#define MIN_CMD_DESCRIPTORS 64 136#define MIN_CMD_DESCRIPTORS 64
139#define MIN_RCV_DESCRIPTORS 64 137#define MIN_RCV_DESCRIPTORS 64
@@ -144,7 +142,6 @@
144#define MAX_RCV_DESCRIPTORS_10G 8192 142#define MAX_RCV_DESCRIPTORS_10G 8192
145#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512 143#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
146#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024 144#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
147#define MAX_LRO_RCV_DESCRIPTORS 8
148 145
149#define DEFAULT_RCV_DESCRIPTORS_1G 2048 146#define DEFAULT_RCV_DESCRIPTORS_1G 2048
150#define DEFAULT_RCV_DESCRIPTORS_10G 4096 147#define DEFAULT_RCV_DESCRIPTORS_10G 4096
@@ -152,8 +149,6 @@
152#define get_next_index(index, length) \ 149#define get_next_index(index, length) \
153 (((index) + 1) & ((length) - 1)) 150 (((index) + 1) & ((length) - 1))
154 151
155#define MPORT_MULTI_FUNCTION_MODE 0x2222
156
157/* 152/*
158 * Following data structures describe the descriptors that will be used. 153 * Following data structures describe the descriptors that will be used.
159 * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when 154 * Added fileds of tcpHdrSize and ipHdrSize, The driver needs to do it only when
@@ -399,13 +394,9 @@ struct qlcnic_hardware_context {
399 394
400 unsigned long pci_len0; 395 unsigned long pci_len0;
401 396
402 u32 ocm_win;
403 u32 crb_win;
404
405 rwlock_t crb_lock; 397 rwlock_t crb_lock;
406 struct mutex mem_lock; 398 struct mutex mem_lock;
407 399
408 u8 cut_through;
409 u8 revision_id; 400 u8 revision_id;
410 u8 pci_func; 401 u8 pci_func;
411 u8 linkup; 402 u8 linkup;
@@ -428,6 +419,10 @@ struct qlcnic_adapter_stats {
428 u64 xmit_on; 419 u64 xmit_on;
429 u64 xmit_off; 420 u64 xmit_off;
430 u64 skb_alloc_failure; 421 u64 skb_alloc_failure;
422 u64 null_skb;
423 u64 null_rxbuf;
424 u64 rx_dma_map_error;
425 u64 tx_dma_map_error;
431}; 426};
432 427
433/* 428/*
@@ -916,14 +911,12 @@ struct qlcnic_adapter {
916 u16 num_txd; 911 u16 num_txd;
917 u16 num_rxd; 912 u16 num_rxd;
918 u16 num_jumbo_rxd; 913 u16 num_jumbo_rxd;
919 u16 num_lro_rxd;
920 914
921 u8 max_rds_rings; 915 u8 max_rds_rings;
922 u8 max_sds_rings; 916 u8 max_sds_rings;
923 u8 driver_mismatch; 917 u8 driver_mismatch;
924 u8 msix_supported; 918 u8 msix_supported;
925 u8 rx_csum; 919 u8 rx_csum;
926 u8 pci_using_dac;
927 u8 portnum; 920 u8 portnum;
928 u8 physical_port; 921 u8 physical_port;
929 922
@@ -958,11 +951,15 @@ struct qlcnic_adapter {
958 u8 dev_state; 951 u8 dev_state;
959 u8 diag_test; 952 u8 diag_test;
960 u8 diag_cnt; 953 u8 diag_cnt;
954 u8 reset_ack_timeo;
955 u8 dev_init_timeo;
961 u8 rsrd1; 956 u8 rsrd1;
962 u16 rsrd2; 957 u16 msg_enable;
963 958
964 u8 mac_addr[ETH_ALEN]; 959 u8 mac_addr[ETH_ALEN];
965 960
961 u64 dev_rst_time;
962
966 struct qlcnic_adapter_stats stats; 963 struct qlcnic_adapter_stats stats;
967 964
968 struct qlcnic_recv_context recv_ctx; 965 struct qlcnic_recv_context recv_ctx;
@@ -994,6 +991,11 @@ u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
994int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data); 991int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
995int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data); 992int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
996int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data); 993int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
994void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
995void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
996
997#define ADDR_IN_RANGE(addr, low, high) \
998 (((addr) < (high)) && ((addr) >= (low)))
997 999
998#define QLCRD32(adapter, off) \ 1000#define QLCRD32(adapter, off) \
999 (qlcnic_hw_read_wx_2M(adapter, off)) 1001 (qlcnic_hw_read_wx_2M(adapter, off))
@@ -1035,6 +1037,7 @@ int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
1035void qlcnic_request_firmware(struct qlcnic_adapter *adapter); 1037void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
1036void qlcnic_release_firmware(struct qlcnic_adapter *adapter); 1038void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
1037int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter); 1039int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
1040int qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
1038 1041
1039int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp); 1042int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
1040int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, 1043int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
@@ -1128,4 +1131,11 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1128 1131
1129extern const struct ethtool_ops qlcnic_ethtool_ops; 1132extern const struct ethtool_ops qlcnic_ethtool_ops;
1130 1133
1134#define QLCDB(adapter, lvl, _fmt, _args...) do { \
1135 if (NETIF_MSG_##lvl & adapter->msg_enable) \
1136 printk(KERN_INFO "%s: %s: " _fmt, \
1137 dev_name(&adapter->pdev->dev), \
1138 __func__, ##_args); \
1139 } while (0)
1140
1131#endif /* __QLCNIC_H_ */ 1141#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 0a6a39914aec..c2c1f5cc16c6 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -421,7 +421,8 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
421 421
422 if (addr == NULL) { 422 if (addr == NULL) {
423 dev_err(&pdev->dev, "failed to allocate tx desc ring\n"); 423 dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
424 return -ENOMEM; 424 err = -ENOMEM;
425 goto err_out_free;
425 } 426 }
426 427
427 tx_ring->desc_head = (struct cmd_desc_type0 *)addr; 428 tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index f83e15fe3e1b..3bd514ec7e8f 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -69,6 +69,14 @@ static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
69 QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)}, 69 QLC_SIZEOF(stats.xmit_off), QLC_OFF(stats.xmit_off)},
70 {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure), 70 {"skb_alloc_failure", QLC_SIZEOF(stats.skb_alloc_failure),
71 QLC_OFF(stats.skb_alloc_failure)}, 71 QLC_OFF(stats.skb_alloc_failure)},
72 {"null skb",
73 QLC_SIZEOF(stats.null_skb), QLC_OFF(stats.null_skb)},
74 {"null rxbuf",
75 QLC_SIZEOF(stats.null_rxbuf), QLC_OFF(stats.null_rxbuf)},
76 {"rx dma map error", QLC_SIZEOF(stats.rx_dma_map_error),
77 QLC_OFF(stats.rx_dma_map_error)},
78 {"tx dma map error", QLC_SIZEOF(stats.tx_dma_map_error),
79 QLC_OFF(stats.tx_dma_map_error)},
72 80
73}; 81};
74 82
@@ -404,7 +412,6 @@ qlcnic_get_ringparam(struct net_device *dev,
404 412
405 ring->rx_pending = adapter->num_rxd; 413 ring->rx_pending = adapter->num_rxd;
406 ring->rx_jumbo_pending = adapter->num_jumbo_rxd; 414 ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
407 ring->rx_jumbo_pending += adapter->num_lro_rxd;
408 ring->tx_pending = adapter->num_txd; 415 ring->tx_pending = adapter->num_txd;
409 416
410 if (adapter->ahw.port_type == QLCNIC_GBE) { 417 if (adapter->ahw.port_type == QLCNIC_GBE) {
@@ -598,19 +605,12 @@ qlcnic_set_pauseparam(struct net_device *netdev,
598static int qlcnic_reg_test(struct net_device *dev) 605static int qlcnic_reg_test(struct net_device *dev)
599{ 606{
600 struct qlcnic_adapter *adapter = netdev_priv(dev); 607 struct qlcnic_adapter *adapter = netdev_priv(dev);
601 u32 data_read, data_written; 608 u32 data_read;
602 609
603 data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0)); 610 data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
604 if ((data_read & 0xffff) != adapter->pdev->vendor) 611 if ((data_read & 0xffff) != adapter->pdev->vendor)
605 return 1; 612 return 1;
606 613
607 data_written = (u32)0xa5a5a5a5;
608
609 QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
610 data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
611 if (data_written != data_read)
612 return 1;
613
614 return 0; 614 return 0;
615} 615}
616 616
@@ -998,6 +998,20 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
998 return 0; 998 return 0;
999} 999}
1000 1000
1001static u32 qlcnic_get_msglevel(struct net_device *netdev)
1002{
1003 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1004
1005 return adapter->msg_enable;
1006}
1007
1008static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
1009{
1010 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1011
1012 adapter->msg_enable = msglvl;
1013}
1014
1001const struct ethtool_ops qlcnic_ethtool_ops = { 1015const struct ethtool_ops qlcnic_ethtool_ops = {
1002 .get_settings = qlcnic_get_settings, 1016 .get_settings = qlcnic_get_settings,
1003 .set_settings = qlcnic_set_settings, 1017 .set_settings = qlcnic_set_settings,
@@ -1029,4 +1043,6 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
1029 .get_flags = ethtool_op_get_flags, 1043 .get_flags = ethtool_op_get_flags,
1030 .set_flags = qlcnic_set_flags, 1044 .set_flags = qlcnic_set_flags,
1031 .phys_id = qlcnic_blink_led, 1045 .phys_id = qlcnic_blink_led,
1046 .set_msglevel = qlcnic_set_msglevel,
1047 .get_msglevel = qlcnic_get_msglevel,
1032}; 1048};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 0469f84360a4..ad9d167723c4 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -435,9 +435,10 @@ enum {
435#define QLCNIC_PCI_MS_2M (0x80000) 435#define QLCNIC_PCI_MS_2M (0x80000)
436#define QLCNIC_PCI_OCM0_2M (0x000c0000UL) 436#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
437#define QLCNIC_PCI_CRBSPACE (0x06000000UL) 437#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
438#define QLCNIC_PCI_CAMQM (0x04800000UL)
439#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
438#define QLCNIC_PCI_2MB_SIZE (0x00200000UL) 440#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
439#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL) 441#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
440#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
441 442
442#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM) 443#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
443 444
@@ -448,7 +449,7 @@ enum {
448#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL) 449#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
449#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL) 450#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
450#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL) 451#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
451#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL) 452#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
452 453
453/* 454/*
454 * Register offsets for MN 455 * Register offsets for MN
@@ -562,39 +563,16 @@ enum {
562#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8)) 563#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
563#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec)) 564#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
564 565
565#define CRB_MPORT_MODE (QLCNIC_REG(0xc4))
566#define CRB_DMA_SHIFT (QLCNIC_REG(0xcc))
567
568#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4)) 566#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
569 567
570#define CRB_V2P_0 (QLCNIC_REG(0x290)) 568#define CRB_V2P_0 (QLCNIC_REG(0x290))
571#define CRB_V2P(port) (CRB_V2P_0+((port)*4)) 569#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
572#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0)) 570#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
573 571
574#define CRB_SW_INT_MASK_0 (QLCNIC_REG(0x1d8))
575#define CRB_SW_INT_MASK_1 (QLCNIC_REG(0x1e0))
576#define CRB_SW_INT_MASK_2 (QLCNIC_REG(0x1e4))
577#define CRB_SW_INT_MASK_3 (QLCNIC_REG(0x1e8))
578
579#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128)) 572#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
580#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0)) 573#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
581 574
582/* 575/*
583 * capabilities register, can be used to selectively enable/disable features
584 * for backward compability
585 */
586#define CRB_NIC_CAPABILITIES_HOST QLCNIC_REG(0x1a8)
587#define CRB_NIC_CAPABILITIES_FW QLCNIC_REG(0x1dc)
588#define CRB_NIC_MSI_MODE_HOST QLCNIC_REG(0x270)
589#define CRB_NIC_MSI_MODE_FW QLCNIC_REG(0x274)
590
591#define INTR_SCHEME_PERPORT 0x1
592#define MSI_MODE_MULTIFUNC 0x1
593
594/* used for ethtool tests */
595#define CRB_SCRATCHPAD_TEST QLCNIC_REG(0x280)
596
597/*
598 * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address 576 * CrbPortPhanCntrHi/Lo is used to pass the address of HostPhantomIndex address
599 * which can be read by the Phantom host to get producer/consumer indexes from 577 * which can be read by the Phantom host to get producer/consumer indexes from
600 * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following 578 * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
@@ -693,15 +671,24 @@ enum {
693#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144)) 671#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
694#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) 672#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
695#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) 673#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
696#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c)) 674#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x174))
697 675#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
698 /* Device State */ 676#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
699#define QLCNIC_DEV_COLD 1 677
700#define QLCNIC_DEV_INITALIZING 2 678/* Device State */
701#define QLCNIC_DEV_READY 3 679#define QLCNIC_DEV_COLD 0x1
702#define QLCNIC_DEV_NEED_RESET 4 680#define QLCNIC_DEV_INITIALIZING 0x2
703#define QLCNIC_DEV_NEED_QUISCENT 5 681#define QLCNIC_DEV_READY 0x3
704#define QLCNIC_DEV_FAILED 6 682#define QLCNIC_DEV_NEED_RESET 0x4
683#define QLCNIC_DEV_NEED_QUISCENT 0x5
684#define QLCNIC_DEV_FAILED 0x6
685#define QLCNIC_DEV_QUISCENT 0x7
686
687#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
688#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
689#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
690#define QLC_DEV_SET_QSCNT_RDY(VAL, FN) ((VAL) |= (2 << (FN * 4)))
691#define QLC_DEV_CLR_RST_QSCNT(VAL, FN) ((VAL) &= ~(3 << (FN * 4)))
705 692
706#define QLCNIC_RCODE_DRIVER_INFO 0x20000000 693#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
707#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000 694#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
@@ -709,9 +696,8 @@ enum {
709#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff) 696#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
710#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff) 697#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
711 698
712#define FW_POLL_DELAY (2 * HZ) 699#define FW_POLL_DELAY (1 * HZ)
713#define FW_FAIL_THRESH 3 700#define FW_FAIL_THRESH 2
714#define FW_POLL_THRESH 10
715 701
716#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC))) 702#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
717#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200) 703#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e73ba455aa20..0c2e1f08f459 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -54,21 +54,6 @@ static inline void writeq(u64 val, void __iomem *addr)
54} 54}
55#endif 55#endif
56 56
57#define ADDR_IN_RANGE(addr, low, high) \
58 (((addr) < (high)) && ((addr) >= (low)))
59
60#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
61 ((adapter)->ahw.pci_base0 + (off))
62
63static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
64 unsigned long off)
65{
66 if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
67 return PCI_OFFSET_FIRST_RANGE(adapter, off);
68
69 return NULL;
70}
71
72static const struct crb_128M_2M_block_map 57static const struct crb_128M_2M_block_map
73crb_128M_2M_map[64] __cacheline_aligned_in_smp = { 58crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
74 {{{0, 0, 0, 0} } }, /* 0: PCI */ 59 {{{0, 0, 0, 0} } }, /* 0: PCI */
@@ -310,8 +295,12 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
310 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem))); 295 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
311 if (done == 1) 296 if (done == 1)
312 break; 297 break;
313 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) 298 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
299 dev_err(&adapter->pdev->dev,
300 "Failed to acquire sem=%d lock;reg_id=%d\n",
301 sem, id_reg);
314 return -EIO; 302 return -EIO;
303 }
315 msleep(1); 304 msleep(1);
316 } 305 }
317 306
@@ -427,7 +416,7 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
427void qlcnic_set_multi(struct net_device *netdev) 416void qlcnic_set_multi(struct net_device *netdev)
428{ 417{
429 struct qlcnic_adapter *adapter = netdev_priv(netdev); 418 struct qlcnic_adapter *adapter = netdev_priv(netdev);
430 struct dev_mc_list *mc_ptr; 419 struct netdev_hw_addr *ha;
431 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 420 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
432 u32 mode = VPORT_MISS_MODE_DROP; 421 u32 mode = VPORT_MISS_MODE_DROP;
433 422
@@ -449,8 +438,8 @@ void qlcnic_set_multi(struct net_device *netdev)
449 } 438 }
450 439
451 if (!netdev_mc_empty(netdev)) { 440 if (!netdev_mc_empty(netdev)) {
452 netdev_for_each_mc_addr(mc_ptr, netdev) { 441 netdev_for_each_mc_addr(ha, netdev) {
453 qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr); 442 qlcnic_nic_add_mac(adapter, ha->addr);
454 } 443 }
455 } 444 }
456 445
@@ -787,9 +776,6 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
787 776
788 window = CRB_HI(off); 777 window = CRB_HI(off);
789 778
790 if (adapter->ahw.crb_win == window)
791 return;
792
793 writel(window, addr); 779 writel(window, addr);
794 if (readl(addr) != window) { 780 if (readl(addr) != window) {
795 if (printk_ratelimit()) 781 if (printk_ratelimit())
@@ -797,7 +783,6 @@ qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
797 "failed to set CRB window to %d off 0x%lx\n", 783 "failed to set CRB window to %d off 0x%lx\n",
798 window, off); 784 window, off);
799 } 785 }
800 adapter->ahw.crb_win = window;
801} 786}
802 787
803int 788int
@@ -878,13 +863,6 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
878 u64 addr, u32 *start) 863 u64 addr, u32 *start)
879{ 864{
880 u32 window; 865 u32 window;
881 struct pci_dev *pdev = adapter->pdev;
882
883 if ((addr & 0x00ff800) == 0xff800) {
884 if (printk_ratelimit())
885 dev_warn(&pdev->dev, "QM access not handled\n");
886 return -EIO;
887 }
888 866
889 window = OCM_WIN_P3P(addr); 867 window = OCM_WIN_P3P(addr);
890 868
@@ -892,7 +870,6 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
892 /* read back to flush */ 870 /* read back to flush */
893 readl(adapter->ahw.ocm_win_crb); 871 readl(adapter->ahw.ocm_win_crb);
894 872
895 adapter->ahw.ocm_win = window;
896 *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr); 873 *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
897 return 0; 874 return 0;
898} 875}
@@ -901,8 +878,7 @@ static int
901qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off, 878qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
902 u64 *data, int op) 879 u64 *data, int op)
903{ 880{
904 void __iomem *addr, *mem_ptr = NULL; 881 void __iomem *addr;
905 resource_size_t mem_base;
906 int ret; 882 int ret;
907 u32 start; 883 u32 start;
908 884
@@ -912,21 +888,8 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
912 if (ret != 0) 888 if (ret != 0)
913 goto unlock; 889 goto unlock;
914 890
915 addr = pci_base_offset(adapter, start); 891 addr = adapter->ahw.pci_base0 + start;
916 if (addr)
917 goto noremap;
918
919 mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
920
921 mem_ptr = ioremap(mem_base, PAGE_SIZE);
922 if (mem_ptr == NULL) {
923 ret = -EIO;
924 goto unlock;
925 }
926 892
927 addr = mem_ptr + (start & (PAGE_SIZE - 1));
928
929noremap:
930 if (op == 0) /* read */ 893 if (op == 0) /* read */
931 *data = readq(addr); 894 *data = readq(addr);
932 else /* write */ 895 else /* write */
@@ -935,11 +898,31 @@ noremap:
935unlock: 898unlock:
936 mutex_unlock(&adapter->ahw.mem_lock); 899 mutex_unlock(&adapter->ahw.mem_lock);
937 900
938 if (mem_ptr)
939 iounmap(mem_ptr);
940 return ret; 901 return ret;
941} 902}
942 903
904void
905qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
906{
907 void __iomem *addr = adapter->ahw.pci_base0 +
908 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
909
910 mutex_lock(&adapter->ahw.mem_lock);
911 *data = readq(addr);
912 mutex_unlock(&adapter->ahw.mem_lock);
913}
914
915void
916qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
917{
918 void __iomem *addr = adapter->ahw.pci_base0 +
919 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
920
921 mutex_lock(&adapter->ahw.mem_lock);
922 writeq(data, addr);
923 mutex_unlock(&adapter->ahw.mem_lock);
924}
925
943#define MAX_CTL_CHECK 1000 926#define MAX_CTL_CHECK 1000
944 927
945int 928int
@@ -948,7 +931,6 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
948{ 931{
949 int i, j, ret; 932 int i, j, ret;
950 u32 temp, off8; 933 u32 temp, off8;
951 u64 stride;
952 void __iomem *mem_crb; 934 void __iomem *mem_crb;
953 935
954 /* Only 64-bit aligned access */ 936 /* Only 64-bit aligned access */
@@ -957,7 +939,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
957 939
958 /* P3 onward, test agent base for MIU and SIU is same */ 940 /* P3 onward, test agent base for MIU and SIU is same */
959 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, 941 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
960 QLCNIC_ADDR_QDR_NET_MAX_P3)) { 942 QLCNIC_ADDR_QDR_NET_MAX)) {
961 mem_crb = qlcnic_get_ioaddr(adapter, 943 mem_crb = qlcnic_get_ioaddr(adapter,
962 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); 944 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
963 goto correct; 945 goto correct;
@@ -975,9 +957,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
975 return -EIO; 957 return -EIO;
976 958
977correct: 959correct:
978 stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8; 960 off8 = off & ~0xf;
979
980 off8 = off & ~(stride-1);
981 961
982 mutex_lock(&adapter->ahw.mem_lock); 962 mutex_lock(&adapter->ahw.mem_lock);
983 963
@@ -985,30 +965,28 @@ correct:
985 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); 965 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
986 966
987 i = 0; 967 i = 0;
988 if (stride == 16) { 968 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
989 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); 969 writel((TA_CTL_START | TA_CTL_ENABLE),
990 writel((TA_CTL_START | TA_CTL_ENABLE), 970 (mem_crb + TEST_AGT_CTRL));
991 (mem_crb + TEST_AGT_CTRL));
992
993 for (j = 0; j < MAX_CTL_CHECK; j++) {
994 temp = readl(mem_crb + TEST_AGT_CTRL);
995 if ((temp & TA_CTL_BUSY) == 0)
996 break;
997 }
998 971
999 if (j >= MAX_CTL_CHECK) { 972 for (j = 0; j < MAX_CTL_CHECK; j++) {
1000 ret = -EIO; 973 temp = readl(mem_crb + TEST_AGT_CTRL);
1001 goto done; 974 if ((temp & TA_CTL_BUSY) == 0)
1002 } 975 break;
976 }
1003 977
1004 i = (off & 0xf) ? 0 : 2; 978 if (j >= MAX_CTL_CHECK) {
1005 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)), 979 ret = -EIO;
1006 mem_crb + MIU_TEST_AGT_WRDATA(i)); 980 goto done;
1007 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
1008 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1009 i = (off & 0xf) ? 2 : 0;
1010 } 981 }
1011 982
983 i = (off & 0xf) ? 0 : 2;
984 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
985 mem_crb + MIU_TEST_AGT_WRDATA(i));
986 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
987 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
988 i = (off & 0xf) ? 2 : 0;
989
1012 writel(data & 0xffffffff, 990 writel(data & 0xffffffff,
1013 mem_crb + MIU_TEST_AGT_WRDATA(i)); 991 mem_crb + MIU_TEST_AGT_WRDATA(i));
1014 writel((data >> 32) & 0xffffffff, 992 writel((data >> 32) & 0xffffffff,
@@ -1044,7 +1022,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1044{ 1022{
1045 int j, ret; 1023 int j, ret;
1046 u32 temp, off8; 1024 u32 temp, off8;
1047 u64 val, stride; 1025 u64 val;
1048 void __iomem *mem_crb; 1026 void __iomem *mem_crb;
1049 1027
1050 /* Only 64-bit aligned access */ 1028 /* Only 64-bit aligned access */
@@ -1053,7 +1031,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1053 1031
1054 /* P3 onward, test agent base for MIU and SIU is same */ 1032 /* P3 onward, test agent base for MIU and SIU is same */
1055 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, 1033 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1056 QLCNIC_ADDR_QDR_NET_MAX_P3)) { 1034 QLCNIC_ADDR_QDR_NET_MAX)) {
1057 mem_crb = qlcnic_get_ioaddr(adapter, 1035 mem_crb = qlcnic_get_ioaddr(adapter,
1058 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); 1036 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1059 goto correct; 1037 goto correct;
@@ -1073,9 +1051,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1073 return -EIO; 1051 return -EIO;
1074 1052
1075correct: 1053correct:
1076 stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8; 1054 off8 = off & ~0xf;
1077
1078 off8 = off & ~(stride-1);
1079 1055
1080 mutex_lock(&adapter->ahw.mem_lock); 1056 mutex_lock(&adapter->ahw.mem_lock);
1081 1057
@@ -1097,7 +1073,7 @@ correct:
1097 ret = -EIO; 1073 ret = -EIO;
1098 } else { 1074 } else {
1099 off8 = MIU_TEST_AGT_RDDATA_LO; 1075 off8 = MIU_TEST_AGT_RDDATA_LO;
1100 if ((stride == 16) && (off & 0xf)) 1076 if (off & 0xf)
1101 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO; 1077 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1102 1078
1103 temp = readl(mem_crb + off8 + 4); 1079 temp = readl(mem_crb + off8 + 4);
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9d2c124048fa..71a4e664ad76 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -210,7 +210,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
210 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); 210 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
211 if (cmd_buf_arr == NULL) { 211 if (cmd_buf_arr == NULL) {
212 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n"); 212 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
213 return -ENOMEM; 213 goto err_out;
214 } 214 }
215 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring)); 215 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
216 tx_ring->cmd_buf_arr = cmd_buf_arr; 216 tx_ring->cmd_buf_arr = cmd_buf_arr;
@@ -221,7 +221,7 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
221 rds_ring = kzalloc(size, GFP_KERNEL); 221 rds_ring = kzalloc(size, GFP_KERNEL);
222 if (rds_ring == NULL) { 222 if (rds_ring == NULL) {
223 dev_err(&netdev->dev, "failed to allocate rds ring struct\n"); 223 dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
224 return -ENOMEM; 224 goto err_out;
225 } 225 }
226 recv_ctx->rds_rings = rds_ring; 226 recv_ctx->rds_rings = rds_ring;
227 227
@@ -230,17 +230,8 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
230 switch (ring) { 230 switch (ring) {
231 case RCV_RING_NORMAL: 231 case RCV_RING_NORMAL:
232 rds_ring->num_desc = adapter->num_rxd; 232 rds_ring->num_desc = adapter->num_rxd;
233 if (adapter->ahw.cut_through) { 233 rds_ring->dma_size = QLCNIC_P3_RX_BUF_MAX_LEN;
234 rds_ring->dma_size = 234 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
235 QLCNIC_CT_DEFAULT_RX_BUF_LEN;
236 rds_ring->skb_size =
237 QLCNIC_CT_DEFAULT_RX_BUF_LEN;
238 } else {
239 rds_ring->dma_size =
240 QLCNIC_P3_RX_BUF_MAX_LEN;
241 rds_ring->skb_size =
242 rds_ring->dma_size + NET_IP_ALIGN;
243 }
244 break; 235 break;
245 236
246 case RCV_RING_JUMBO: 237 case RCV_RING_JUMBO:
@@ -254,13 +245,6 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
254 rds_ring->skb_size = 245 rds_ring->skb_size =
255 rds_ring->dma_size + NET_IP_ALIGN; 246 rds_ring->dma_size + NET_IP_ALIGN;
256 break; 247 break;
257
258 case RCV_RING_LRO:
259 rds_ring->num_desc = adapter->num_lro_rxd;
260 rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
261 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
262 break;
263
264 } 248 }
265 rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *) 249 rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
266 vmalloc(RCV_BUFF_RINGSIZE(rds_ring)); 250 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
@@ -530,6 +514,36 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
530 return 0; 514 return 0;
531} 515}
532 516
517int
518qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
519
520 int timeo;
521 u32 val;
522
523 val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
524 val = (val >> (adapter->portnum * 4)) & 0xf;
525
526 if ((val & 0x3) != 1) {
527 dev_err(&adapter->pdev->dev, "Not an Ethernet NIC func=%u\n",
528 val);
529 return -EIO;
530 }
531
532 adapter->physical_port = (val >> 2);
533
534 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
535 timeo = 30;
536
537 adapter->dev_init_timeo = timeo;
538
539 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
540 timeo = 10;
541
542 adapter->reset_ack_timeo = timeo;
543
544 return 0;
545}
546
533static int 547static int
534qlcnic_has_mn(struct qlcnic_adapter *adapter) 548qlcnic_has_mn(struct qlcnic_adapter *adapter)
535{ 549{
@@ -540,12 +554,10 @@ qlcnic_has_mn(struct qlcnic_adapter *adapter)
540 QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver); 554 QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
541 flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver); 555 flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
542 556
543 if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) { 557 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
558 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
559 return 1;
544 560
545 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
546 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
547 return 1;
548 }
549 return 0; 561 return 0;
550} 562}
551 563
@@ -612,7 +624,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
612 return -EINVAL; 624 return -EINVAL;
613 625
614 tab_size = cpu_to_le32(tab_desc->findex) + 626 tab_size = cpu_to_le32(tab_desc->findex) +
615 (cpu_to_le32(tab_desc->entry_size * (idx + 1))); 627 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
616 628
617 if (adapter->fw->size < tab_size) 629 if (adapter->fw->size < tab_size)
618 return -EINVAL; 630 return -EINVAL;
@@ -621,7 +633,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
621 (cpu_to_le32(tab_desc->entry_size) * (idx)); 633 (cpu_to_le32(tab_desc->entry_size) * (idx));
622 descr = (struct uni_data_desc *)&unirom[offs]; 634 descr = (struct uni_data_desc *)&unirom[offs];
623 635
624 data_size = descr->findex + cpu_to_le32(descr->size); 636 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
625 637
626 if (adapter->fw->size < data_size) 638 if (adapter->fw->size < data_size)
627 return -EINVAL; 639 return -EINVAL;
@@ -647,7 +659,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
647 return -EINVAL; 659 return -EINVAL;
648 660
649 tab_size = cpu_to_le32(tab_desc->findex) + 661 tab_size = cpu_to_le32(tab_desc->findex) +
650 (cpu_to_le32(tab_desc->entry_size * (idx + 1))); 662 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
651 663
652 if (adapter->fw->size < tab_size) 664 if (adapter->fw->size < tab_size)
653 return -EINVAL; 665 return -EINVAL;
@@ -655,7 +667,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
655 offs = cpu_to_le32(tab_desc->findex) + 667 offs = cpu_to_le32(tab_desc->findex) +
656 (cpu_to_le32(tab_desc->entry_size) * (idx)); 668 (cpu_to_le32(tab_desc->entry_size) * (idx));
657 descr = (struct uni_data_desc *)&unirom[offs]; 669 descr = (struct uni_data_desc *)&unirom[offs];
658 data_size = descr->findex + cpu_to_le32(descr->size); 670 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
659 671
660 if (adapter->fw->size < data_size) 672 if (adapter->fw->size < data_size)
661 return -EINVAL; 673 return -EINVAL;
@@ -950,6 +962,16 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
950 962
951 flashaddr += 8; 963 flashaddr += 8;
952 } 964 }
965
966 size = (__force u32)qlcnic_get_fw_size(adapter) % 8;
967 if (size) {
968 data = cpu_to_le64(ptr64[i]);
969
970 if (qlcnic_pci_mem_write_2M(adapter,
971 flashaddr, data))
972 return -EIO;
973 }
974
953 } else { 975 } else {
954 u64 data; 976 u64 data;
955 u32 hi, lo; 977 u32 hi, lo;
@@ -1162,9 +1184,6 @@ int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
1162 if (err) 1184 if (err)
1163 return err; 1185 return err;
1164 1186
1165 QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
1166 QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
1167 QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
1168 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK); 1187 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
1169 1188
1170 return err; 1189 return err;
@@ -1254,13 +1273,13 @@ qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1254 1273
1255 skb = buffer->skb; 1274 skb = buffer->skb;
1256 1275
1257 if (!adapter->ahw.cut_through) 1276 skb_reserve(skb, 2);
1258 skb_reserve(skb, 2);
1259 1277
1260 dma = pci_map_single(pdev, skb->data, 1278 dma = pci_map_single(pdev, skb->data,
1261 rds_ring->dma_size, PCI_DMA_FROMDEVICE); 1279 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
1262 1280
1263 if (pci_dma_mapping_error(pdev, dma)) { 1281 if (pci_dma_mapping_error(pdev, dma)) {
1282 adapter->stats.rx_dma_map_error++;
1264 dev_kfree_skb_any(skb); 1283 dev_kfree_skb_any(skb);
1265 buffer->skb = NULL; 1284 buffer->skb = NULL;
1266 return -ENOMEM; 1285 return -ENOMEM;
@@ -1285,8 +1304,10 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1285 PCI_DMA_FROMDEVICE); 1304 PCI_DMA_FROMDEVICE);
1286 1305
1287 skb = buffer->skb; 1306 skb = buffer->skb;
1288 if (!skb) 1307 if (!skb) {
1308 adapter->stats.null_skb++;
1289 goto no_skb; 1309 goto no_skb;
1310 }
1290 1311
1291 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) { 1312 if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
1292 adapter->stats.csummed++; 1313 adapter->stats.csummed++;
@@ -1476,6 +1497,8 @@ qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
1476 1497
1477 if (rxbuf) 1498 if (rxbuf)
1478 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]); 1499 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1500 else
1501 adapter->stats.null_rxbuf++;
1479 1502
1480skip: 1503skip:
1481 for (; desc_cnt > 0; desc_cnt--) { 1504 for (; desc_cnt > 0; desc_cnt--) {
@@ -1523,9 +1546,10 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1523 int producer, count = 0; 1546 int producer, count = 0;
1524 struct list_head *head; 1547 struct list_head *head;
1525 1548
1549 spin_lock(&rds_ring->lock);
1550
1526 producer = rds_ring->producer; 1551 producer = rds_ring->producer;
1527 1552
1528 spin_lock(&rds_ring->lock);
1529 head = &rds_ring->free_list; 1553 head = &rds_ring->free_list;
1530 while (!list_empty(head)) { 1554 while (!list_empty(head)) {
1531 1555
@@ -1547,13 +1571,13 @@ qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1547 1571
1548 producer = get_next_index(producer, rds_ring->num_desc); 1572 producer = get_next_index(producer, rds_ring->num_desc);
1549 } 1573 }
1550 spin_unlock(&rds_ring->lock);
1551 1574
1552 if (count) { 1575 if (count) {
1553 rds_ring->producer = producer; 1576 rds_ring->producer = producer;
1554 writel((producer-1) & (rds_ring->num_desc-1), 1577 writel((producer-1) & (rds_ring->num_desc-1),
1555 rds_ring->crb_rcv_producer); 1578 rds_ring->crb_rcv_producer);
1556 } 1579 }
1580 spin_unlock(&rds_ring->lock);
1557} 1581}
1558 1582
1559static void 1583static void
@@ -1565,10 +1589,11 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1565 int producer, count = 0; 1589 int producer, count = 0;
1566 struct list_head *head; 1590 struct list_head *head;
1567 1591
1568 producer = rds_ring->producer;
1569 if (!spin_trylock(&rds_ring->lock)) 1592 if (!spin_trylock(&rds_ring->lock))
1570 return; 1593 return;
1571 1594
1595 producer = rds_ring->producer;
1596
1572 head = &rds_ring->free_list; 1597 head = &rds_ring->free_list;
1573 while (!list_empty(head)) { 1598 while (!list_empty(head)) {
1574 1599
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 234dab1f9982..23ea9caa5261 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -61,6 +61,10 @@ static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
61module_param(auto_fw_reset, int, 0644); 61module_param(auto_fw_reset, int, 0644);
62MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); 62MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
63 63
64static int load_fw_file;
65module_param(load_fw_file, int, 0644);
66MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
67
64static int __devinit qlcnic_probe(struct pci_dev *pdev, 68static int __devinit qlcnic_probe(struct pci_dev *pdev,
65 const struct pci_device_id *ent); 69 const struct pci_device_id *ent);
66static void __devexit qlcnic_remove(struct pci_dev *pdev); 70static void __devexit qlcnic_remove(struct pci_dev *pdev);
@@ -84,6 +88,7 @@ static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
84static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter); 88static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
85static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); 89static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
86 90
91static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
87static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter); 92static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
88static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter); 93static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
89 94
@@ -208,6 +213,9 @@ qlcnic_napi_enable(struct qlcnic_adapter *adapter)
208 struct qlcnic_host_sds_ring *sds_ring; 213 struct qlcnic_host_sds_ring *sds_ring;
209 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 214 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
210 215
216 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
217 return;
218
211 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 219 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
212 sds_ring = &recv_ctx->sds_rings[ring]; 220 sds_ring = &recv_ctx->sds_rings[ring];
213 napi_enable(&sds_ring->napi); 221 napi_enable(&sds_ring->napi);
@@ -222,6 +230,9 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
222 struct qlcnic_host_sds_ring *sds_ring; 230 struct qlcnic_host_sds_ring *sds_ring;
223 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx; 231 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
224 232
233 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
234 return;
235
225 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 236 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
226 sds_ring = &recv_ctx->sds_rings[ring]; 237 sds_ring = &recv_ctx->sds_rings[ring];
227 qlcnic_disable_int(sds_ring); 238 qlcnic_disable_int(sds_ring);
@@ -233,67 +244,6 @@ qlcnic_napi_disable(struct qlcnic_adapter *adapter)
233static void qlcnic_clear_stats(struct qlcnic_adapter *adapter) 244static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
234{ 245{
235 memset(&adapter->stats, 0, sizeof(adapter->stats)); 246 memset(&adapter->stats, 0, sizeof(adapter->stats));
236 return;
237}
238
239static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
240{
241 struct pci_dev *pdev = adapter->pdev;
242 u64 mask, cmask;
243
244 adapter->pci_using_dac = 0;
245
246 mask = DMA_BIT_MASK(39);
247 cmask = mask;
248
249 if (pci_set_dma_mask(pdev, mask) == 0 &&
250 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
251 adapter->pci_using_dac = 1;
252 return 0;
253 }
254
255 return -EIO;
256}
257
258/* Update addressable range if firmware supports it */
259static int
260qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
261{
262 int change, shift, err;
263 u64 mask, old_mask, old_cmask;
264 struct pci_dev *pdev = adapter->pdev;
265
266 change = 0;
267
268 shift = QLCRD32(adapter, CRB_DMA_SHIFT);
269 if (shift > 32)
270 return 0;
271
272 if (shift > 9)
273 change = 1;
274
275 if (change) {
276 old_mask = pdev->dma_mask;
277 old_cmask = pdev->dev.coherent_dma_mask;
278
279 mask = DMA_BIT_MASK(32+shift);
280
281 err = pci_set_dma_mask(pdev, mask);
282 if (err)
283 goto err_out;
284
285 err = pci_set_consistent_dma_mask(pdev, mask);
286 if (err)
287 goto err_out;
288 dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
289 }
290
291 return 0;
292
293err_out:
294 pci_set_dma_mask(pdev, old_mask);
295 pci_set_consistent_dma_mask(pdev, old_cmask);
296 return err;
297} 247}
298 248
299static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter) 249static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
@@ -512,13 +462,6 @@ qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
512 struct pci_dev *pdev = adapter->pdev; 462 struct pci_dev *pdev = adapter->pdev;
513 int pci_func = adapter->ahw.pci_func; 463 int pci_func = adapter->ahw.pci_func;
514 464
515 /*
516 * Set the CRB window to invalid. If any register in window 0 is
517 * accessed it should set the window to 0 and then reset it to 1.
518 */
519 adapter->ahw.crb_win = -1;
520 adapter->ahw.ocm_win = -1;
521
522 /* remap phys address */ 465 /* remap phys address */
523 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 466 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
524 mem_len = pci_resource_len(pdev, 0); 467 mem_len = pci_resource_len(pdev, 0);
@@ -556,7 +499,9 @@ static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
556 qlcnic_boards[i].device == pdev->device && 499 qlcnic_boards[i].device == pdev->device &&
557 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor && 500 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
558 qlcnic_boards[i].sub_device == pdev->subsystem_device) { 501 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
559 strcpy(name, qlcnic_boards[i].short_name); 502 sprintf(name, "%pM: %s" ,
503 adapter->mac_addr,
504 qlcnic_boards[i].short_name);
560 found = 1; 505 found = 1;
561 break; 506 break;
562 } 507 }
@@ -605,22 +550,10 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
605 brd_name, adapter->ahw.revision_id); 550 brd_name, adapter->ahw.revision_id);
606 } 551 }
607 552
608 if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) { 553 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
609 adapter->driver_mismatch = 1; 554 fw_major, fw_minor, fw_build);
610 dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
611 fw_major, fw_minor, fw_build);
612 return;
613 }
614
615 i = QLCRD32(adapter, QLCNIC_SRE_MISC);
616 adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
617
618 dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
619 fw_major, fw_minor, fw_build,
620 adapter->ahw.cut_through ? "cut-through" : "legacy");
621 555
622 if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222)) 556 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
623 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
624 557
625 adapter->flags &= ~QLCNIC_LRO_ENABLED; 558 adapter->flags &= ~QLCNIC_LRO_ENABLED;
626 559
@@ -637,7 +570,6 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
637 570
638 adapter->num_txd = MAX_CMD_DESCRIPTORS; 571 adapter->num_txd = MAX_CMD_DESCRIPTORS;
639 572
640 adapter->num_lro_rxd = 0;
641 adapter->max_rds_rings = 2; 573 adapter->max_rds_rings = 2;
642} 574}
643 575
@@ -646,11 +578,10 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
646{ 578{
647 int val, err, first_boot; 579 int val, err, first_boot;
648 580
649 err = qlcnic_set_dma_mask(adapter); 581 err = qlcnic_can_start_firmware(adapter);
650 if (err) 582 if (err < 0)
651 return err; 583 return err;
652 584 else if (!err)
653 if (!qlcnic_can_start_firmware(adapter))
654 goto wait_init; 585 goto wait_init;
655 586
656 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc)); 587 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
@@ -658,7 +589,10 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
658 /* This is the first boot after power up */ 589 /* This is the first boot after power up */
659 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC); 590 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
660 591
661 qlcnic_request_firmware(adapter); 592 if (load_fw_file)
593 qlcnic_request_firmware(adapter);
594 else
595 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
662 596
663 err = qlcnic_need_fw_reset(adapter); 597 err = qlcnic_need_fw_reset(adapter);
664 if (err < 0) 598 if (err < 0)
@@ -672,7 +606,6 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
672 msleep(1); 606 msleep(1);
673 } 607 }
674 608
675 QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
676 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0); 609 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
677 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0); 610 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
678 611
@@ -696,16 +629,18 @@ wait_init:
696 goto err_out; 629 goto err_out;
697 630
698 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY); 631 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
699 632 qlcnic_idc_debug_info(adapter, 1);
700 qlcnic_update_dma_mask(adapter);
701 633
702 qlcnic_check_options(adapter); 634 qlcnic_check_options(adapter);
703 635
704 adapter->need_fw_reset = 0; 636 adapter->need_fw_reset = 0;
705 637
706 /* fall through and release firmware */ 638 qlcnic_release_firmware(adapter);
639 return 0;
707 640
708err_out: 641err_out:
642 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
643 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
709 qlcnic_release_firmware(adapter); 644 qlcnic_release_firmware(adapter);
710 return err; 645 return err;
711} 646}
@@ -937,6 +872,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
937 struct qlcnic_host_sds_ring *sds_ring; 872 struct qlcnic_host_sds_ring *sds_ring;
938 int ring; 873 int ring;
939 874
875 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
940 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 876 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
941 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 877 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
942 sds_ring = &adapter->recv_ctx.sds_rings[ring]; 878 sds_ring = &adapter->recv_ctx.sds_rings[ring];
@@ -950,11 +886,11 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
950 adapter->max_sds_rings = max_sds_rings; 886 adapter->max_sds_rings = max_sds_rings;
951 887
952 if (qlcnic_attach(adapter)) 888 if (qlcnic_attach(adapter))
953 return; 889 goto out;
954 890
955 if (netif_running(netdev)) 891 if (netif_running(netdev))
956 __qlcnic_up(adapter, netdev); 892 __qlcnic_up(adapter, netdev);
957 893out:
958 netif_device_attach(netdev); 894 netif_device_attach(netdev);
959} 895}
960 896
@@ -976,8 +912,10 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
976 adapter->diag_test = test; 912 adapter->diag_test = test;
977 913
978 ret = qlcnic_attach(adapter); 914 ret = qlcnic_attach(adapter);
979 if (ret) 915 if (ret) {
916 netif_device_attach(netdev);
980 return ret; 917 return ret;
918 }
981 919
982 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 920 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
983 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 921 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
@@ -985,6 +923,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
985 qlcnic_enable_int(sds_ring); 923 qlcnic_enable_int(sds_ring);
986 } 924 }
987 } 925 }
926 set_bit(__QLCNIC_DEV_UP, &adapter->state);
988 927
989 return 0; 928 return 0;
990} 929}
@@ -1010,23 +949,19 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
1010 if (netif_running(netdev)) { 949 if (netif_running(netdev)) {
1011 err = qlcnic_attach(adapter); 950 err = qlcnic_attach(adapter);
1012 if (!err) 951 if (!err)
1013 err = __qlcnic_up(adapter, netdev); 952 __qlcnic_up(adapter, netdev);
1014
1015 if (err)
1016 goto done;
1017 } 953 }
1018 954
1019 netif_device_attach(netdev); 955 netif_device_attach(netdev);
1020 } 956 }
1021 957
1022done:
1023 clear_bit(__QLCNIC_RESETTING, &adapter->state); 958 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1024 return err; 959 return err;
1025} 960}
1026 961
1027static int 962static int
1028qlcnic_setup_netdev(struct qlcnic_adapter *adapter, 963qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1029 struct net_device *netdev) 964 struct net_device *netdev, u8 pci_using_dac)
1030{ 965{
1031 int err; 966 int err;
1032 struct pci_dev *pdev = adapter->pdev; 967 struct pci_dev *pdev = adapter->pdev;
@@ -1049,7 +984,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1049 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); 984 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1050 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6); 985 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1051 986
1052 if (adapter->pci_using_dac) { 987 if (pci_using_dac) {
1053 netdev->features |= NETIF_F_HIGHDMA; 988 netdev->features |= NETIF_F_HIGHDMA;
1054 netdev->vlan_features |= NETIF_F_HIGHDMA; 989 netdev->vlan_features |= NETIF_F_HIGHDMA;
1055 } 990 }
@@ -1079,6 +1014,22 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1079 return 0; 1014 return 0;
1080} 1015}
1081 1016
1017static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1018{
1019 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1020 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1021 *pci_using_dac = 1;
1022 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1023 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1024 *pci_using_dac = 0;
1025 else {
1026 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1027 return -EIO;
1028 }
1029
1030 return 0;
1031}
1032
1082static int __devinit 1033static int __devinit
1083qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1034qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1084{ 1035{
@@ -1087,6 +1038,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1087 int err; 1038 int err;
1088 int pci_func_id = PCI_FUNC(pdev->devfn); 1039 int pci_func_id = PCI_FUNC(pdev->devfn);
1089 uint8_t revision_id; 1040 uint8_t revision_id;
1041 uint8_t pci_using_dac;
1090 1042
1091 err = pci_enable_device(pdev); 1043 err = pci_enable_device(pdev);
1092 if (err) 1044 if (err)
@@ -1097,6 +1049,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1097 goto err_out_disable_pdev; 1049 goto err_out_disable_pdev;
1098 } 1050 }
1099 1051
1052 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1053 if (err)
1054 goto err_out_disable_pdev;
1055
1100 err = pci_request_regions(pdev, qlcnic_driver_name); 1056 err = pci_request_regions(pdev, qlcnic_driver_name);
1101 if (err) 1057 if (err)
1102 goto err_out_disable_pdev; 1058 goto err_out_disable_pdev;
@@ -1115,6 +1071,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1115 adapter = netdev_priv(netdev); 1071 adapter = netdev_priv(netdev);
1116 adapter->netdev = netdev; 1072 adapter->netdev = netdev;
1117 adapter->pdev = pdev; 1073 adapter->pdev = pdev;
1074 adapter->dev_rst_time = jiffies;
1118 adapter->ahw.pci_func = pci_func_id; 1075 adapter->ahw.pci_func = pci_func_id;
1119 1076
1120 revision_id = pdev->revision; 1077 revision_id = pdev->revision;
@@ -1139,21 +1096,23 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1139 goto err_out_iounmap; 1096 goto err_out_iounmap;
1140 } 1097 }
1141 1098
1099 if (qlcnic_read_mac_addr(adapter))
1100 dev_warn(&pdev->dev, "failed to read mac addr\n");
1101
1102 if (qlcnic_setup_idc_param(adapter))
1103 goto err_out_iounmap;
1142 1104
1143 err = qlcnic_start_firmware(adapter); 1105 err = qlcnic_start_firmware(adapter);
1144 if (err) 1106 if (err) {
1107 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
1145 goto err_out_decr_ref; 1108 goto err_out_decr_ref;
1146 1109 }
1147 /*
1148 * See if the firmware gave us a virtual-physical port mapping.
1149 */
1150 adapter->physical_port = adapter->portnum;
1151 1110
1152 qlcnic_clear_stats(adapter); 1111 qlcnic_clear_stats(adapter);
1153 1112
1154 qlcnic_setup_intr(adapter); 1113 qlcnic_setup_intr(adapter);
1155 1114
1156 err = qlcnic_setup_netdev(adapter, netdev); 1115 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1157 if (err) 1116 if (err)
1158 goto err_out_disable_msi; 1117 goto err_out_disable_msi;
1159 1118
@@ -1304,9 +1263,6 @@ qlcnic_resume(struct pci_dev *pdev)
1304 pci_set_master(pdev); 1263 pci_set_master(pdev);
1305 pci_restore_state(pdev); 1264 pci_restore_state(pdev);
1306 1265
1307 adapter->ahw.crb_win = -1;
1308 adapter->ahw.ocm_win = -1;
1309
1310 err = qlcnic_start_firmware(adapter); 1266 err = qlcnic_start_firmware(adapter);
1311 if (err) { 1267 if (err) {
1312 dev_err(&pdev->dev, "failed to start firmware\n"); 1268 dev_err(&pdev->dev, "failed to start firmware\n");
@@ -1334,6 +1290,7 @@ err_out_detach:
1334 qlcnic_detach(adapter); 1290 qlcnic_detach(adapter);
1335err_out: 1291err_out:
1336 qlcnic_clr_all_drv_state(adapter); 1292 qlcnic_clr_all_drv_state(adapter);
1293 netif_device_attach(netdev);
1337 return err; 1294 return err;
1338} 1295}
1339#endif 1296#endif
@@ -1570,6 +1527,11 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1570 int frag_count, no_of_desc; 1527 int frag_count, no_of_desc;
1571 u32 num_txd = tx_ring->num_desc; 1528 u32 num_txd = tx_ring->num_desc;
1572 1529
1530 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1531 netif_stop_queue(netdev);
1532 return NETDEV_TX_BUSY;
1533 }
1534
1573 frag_count = skb_shinfo(skb)->nr_frags + 1; 1535 frag_count = skb_shinfo(skb)->nr_frags + 1;
1574 1536
1575 /* 4 fragments per cmd des */ 1537 /* 4 fragments per cmd des */
@@ -1586,8 +1548,10 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1586 1548
1587 pdev = adapter->pdev; 1549 pdev = adapter->pdev;
1588 1550
1589 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) 1551 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1552 adapter->stats.tx_dma_map_error++;
1590 goto drop_packet; 1553 goto drop_packet;
1554 }
1591 1555
1592 pbuf->skb = skb; 1556 pbuf->skb = skb;
1593 pbuf->frag_count = frag_count; 1557 pbuf->frag_count = frag_count;
@@ -1739,6 +1703,7 @@ static void qlcnic_tx_timeout_task(struct work_struct *work)
1739request_reset: 1703request_reset:
1740 adapter->need_fw_reset = 1; 1704 adapter->need_fw_reset = 1;
1741 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1705 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1706 QLCDB(adapter, DRV, "Resetting adapter\n");
1742} 1707}
1743 1708
1744static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) 1709static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -1750,7 +1715,7 @@ static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1750 1715
1751 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts; 1716 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1752 stats->tx_packets = adapter->stats.xmitfinished; 1717 stats->tx_packets = adapter->stats.xmitfinished;
1753 stats->rx_bytes = adapter->stats.rxbytes; 1718 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
1754 stats->tx_bytes = adapter->stats.txbytes; 1719 stats->tx_bytes = adapter->stats.txbytes;
1755 stats->rx_dropped = adapter->stats.rxdropped; 1720 stats->rx_dropped = adapter->stats.rxdropped;
1756 stats->tx_dropped = adapter->stats.txdropped; 1721 stats->tx_dropped = adapter->stats.txdropped;
@@ -1944,7 +1909,20 @@ static void qlcnic_poll_controller(struct net_device *netdev)
1944#endif 1909#endif
1945 1910
1946static void 1911static void
1947qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state) 1912qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
1913{
1914 u32 val;
1915
1916 val = adapter->portnum & 0xf;
1917 val |= encoding << 7;
1918 val |= (jiffies - adapter->dev_rst_time) << 8;
1919
1920 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
1921 adapter->dev_rst_time = jiffies;
1922}
1923
1924static int
1925qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
1948{ 1926{
1949 u32 val; 1927 u32 val;
1950 1928
@@ -1952,18 +1930,20 @@ qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
1952 state != QLCNIC_DEV_NEED_QUISCENT); 1930 state != QLCNIC_DEV_NEED_QUISCENT);
1953 1931
1954 if (qlcnic_api_lock(adapter)) 1932 if (qlcnic_api_lock(adapter))
1955 return ; 1933 return -EIO;
1956 1934
1957 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 1935 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1958 1936
1959 if (state == QLCNIC_DEV_NEED_RESET) 1937 if (state == QLCNIC_DEV_NEED_RESET)
1960 val |= ((u32)0x1 << (adapter->portnum * 4)); 1938 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
1961 else if (state == QLCNIC_DEV_NEED_QUISCENT) 1939 else if (state == QLCNIC_DEV_NEED_QUISCENT)
1962 val |= ((u32)0x1 << ((adapter->portnum * 4) + 1)); 1940 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
1963 1941
1964 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); 1942 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1965 1943
1966 qlcnic_api_unlock(adapter); 1944 qlcnic_api_unlock(adapter);
1945
1946 return 0;
1967} 1947}
1968 1948
1969static int 1949static int
@@ -1975,7 +1955,7 @@ qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
1975 return -EBUSY; 1955 return -EBUSY;
1976 1956
1977 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 1957 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1978 val &= ~((u32)0x3 << (adapter->portnum * 4)); 1958 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1979 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); 1959 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1980 1960
1981 qlcnic_api_unlock(adapter); 1961 qlcnic_api_unlock(adapter);
@@ -1992,14 +1972,14 @@ qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
1992 goto err; 1972 goto err;
1993 1973
1994 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); 1974 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1995 val &= ~((u32)0x1 << (adapter->portnum * 4)); 1975 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
1996 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val); 1976 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
1997 1977
1998 if (!(val & 0x11111111)) 1978 if (!(val & 0x11111111))
1999 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD); 1979 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2000 1980
2001 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 1981 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2002 val &= ~((u32)0x3 << (adapter->portnum * 4)); 1982 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2003 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); 1983 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2004 1984
2005 qlcnic_api_unlock(adapter); 1985 qlcnic_api_unlock(adapter);
@@ -2009,6 +1989,7 @@ err:
2009 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1989 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2010} 1990}
2011 1991
1992/* Grab api lock, before checking state */
2012static int 1993static int
2013qlcnic_check_drv_state(struct qlcnic_adapter *adapter) 1994qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2014{ 1995{
@@ -2024,73 +2005,103 @@ qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2024 return 1; 2005 return 1;
2025} 2006}
2026 2007
2008static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2009{
2010 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2011
2012 if (val != QLCNIC_DRV_IDC_VER) {
2013 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2014 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2015 }
2016
2017 return 0;
2018}
2019
2027static int 2020static int
2028qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) 2021qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2029{ 2022{
2030 u32 val, prev_state; 2023 u32 val, prev_state;
2031 int cnt = 0; 2024 u8 dev_init_timeo = adapter->dev_init_timeo;
2032 int portnum = adapter->portnum; 2025 u8 portnum = adapter->portnum;
2026 u8 ret;
2027
2028 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2029 return 1;
2033 2030
2034 if (qlcnic_api_lock(adapter)) 2031 if (qlcnic_api_lock(adapter))
2035 return -1; 2032 return -1;
2036 2033
2037 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT); 2034 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2038 if (!(val & ((int)0x1 << (portnum * 4)))) { 2035 if (!(val & (1 << (portnum * 4)))) {
2039 val |= ((u32)0x1 << (portnum * 4)); 2036 QLC_DEV_SET_REF_CNT(val, portnum);
2040 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val); 2037 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2041 } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
2042 goto start_fw;
2043 } 2038 }
2044 2039
2045 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2040 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2041 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
2046 2042
2047 switch (prev_state) { 2043 switch (prev_state) {
2048 case QLCNIC_DEV_COLD: 2044 case QLCNIC_DEV_COLD:
2049start_fw: 2045 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2050 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING); 2046 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
2047 qlcnic_idc_debug_info(adapter, 0);
2051 qlcnic_api_unlock(adapter); 2048 qlcnic_api_unlock(adapter);
2052 return 1; 2049 return 1;
2053 2050
2054 case QLCNIC_DEV_READY: 2051 case QLCNIC_DEV_READY:
2052 ret = qlcnic_check_idc_ver(adapter);
2055 qlcnic_api_unlock(adapter); 2053 qlcnic_api_unlock(adapter);
2056 return 0; 2054 return ret;
2057 2055
2058 case QLCNIC_DEV_NEED_RESET: 2056 case QLCNIC_DEV_NEED_RESET:
2059 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 2057 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2060 val |= ((u32)0x1 << (portnum * 4)); 2058 QLC_DEV_SET_RST_RDY(val, portnum);
2061 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); 2059 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2062 break; 2060 break;
2063 2061
2064 case QLCNIC_DEV_NEED_QUISCENT: 2062 case QLCNIC_DEV_NEED_QUISCENT:
2065 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 2063 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2066 val |= ((u32)0x1 << ((portnum * 4) + 1)); 2064 QLC_DEV_SET_QSCNT_RDY(val, portnum);
2067 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); 2065 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2068 break; 2066 break;
2069 2067
2070 case QLCNIC_DEV_FAILED: 2068 case QLCNIC_DEV_FAILED:
2069 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
2071 qlcnic_api_unlock(adapter); 2070 qlcnic_api_unlock(adapter);
2072 return -1; 2071 return -1;
2072
2073 case QLCNIC_DEV_INITIALIZING:
2074 case QLCNIC_DEV_QUISCENT:
2075 break;
2073 } 2076 }
2074 2077
2075 qlcnic_api_unlock(adapter); 2078 qlcnic_api_unlock(adapter);
2076 msleep(1000); 2079
2077 while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) && 2080 do {
2078 ++cnt < 20)
2079 msleep(1000); 2081 msleep(1000);
2082 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2083
2084 if (prev_state == QLCNIC_DEV_QUISCENT)
2085 continue;
2086 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
2080 2087
2081 if (cnt >= 20) 2088 if (!dev_init_timeo) {
2089 dev_err(&adapter->pdev->dev,
2090 "Waiting for device to initialize timeout\n");
2082 return -1; 2091 return -1;
2092 }
2083 2093
2084 if (qlcnic_api_lock(adapter)) 2094 if (qlcnic_api_lock(adapter))
2085 return -1; 2095 return -1;
2086 2096
2087 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE); 2097 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2088 val &= ~((u32)0x3 << (portnum * 4)); 2098 QLC_DEV_CLR_RST_QSCNT(val, portnum);
2089 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val); 2099 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2090 2100
2101 ret = qlcnic_check_idc_ver(adapter);
2091 qlcnic_api_unlock(adapter); 2102 qlcnic_api_unlock(adapter);
2092 2103
2093 return 0; 2104 return ret;
2094} 2105}
2095 2106
2096static void 2107static void
@@ -2098,44 +2109,84 @@ qlcnic_fwinit_work(struct work_struct *work)
2098{ 2109{
2099 struct qlcnic_adapter *adapter = container_of(work, 2110 struct qlcnic_adapter *adapter = container_of(work,
2100 struct qlcnic_adapter, fw_work.work); 2111 struct qlcnic_adapter, fw_work.work);
2101 int dev_state; 2112 u32 dev_state = 0xf;
2102 2113
2103 if (++adapter->fw_wait_cnt > FW_POLL_THRESH) 2114 if (qlcnic_api_lock(adapter))
2104 goto err_ret; 2115 goto err_ret;
2105 2116
2106 if (test_bit(__QLCNIC_START_FW, &adapter->state)) { 2117 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2118 if (dev_state == QLCNIC_DEV_QUISCENT) {
2119 qlcnic_api_unlock(adapter);
2120 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2121 FW_POLL_DELAY * 2);
2122 return;
2123 }
2124
2125 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2126 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2127 adapter->reset_ack_timeo);
2128 goto skip_ack_check;
2129 }
2130
2131 if (!qlcnic_check_drv_state(adapter)) {
2132skip_ack_check:
2133 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2107 2134
2108 if (qlcnic_check_drv_state(adapter)) { 2135 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2109 qlcnic_schedule_work(adapter, 2136 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2110 qlcnic_fwinit_work, FW_POLL_DELAY); 2137 QLCNIC_DEV_QUISCENT);
2138 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2139 FW_POLL_DELAY * 2);
2140 QLCDB(adapter, DRV, "Quiscing the driver\n");
2141 qlcnic_idc_debug_info(adapter, 0);
2142
2143 qlcnic_api_unlock(adapter);
2111 return; 2144 return;
2112 } 2145 }
2113 2146
2147 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2148 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2149 QLCNIC_DEV_INITIALIZING);
2150 set_bit(__QLCNIC_START_FW, &adapter->state);
2151 QLCDB(adapter, DRV, "Restarting fw\n");
2152 qlcnic_idc_debug_info(adapter, 0);
2153 }
2154
2155 qlcnic_api_unlock(adapter);
2156
2114 if (!qlcnic_start_firmware(adapter)) { 2157 if (!qlcnic_start_firmware(adapter)) {
2115 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2158 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2116 return; 2159 return;
2117 } 2160 }
2118
2119 goto err_ret; 2161 goto err_ret;
2120 } 2162 }
2121 2163
2164 qlcnic_api_unlock(adapter);
2165
2122 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2166 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2167 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
2168
2123 switch (dev_state) { 2169 switch (dev_state) {
2124 case QLCNIC_DEV_READY: 2170 case QLCNIC_DEV_QUISCENT:
2125 if (!qlcnic_start_firmware(adapter)) { 2171 case QLCNIC_DEV_NEED_QUISCENT:
2126 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2172 case QLCNIC_DEV_NEED_RESET:
2127 return; 2173 qlcnic_schedule_work(adapter,
2128 } 2174 qlcnic_fwinit_work, FW_POLL_DELAY);
2175 return;
2129 case QLCNIC_DEV_FAILED: 2176 case QLCNIC_DEV_FAILED:
2130 break; 2177 break;
2131 2178
2132 default: 2179 default:
2133 qlcnic_schedule_work(adapter, 2180 if (!qlcnic_start_firmware(adapter)) {
2134 qlcnic_fwinit_work, 2 * FW_POLL_DELAY); 2181 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2135 return; 2182 return;
2183 }
2136 } 2184 }
2137 2185
2138err_ret: 2186err_ret:
2187 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2188 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
2189 netif_device_attach(adapter->netdev);
2139 qlcnic_clr_all_drv_state(adapter); 2190 qlcnic_clr_all_drv_state(adapter);
2140} 2191}
2141 2192
@@ -2163,7 +2214,8 @@ qlcnic_detach_work(struct work_struct *work)
2163 if (adapter->temp == QLCNIC_TEMP_PANIC) 2214 if (adapter->temp == QLCNIC_TEMP_PANIC)
2164 goto err_ret; 2215 goto err_ret;
2165 2216
2166 qlcnic_set_drv_state(adapter, adapter->dev_state); 2217 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2218 goto err_ret;
2167 2219
2168 adapter->fw_wait_cnt = 0; 2220 adapter->fw_wait_cnt = 0;
2169 2221
@@ -2172,10 +2224,14 @@ qlcnic_detach_work(struct work_struct *work)
2172 return; 2224 return;
2173 2225
2174err_ret: 2226err_ret:
2227 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2228 status, adapter->temp);
2229 netif_device_attach(netdev);
2175 qlcnic_clr_all_drv_state(adapter); 2230 qlcnic_clr_all_drv_state(adapter);
2176 2231
2177} 2232}
2178 2233
2234/*Transit to RESET state from READY state only */
2179static void 2235static void
2180qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) 2236qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2181{ 2237{
@@ -2186,9 +2242,10 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2186 2242
2187 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2243 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2188 2244
2189 if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) { 2245 if (state == QLCNIC_DEV_READY) {
2190 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET); 2246 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2191 set_bit(__QLCNIC_START_FW, &adapter->state); 2247 QLCDB(adapter, DRV, "NEED_RESET state set\n");
2248 qlcnic_idc_debug_info(adapter, 0);
2192 } 2249 }
2193 2250
2194 qlcnic_api_unlock(adapter); 2251 qlcnic_api_unlock(adapter);
@@ -2233,9 +2290,8 @@ qlcnic_attach_work(struct work_struct *work)
2233 qlcnic_config_indev_addr(netdev, NETDEV_UP); 2290 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2234 } 2291 }
2235 2292
2236 netif_device_attach(netdev);
2237
2238done: 2293done:
2294 netif_device_attach(netdev);
2239 adapter->fw_fail_cnt = 0; 2295 adapter->fw_fail_cnt = 0;
2240 clear_bit(__QLCNIC_RESETTING, &adapter->state); 2296 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2241 2297
@@ -2253,10 +2309,8 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
2253 if (qlcnic_check_temp(adapter)) 2309 if (qlcnic_check_temp(adapter))
2254 goto detach; 2310 goto detach;
2255 2311
2256 if (adapter->need_fw_reset) { 2312 if (adapter->need_fw_reset)
2257 qlcnic_dev_request_reset(adapter); 2313 qlcnic_dev_request_reset(adapter);
2258 goto detach;
2259 }
2260 2314
2261 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2315 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2262 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT) 2316 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
@@ -2285,8 +2339,11 @@ detach:
2285 QLCNIC_DEV_NEED_RESET; 2339 QLCNIC_DEV_NEED_RESET;
2286 2340
2287 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && 2341 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2288 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 2342 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2343
2289 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); 2344 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2345 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2346 }
2290 2347
2291 return 1; 2348 return 1;
2292} 2349}
@@ -2387,51 +2444,72 @@ static int
2387qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter, 2444qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2388 loff_t offset, size_t size) 2445 loff_t offset, size_t size)
2389{ 2446{
2447 size_t crb_size = 4;
2448
2390 if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) 2449 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2391 return -EIO; 2450 return -EIO;
2392 2451
2393 if ((size != 4) || (offset & 0x3)) 2452 if (offset < QLCNIC_PCI_CRBSPACE) {
2394 return -EINVAL; 2453 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
2454 QLCNIC_PCI_CAMQM_END))
2455 crb_size = 8;
2456 else
2457 return -EINVAL;
2458 }
2395 2459
2396 if (offset < QLCNIC_PCI_CRBSPACE) 2460 if ((size != crb_size) || (offset & (crb_size-1)))
2397 return -EINVAL; 2461 return -EINVAL;
2398 2462
2399 return 0; 2463 return 0;
2400} 2464}
2401 2465
2402static ssize_t 2466static ssize_t
2403qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr, 2467qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
2468 struct bin_attribute *attr,
2404 char *buf, loff_t offset, size_t size) 2469 char *buf, loff_t offset, size_t size)
2405{ 2470{
2406 struct device *dev = container_of(kobj, struct device, kobj); 2471 struct device *dev = container_of(kobj, struct device, kobj);
2407 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 2472 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2408 u32 data; 2473 u32 data;
2474 u64 qmdata;
2409 int ret; 2475 int ret;
2410 2476
2411 ret = qlcnic_sysfs_validate_crb(adapter, offset, size); 2477 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2412 if (ret != 0) 2478 if (ret != 0)
2413 return ret; 2479 return ret;
2414 2480
2415 data = QLCRD32(adapter, offset); 2481 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2416 memcpy(buf, &data, size); 2482 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
2483 memcpy(buf, &qmdata, size);
2484 } else {
2485 data = QLCRD32(adapter, offset);
2486 memcpy(buf, &data, size);
2487 }
2417 return size; 2488 return size;
2418} 2489}
2419 2490
2420static ssize_t 2491static ssize_t
2421qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr, 2492qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
2493 struct bin_attribute *attr,
2422 char *buf, loff_t offset, size_t size) 2494 char *buf, loff_t offset, size_t size)
2423{ 2495{
2424 struct device *dev = container_of(kobj, struct device, kobj); 2496 struct device *dev = container_of(kobj, struct device, kobj);
2425 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 2497 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2426 u32 data; 2498 u32 data;
2499 u64 qmdata;
2427 int ret; 2500 int ret;
2428 2501
2429 ret = qlcnic_sysfs_validate_crb(adapter, offset, size); 2502 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2430 if (ret != 0) 2503 if (ret != 0)
2431 return ret; 2504 return ret;
2432 2505
2433 memcpy(&data, buf, size); 2506 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2434 QLCWR32(adapter, offset, data); 2507 memcpy(&qmdata, buf, size);
2508 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
2509 } else {
2510 memcpy(&data, buf, size);
2511 QLCWR32(adapter, offset, data);
2512 }
2435 return size; 2513 return size;
2436} 2514}
2437 2515
@@ -2449,7 +2527,8 @@ qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2449} 2527}
2450 2528
2451static ssize_t 2529static ssize_t
2452qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr, 2530qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
2531 struct bin_attribute *attr,
2453 char *buf, loff_t offset, size_t size) 2532 char *buf, loff_t offset, size_t size)
2454{ 2533{
2455 struct device *dev = container_of(kobj, struct device, kobj); 2534 struct device *dev = container_of(kobj, struct device, kobj);
@@ -2470,7 +2549,8 @@ qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2470} 2549}
2471 2550
2472static ssize_t 2551static ssize_t
2473qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr, 2552qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
2553 struct bin_attribute *attr,
2474 char *buf, loff_t offset, size_t size) 2554 char *buf, loff_t offset, size_t size)
2475{ 2555{
2476 struct device *dev = container_of(kobj, struct device, kobj); 2556 struct device *dev = container_of(kobj, struct device, kobj);
@@ -2553,24 +2633,12 @@ qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
2553 2633
2554#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops) 2634#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
2555 2635
2556static int
2557qlcnic_destip_supported(struct qlcnic_adapter *adapter)
2558{
2559 if (adapter->ahw.cut_through)
2560 return 0;
2561
2562 return 1;
2563}
2564
2565static void 2636static void
2566qlcnic_config_indev_addr(struct net_device *dev, unsigned long event) 2637qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2567{ 2638{
2568 struct in_device *indev; 2639 struct in_device *indev;
2569 struct qlcnic_adapter *adapter = netdev_priv(dev); 2640 struct qlcnic_adapter *adapter = netdev_priv(dev);
2570 2641
2571 if (!qlcnic_destip_supported(adapter))
2572 return;
2573
2574 indev = in_dev_get(dev); 2642 indev = in_dev_get(dev);
2575 if (!indev) 2643 if (!indev)
2576 return; 2644 return;
@@ -2591,7 +2659,6 @@ qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2591 } endfor_ifa(indev); 2659 } endfor_ifa(indev);
2592 2660
2593 in_dev_put(indev); 2661 in_dev_put(indev);
2594 return;
2595} 2662}
2596 2663
2597static int qlcnic_netdev_event(struct notifier_block *this, 2664static int qlcnic_netdev_event(struct notifier_block *this,
@@ -2650,7 +2717,7 @@ recheck:
2650 2717
2651 adapter = netdev_priv(dev); 2718 adapter = netdev_priv(dev);
2652 2719
2653 if (!adapter || !qlcnic_destip_supported(adapter)) 2720 if (!adapter)
2654 goto done; 2721 goto done;
2655 2722
2656 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC) 2723 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 8b742b639ceb..20624ba44a37 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1344,8 +1344,8 @@ struct oal {
1344}; 1344};
1345 1345
1346struct map_list { 1346struct map_list {
1347 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1347 DEFINE_DMA_UNMAP_ADDR(mapaddr);
1348 DECLARE_PCI_UNMAP_LEN(maplen); 1348 DEFINE_DMA_UNMAP_LEN(maplen);
1349}; 1349};
1350 1350
1351struct tx_ring_desc { 1351struct tx_ring_desc {
@@ -1373,8 +1373,8 @@ struct bq_desc {
1373 } p; 1373 } p;
1374 __le64 *addr; 1374 __le64 *addr;
1375 u32 index; 1375 u32 index;
1376 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1376 DEFINE_DMA_UNMAP_ADDR(mapaddr);
1377 DECLARE_PCI_UNMAP_LEN(maplen); 1377 DEFINE_DMA_UNMAP_LEN(maplen);
1378}; 1378};
1379 1379
1380#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) 1380#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 362664628937..68a1c9b91e74 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1340,7 +1340,7 @@ void ql_mpi_core_to_log(struct work_struct *work)
1340 1340
1341 for (i = 0; i < count; i += 8) { 1341 for (i = 0; i < count; i += 8) {
1342 printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x " 1342 printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
1343 "%.08x %.08x %.08x \n", i, 1343 "%.08x %.08x %.08x\n", i,
1344 tmp[i + 0], 1344 tmp[i + 0],
1345 tmp[i + 1], 1345 tmp[i + 1],
1346 tmp[i + 2], 1346 tmp[i + 2],
@@ -2058,7 +2058,7 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
2058 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", 2058 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
2059 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", 2059 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
2060 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); 2060 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
2061 printk(KERN_ERR PFX "flags3 = %s %s %s \n", 2061 printk(KERN_ERR PFX "flags3 = %s %s %s\n",
2062 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", 2062 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
2063 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", 2063 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
2064 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); 2064 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 7e09ff4a5755..4892d64f4e05 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -181,8 +181,6 @@ quit:
181 spin_unlock(&qdev->stats_lock); 181 spin_unlock(&qdev->stats_lock);
182 182
183 QL_DUMP_STAT(qdev); 183 QL_DUMP_STAT(qdev);
184
185 return;
186} 184}
187 185
188static char ql_stats_str_arr[][ETH_GSTRING_LEN] = { 186static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index fd34f266c0a8..fa4b24c49f42 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1057,7 +1057,7 @@ static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1057 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); 1057 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1058 1058
1059 pci_dma_sync_single_for_cpu(qdev->pdev, 1059 pci_dma_sync_single_for_cpu(qdev->pdev,
1060 pci_unmap_addr(lbq_desc, mapaddr), 1060 dma_unmap_addr(lbq_desc, mapaddr),
1061 rx_ring->lbq_buf_size, 1061 rx_ring->lbq_buf_size,
1062 PCI_DMA_FROMDEVICE); 1062 PCI_DMA_FROMDEVICE);
1063 1063
@@ -1170,8 +1170,8 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1170 1170
1171 map = lbq_desc->p.pg_chunk.map + 1171 map = lbq_desc->p.pg_chunk.map +
1172 lbq_desc->p.pg_chunk.offset; 1172 lbq_desc->p.pg_chunk.offset;
1173 pci_unmap_addr_set(lbq_desc, mapaddr, map); 1173 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1174 pci_unmap_len_set(lbq_desc, maplen, 1174 dma_unmap_len_set(lbq_desc, maplen,
1175 rx_ring->lbq_buf_size); 1175 rx_ring->lbq_buf_size);
1176 *lbq_desc->addr = cpu_to_le64(map); 1176 *lbq_desc->addr = cpu_to_le64(map);
1177 1177
@@ -1241,8 +1241,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1241 sbq_desc->p.skb = NULL; 1241 sbq_desc->p.skb = NULL;
1242 return; 1242 return;
1243 } 1243 }
1244 pci_unmap_addr_set(sbq_desc, mapaddr, map); 1244 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1245 pci_unmap_len_set(sbq_desc, maplen, 1245 dma_unmap_len_set(sbq_desc, maplen,
1246 rx_ring->sbq_buf_size); 1246 rx_ring->sbq_buf_size);
1247 *sbq_desc->addr = cpu_to_le64(map); 1247 *sbq_desc->addr = cpu_to_le64(map);
1248 } 1248 }
@@ -1298,18 +1298,18 @@ static void ql_unmap_send(struct ql_adapter *qdev,
1298 "unmapping OAL area.\n"); 1298 "unmapping OAL area.\n");
1299 } 1299 }
1300 pci_unmap_single(qdev->pdev, 1300 pci_unmap_single(qdev->pdev,
1301 pci_unmap_addr(&tx_ring_desc->map[i], 1301 dma_unmap_addr(&tx_ring_desc->map[i],
1302 mapaddr), 1302 mapaddr),
1303 pci_unmap_len(&tx_ring_desc->map[i], 1303 dma_unmap_len(&tx_ring_desc->map[i],
1304 maplen), 1304 maplen),
1305 PCI_DMA_TODEVICE); 1305 PCI_DMA_TODEVICE);
1306 } else { 1306 } else {
1307 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, 1307 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1308 "unmapping frag %d.\n", i); 1308 "unmapping frag %d.\n", i);
1309 pci_unmap_page(qdev->pdev, 1309 pci_unmap_page(qdev->pdev,
1310 pci_unmap_addr(&tx_ring_desc->map[i], 1310 dma_unmap_addr(&tx_ring_desc->map[i],
1311 mapaddr), 1311 mapaddr),
1312 pci_unmap_len(&tx_ring_desc->map[i], 1312 dma_unmap_len(&tx_ring_desc->map[i],
1313 maplen), PCI_DMA_TODEVICE); 1313 maplen), PCI_DMA_TODEVICE);
1314 } 1314 }
1315 } 1315 }
@@ -1348,8 +1348,8 @@ static int ql_map_send(struct ql_adapter *qdev,
1348 1348
1349 tbd->len = cpu_to_le32(len); 1349 tbd->len = cpu_to_le32(len);
1350 tbd->addr = cpu_to_le64(map); 1350 tbd->addr = cpu_to_le64(map);
1351 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); 1351 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1352 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); 1352 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1353 map_idx++; 1353 map_idx++;
1354 1354
1355 /* 1355 /*
@@ -1402,9 +1402,9 @@ static int ql_map_send(struct ql_adapter *qdev,
1402 tbd->len = 1402 tbd->len =
1403 cpu_to_le32((sizeof(struct tx_buf_desc) * 1403 cpu_to_le32((sizeof(struct tx_buf_desc) *
1404 (frag_cnt - frag_idx)) | TX_DESC_C); 1404 (frag_cnt - frag_idx)) | TX_DESC_C);
1405 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, 1405 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1406 map); 1406 map);
1407 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, 1407 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1408 sizeof(struct oal)); 1408 sizeof(struct oal));
1409 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; 1409 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1410 map_idx++; 1410 map_idx++;
@@ -1425,8 +1425,8 @@ static int ql_map_send(struct ql_adapter *qdev,
1425 1425
1426 tbd->addr = cpu_to_le64(map); 1426 tbd->addr = cpu_to_le64(map);
1427 tbd->len = cpu_to_le32(frag->size); 1427 tbd->len = cpu_to_le32(frag->size);
1428 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); 1428 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1429 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, 1429 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1430 frag->size); 1430 frag->size);
1431 1431
1432 } 1432 }
@@ -1742,8 +1742,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1742 */ 1742 */
1743 sbq_desc = ql_get_curr_sbuf(rx_ring); 1743 sbq_desc = ql_get_curr_sbuf(rx_ring);
1744 pci_unmap_single(qdev->pdev, 1744 pci_unmap_single(qdev->pdev,
1745 pci_unmap_addr(sbq_desc, mapaddr), 1745 dma_unmap_addr(sbq_desc, mapaddr),
1746 pci_unmap_len(sbq_desc, maplen), 1746 dma_unmap_len(sbq_desc, maplen),
1747 PCI_DMA_FROMDEVICE); 1747 PCI_DMA_FROMDEVICE);
1748 skb = sbq_desc->p.skb; 1748 skb = sbq_desc->p.skb;
1749 ql_realign_skb(skb, hdr_len); 1749 ql_realign_skb(skb, hdr_len);
@@ -1774,18 +1774,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1774 */ 1774 */
1775 sbq_desc = ql_get_curr_sbuf(rx_ring); 1775 sbq_desc = ql_get_curr_sbuf(rx_ring);
1776 pci_dma_sync_single_for_cpu(qdev->pdev, 1776 pci_dma_sync_single_for_cpu(qdev->pdev,
1777 pci_unmap_addr 1777 dma_unmap_addr
1778 (sbq_desc, mapaddr), 1778 (sbq_desc, mapaddr),
1779 pci_unmap_len 1779 dma_unmap_len
1780 (sbq_desc, maplen), 1780 (sbq_desc, maplen),
1781 PCI_DMA_FROMDEVICE); 1781 PCI_DMA_FROMDEVICE);
1782 memcpy(skb_put(skb, length), 1782 memcpy(skb_put(skb, length),
1783 sbq_desc->p.skb->data, length); 1783 sbq_desc->p.skb->data, length);
1784 pci_dma_sync_single_for_device(qdev->pdev, 1784 pci_dma_sync_single_for_device(qdev->pdev,
1785 pci_unmap_addr 1785 dma_unmap_addr
1786 (sbq_desc, 1786 (sbq_desc,
1787 mapaddr), 1787 mapaddr),
1788 pci_unmap_len 1788 dma_unmap_len
1789 (sbq_desc, 1789 (sbq_desc,
1790 maplen), 1790 maplen),
1791 PCI_DMA_FROMDEVICE); 1791 PCI_DMA_FROMDEVICE);
@@ -1798,9 +1798,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1798 ql_realign_skb(skb, length); 1798 ql_realign_skb(skb, length);
1799 skb_put(skb, length); 1799 skb_put(skb, length);
1800 pci_unmap_single(qdev->pdev, 1800 pci_unmap_single(qdev->pdev,
1801 pci_unmap_addr(sbq_desc, 1801 dma_unmap_addr(sbq_desc,
1802 mapaddr), 1802 mapaddr),
1803 pci_unmap_len(sbq_desc, 1803 dma_unmap_len(sbq_desc,
1804 maplen), 1804 maplen),
1805 PCI_DMA_FROMDEVICE); 1805 PCI_DMA_FROMDEVICE);
1806 sbq_desc->p.skb = NULL; 1806 sbq_desc->p.skb = NULL;
@@ -1839,9 +1839,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1839 return NULL; 1839 return NULL;
1840 } 1840 }
1841 pci_unmap_page(qdev->pdev, 1841 pci_unmap_page(qdev->pdev,
1842 pci_unmap_addr(lbq_desc, 1842 dma_unmap_addr(lbq_desc,
1843 mapaddr), 1843 mapaddr),
1844 pci_unmap_len(lbq_desc, maplen), 1844 dma_unmap_len(lbq_desc, maplen),
1845 PCI_DMA_FROMDEVICE); 1845 PCI_DMA_FROMDEVICE);
1846 skb_reserve(skb, NET_IP_ALIGN); 1846 skb_reserve(skb, NET_IP_ALIGN);
1847 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, 1847 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1874,8 +1874,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1874 int size, i = 0; 1874 int size, i = 0;
1875 sbq_desc = ql_get_curr_sbuf(rx_ring); 1875 sbq_desc = ql_get_curr_sbuf(rx_ring);
1876 pci_unmap_single(qdev->pdev, 1876 pci_unmap_single(qdev->pdev,
1877 pci_unmap_addr(sbq_desc, mapaddr), 1877 dma_unmap_addr(sbq_desc, mapaddr),
1878 pci_unmap_len(sbq_desc, maplen), 1878 dma_unmap_len(sbq_desc, maplen),
1879 PCI_DMA_FROMDEVICE); 1879 PCI_DMA_FROMDEVICE);
1880 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { 1880 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1881 /* 1881 /*
@@ -2737,8 +2737,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
2737 } 2737 }
2738 if (sbq_desc->p.skb) { 2738 if (sbq_desc->p.skb) {
2739 pci_unmap_single(qdev->pdev, 2739 pci_unmap_single(qdev->pdev,
2740 pci_unmap_addr(sbq_desc, mapaddr), 2740 dma_unmap_addr(sbq_desc, mapaddr),
2741 pci_unmap_len(sbq_desc, maplen), 2741 dma_unmap_len(sbq_desc, maplen),
2742 PCI_DMA_FROMDEVICE); 2742 PCI_DMA_FROMDEVICE);
2743 dev_kfree_skb(sbq_desc->p.skb); 2743 dev_kfree_skb(sbq_desc->p.skb);
2744 sbq_desc->p.skb = NULL; 2744 sbq_desc->p.skb = NULL;
@@ -4207,7 +4207,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
4207static void qlge_set_multicast_list(struct net_device *ndev) 4207static void qlge_set_multicast_list(struct net_device *ndev)
4208{ 4208{
4209 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4209 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4210 struct dev_mc_list *mc_ptr; 4210 struct netdev_hw_addr *ha;
4211 int i, status; 4211 int i, status;
4212 4212
4213 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 4213 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
@@ -4271,8 +4271,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
4271 if (status) 4271 if (status)
4272 goto exit; 4272 goto exit;
4273 i = 0; 4273 i = 0;
4274 netdev_for_each_mc_addr(mc_ptr, ndev) { 4274 netdev_for_each_mc_addr(ha, ndev) {
4275 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr, 4275 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4276 MAC_ADDR_TYPE_MULTI_MAC, i)) { 4276 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4277 netif_err(qdev, hw, qdev->ndev, 4277 netif_err(qdev, hw, qdev->ndev,
4278 "Failed to loadmulticast address.\n"); 4278 "Failed to loadmulticast address.\n");
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 0298d8c1dcb6..9a251acf5ab8 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -330,7 +330,7 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
330 do { 330 do {
331 skb = netdev_alloc_skb(dev, MAX_BUF_SIZE); 331 skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
332 if (!skb) { 332 if (!skb) {
333 printk(KERN_ERR DRV_NAME "%s: failed to alloc skb for rx\n", dev->name); 333 netdev_err(dev, "failed to alloc skb for rx\n");
334 rc = -ENOMEM; 334 rc = -ENOMEM;
335 goto err_exit; 335 goto err_exit;
336 } 336 }
@@ -400,9 +400,6 @@ static void r6040_init_mac_regs(struct net_device *dev)
400 * we may got called by r6040_tx_timeout which has left 400 * we may got called by r6040_tx_timeout which has left
401 * some unsent tx buffers */ 401 * some unsent tx buffers */
402 iowrite16(0x01, ioaddr + MTPR); 402 iowrite16(0x01, ioaddr + MTPR);
403
404 /* Check media */
405 mii_check_media(&lp->mii_if, 1, 1);
406} 403}
407 404
408static void r6040_tx_timeout(struct net_device *dev) 405static void r6040_tx_timeout(struct net_device *dev)
@@ -410,9 +407,9 @@ static void r6040_tx_timeout(struct net_device *dev)
410 struct r6040_private *priv = netdev_priv(dev); 407 struct r6040_private *priv = netdev_priv(dev);
411 void __iomem *ioaddr = priv->base; 408 void __iomem *ioaddr = priv->base;
412 409
413 printk(KERN_WARNING "%s: transmit timed out, int enable %4.4x " 410 netdev_warn(dev, "transmit timed out, int enable %4.4x "
414 "status %4.4x, PHY status %4.4x\n", 411 "status %4.4x, PHY status %4.4x\n",
415 dev->name, ioread16(ioaddr + MIER), 412 ioread16(ioaddr + MIER),
416 ioread16(ioaddr + MISR), 413 ioread16(ioaddr + MISR),
417 r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR)); 414 r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
418 415
@@ -530,8 +527,6 @@ static int r6040_phy_mode_chk(struct net_device *dev)
530 phy_dat = 0x0000; 527 phy_dat = 0x0000;
531 } 528 }
532 529
533 mii_check_media(&lp->mii_if, 0, 1);
534
535 return phy_dat; 530 return phy_dat;
536}; 531};
537 532
@@ -813,6 +808,9 @@ static void r6040_timer(unsigned long data)
813 808
814 /* Timer active again */ 809 /* Timer active again */
815 mod_timer(&lp->timer, round_jiffies(jiffies + HZ)); 810 mod_timer(&lp->timer, round_jiffies(jiffies + HZ));
811
812 /* Check media */
813 mii_check_media(&lp->mii_if, 1, 1);
816} 814}
817 815
818/* Read/set MAC address routines */ 816/* Read/set MAC address routines */
@@ -897,7 +895,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
897 if (!lp->tx_free_desc) { 895 if (!lp->tx_free_desc) {
898 spin_unlock_irqrestore(&lp->lock, flags); 896 spin_unlock_irqrestore(&lp->lock, flags);
899 netif_stop_queue(dev); 897 netif_stop_queue(dev);
900 printk(KERN_ERR DRV_NAME ": no tx descriptor\n"); 898 netdev_err(dev, ": no tx descriptor\n");
901 return NETDEV_TX_BUSY; 899 return NETDEV_TX_BUSY;
902 } 900 }
903 901
@@ -924,7 +922,6 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
924 if (!lp->tx_free_desc) 922 if (!lp->tx_free_desc)
925 netif_stop_queue(dev); 923 netif_stop_queue(dev);
926 924
927 dev->trans_start = jiffies;
928 spin_unlock_irqrestore(&lp->lock, flags); 925 spin_unlock_irqrestore(&lp->lock, flags);
929 926
930 return NETDEV_TX_OK; 927 return NETDEV_TX_OK;
@@ -937,7 +934,7 @@ static void r6040_multicast_list(struct net_device *dev)
937 u16 *adrp; 934 u16 *adrp;
938 u16 reg; 935 u16 reg;
939 unsigned long flags; 936 unsigned long flags;
940 struct dev_mc_list *dmi; 937 struct netdev_hw_addr *ha;
941 int i; 938 int i;
942 939
943 /* MAC Address */ 940 /* MAC Address */
@@ -972,8 +969,8 @@ static void r6040_multicast_list(struct net_device *dev)
972 for (i = 0; i < 4; i++) 969 for (i = 0; i < 4; i++)
973 hash_table[i] = 0; 970 hash_table[i] = 0;
974 971
975 netdev_for_each_mc_addr(dmi, dev) { 972 netdev_for_each_mc_addr(ha, dev) {
976 char *addrs = dmi->dmi_addr; 973 char *addrs = ha->addr;
977 974
978 if (!(*addrs & 1)) 975 if (!(*addrs & 1))
979 continue; 976 continue;
@@ -990,9 +987,9 @@ static void r6040_multicast_list(struct net_device *dev)
990 } 987 }
991 /* Multicast Address 1~4 case */ 988 /* Multicast Address 1~4 case */
992 i = 0; 989 i = 0;
993 netdev_for_each_mc_addr(dmi, dev) { 990 netdev_for_each_mc_addr(ha, dev) {
994 if (i < MCAST_MAX) { 991 if (i < MCAST_MAX) {
995 adrp = (u16 *) dmi->dmi_addr; 992 adrp = (u16 *) ha->addr;
996 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); 993 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
997 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); 994 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
998 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); 995 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
@@ -1090,20 +1087,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1090 /* this should always be supported */ 1087 /* this should always be supported */
1091 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1088 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1092 if (err) { 1089 if (err) {
1093 printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses" 1090 dev_err(&pdev->dev, "32-bit PCI DMA addresses"
1094 "not supported by the card\n"); 1091 "not supported by the card\n");
1095 goto err_out; 1092 goto err_out;
1096 } 1093 }
1097 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1094 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1098 if (err) { 1095 if (err) {
1099 printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses" 1096 dev_err(&pdev->dev, "32-bit PCI DMA addresses"
1100 "not supported by the card\n"); 1097 "not supported by the card\n");
1101 goto err_out; 1098 goto err_out;
1102 } 1099 }
1103 1100
1104 /* IO Size check */ 1101 /* IO Size check */
1105 if (pci_resource_len(pdev, bar) < io_size) { 1102 if (pci_resource_len(pdev, bar) < io_size) {
1106 printk(KERN_ERR DRV_NAME ": Insufficient PCI resources, aborting\n"); 1103 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
1107 err = -EIO; 1104 err = -EIO;
1108 goto err_out; 1105 goto err_out;
1109 } 1106 }
@@ -1112,7 +1109,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1112 1109
1113 dev = alloc_etherdev(sizeof(struct r6040_private)); 1110 dev = alloc_etherdev(sizeof(struct r6040_private));
1114 if (!dev) { 1111 if (!dev) {
1115 printk(KERN_ERR DRV_NAME ": Failed to allocate etherdev\n"); 1112 dev_err(&pdev->dev, "Failed to allocate etherdev\n");
1116 err = -ENOMEM; 1113 err = -ENOMEM;
1117 goto err_out; 1114 goto err_out;
1118 } 1115 }
@@ -1122,14 +1119,13 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1122 err = pci_request_regions(pdev, DRV_NAME); 1119 err = pci_request_regions(pdev, DRV_NAME);
1123 1120
1124 if (err) { 1121 if (err) {
1125 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n"); 1122 dev_err(&pdev->dev, "Failed to request PCI regions\n");
1126 goto err_out_free_dev; 1123 goto err_out_free_dev;
1127 } 1124 }
1128 1125
1129 ioaddr = pci_iomap(pdev, bar, io_size); 1126 ioaddr = pci_iomap(pdev, bar, io_size);
1130 if (!ioaddr) { 1127 if (!ioaddr) {
1131 printk(KERN_ERR DRV_NAME ": ioremap failed for device %s\n", 1128 dev_err(&pdev->dev, "ioremap failed for device\n");
1132 pci_name(pdev));
1133 err = -EIO; 1129 err = -EIO;
1134 goto err_out_free_res; 1130 goto err_out_free_res;
1135 } 1131 }
@@ -1156,7 +1152,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1156 /* Some bootloader/BIOSes do not initialize 1152 /* Some bootloader/BIOSes do not initialize
1157 * MAC address, warn about that */ 1153 * MAC address, warn about that */
1158 if (!(adrp[0] || adrp[1] || adrp[2])) { 1154 if (!(adrp[0] || adrp[1] || adrp[2])) {
1159 printk(KERN_WARNING DRV_NAME ": MAC address not initialized, generating random\n"); 1155 netdev_warn(dev, "MAC address not initialized, generating random\n");
1160 random_ether_addr(dev->dev_addr); 1156 random_ether_addr(dev->dev_addr);
1161 } 1157 }
1162 1158
@@ -1184,7 +1180,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1184 1180
1185 /* Check the vendor ID on the PHY, if 0xffff assume none attached */ 1181 /* Check the vendor ID on the PHY, if 0xffff assume none attached */
1186 if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) { 1182 if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) {
1187 printk(KERN_ERR DRV_NAME ": Failed to detect an attached PHY\n"); 1183 dev_err(&pdev->dev, "Failed to detect an attached PHY\n");
1188 err = -ENODEV; 1184 err = -ENODEV;
1189 goto err_out_unmap; 1185 goto err_out_unmap;
1190 } 1186 }
@@ -1192,7 +1188,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1192 /* Register net device. After this dev->name assign */ 1188 /* Register net device. After this dev->name assign */
1193 err = register_netdev(dev); 1189 err = register_netdev(dev);
1194 if (err) { 1190 if (err) {
1195 printk(KERN_ERR DRV_NAME ": Failed to register net device\n"); 1191 dev_err(&pdev->dev, "Failed to register net device\n");
1196 goto err_out_unmap; 1192 goto err_out_unmap;
1197 } 1193 }
1198 return 0; 1194 return 0;
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index dbb1f5a1824c..217e709bda3e 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -23,6 +23,7 @@
23#include <linux/tcp.h> 23#include <linux/tcp.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/pm_runtime.h>
26 27
27#include <asm/system.h> 28#include <asm/system.h>
28#include <asm/io.h> 29#include <asm/io.h>
@@ -509,6 +510,7 @@ struct rtl8169_private {
509 510
510 struct mii_if_info mii; 511 struct mii_if_info mii;
511 struct rtl8169_counters counters; 512 struct rtl8169_counters counters;
513 u32 saved_wolopts;
512}; 514};
513 515
514MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 516MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -748,53 +750,61 @@ static void rtl8169_check_link_status(struct net_device *dev,
748 750
749 spin_lock_irqsave(&tp->lock, flags); 751 spin_lock_irqsave(&tp->lock, flags);
750 if (tp->link_ok(ioaddr)) { 752 if (tp->link_ok(ioaddr)) {
753 /* This is to cancel a scheduled suspend if there's one. */
754 pm_request_resume(&tp->pci_dev->dev);
751 netif_carrier_on(dev); 755 netif_carrier_on(dev);
752 netif_info(tp, ifup, dev, "link up\n"); 756 netif_info(tp, ifup, dev, "link up\n");
753 } else { 757 } else {
754 netif_carrier_off(dev); 758 netif_carrier_off(dev);
755 netif_info(tp, ifdown, dev, "link down\n"); 759 netif_info(tp, ifdown, dev, "link down\n");
760 pm_schedule_suspend(&tp->pci_dev->dev, 100);
756 } 761 }
757 spin_unlock_irqrestore(&tp->lock, flags); 762 spin_unlock_irqrestore(&tp->lock, flags);
758} 763}
759 764
760static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 765#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
766
767static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
761{ 768{
762 struct rtl8169_private *tp = netdev_priv(dev);
763 void __iomem *ioaddr = tp->mmio_addr; 769 void __iomem *ioaddr = tp->mmio_addr;
764 u8 options; 770 u8 options;
765 771 u32 wolopts = 0;
766 wol->wolopts = 0;
767
768#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
769 wol->supported = WAKE_ANY;
770
771 spin_lock_irq(&tp->lock);
772 772
773 options = RTL_R8(Config1); 773 options = RTL_R8(Config1);
774 if (!(options & PMEnable)) 774 if (!(options & PMEnable))
775 goto out_unlock; 775 return 0;
776 776
777 options = RTL_R8(Config3); 777 options = RTL_R8(Config3);
778 if (options & LinkUp) 778 if (options & LinkUp)
779 wol->wolopts |= WAKE_PHY; 779 wolopts |= WAKE_PHY;
780 if (options & MagicPacket) 780 if (options & MagicPacket)
781 wol->wolopts |= WAKE_MAGIC; 781 wolopts |= WAKE_MAGIC;
782 782
783 options = RTL_R8(Config5); 783 options = RTL_R8(Config5);
784 if (options & UWF) 784 if (options & UWF)
785 wol->wolopts |= WAKE_UCAST; 785 wolopts |= WAKE_UCAST;
786 if (options & BWF) 786 if (options & BWF)
787 wol->wolopts |= WAKE_BCAST; 787 wolopts |= WAKE_BCAST;
788 if (options & MWF) 788 if (options & MWF)
789 wol->wolopts |= WAKE_MCAST; 789 wolopts |= WAKE_MCAST;
790 790
791out_unlock: 791 return wolopts;
792 spin_unlock_irq(&tp->lock);
793} 792}
794 793
795static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 794static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
796{ 795{
797 struct rtl8169_private *tp = netdev_priv(dev); 796 struct rtl8169_private *tp = netdev_priv(dev);
797
798 spin_lock_irq(&tp->lock);
799
800 wol->supported = WAKE_ANY;
801 wol->wolopts = __rtl8169_get_wol(tp);
802
803 spin_unlock_irq(&tp->lock);
804}
805
806static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
807{
798 void __iomem *ioaddr = tp->mmio_addr; 808 void __iomem *ioaddr = tp->mmio_addr;
799 unsigned int i; 809 unsigned int i;
800 static const struct { 810 static const struct {
@@ -811,23 +821,29 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
811 { WAKE_ANY, Config5, LanWake } 821 { WAKE_ANY, Config5, LanWake }
812 }; 822 };
813 823
814 spin_lock_irq(&tp->lock);
815
816 RTL_W8(Cfg9346, Cfg9346_Unlock); 824 RTL_W8(Cfg9346, Cfg9346_Unlock);
817 825
818 for (i = 0; i < ARRAY_SIZE(cfg); i++) { 826 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
819 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; 827 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
820 if (wol->wolopts & cfg[i].opt) 828 if (wolopts & cfg[i].opt)
821 options |= cfg[i].mask; 829 options |= cfg[i].mask;
822 RTL_W8(cfg[i].reg, options); 830 RTL_W8(cfg[i].reg, options);
823 } 831 }
824 832
825 RTL_W8(Cfg9346, Cfg9346_Lock); 833 RTL_W8(Cfg9346, Cfg9346_Lock);
834}
835
836static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
837{
838 struct rtl8169_private *tp = netdev_priv(dev);
839
840 spin_lock_irq(&tp->lock);
826 841
827 if (wol->wolopts) 842 if (wol->wolopts)
828 tp->features |= RTL_FEATURE_WOL; 843 tp->features |= RTL_FEATURE_WOL;
829 else 844 else
830 tp->features &= ~RTL_FEATURE_WOL; 845 tp->features &= ~RTL_FEATURE_WOL;
846 __rtl8169_set_wol(tp, wol->wolopts);
831 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); 847 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
832 848
833 spin_unlock_irq(&tp->lock); 849 spin_unlock_irq(&tp->lock);
@@ -1042,14 +1058,14 @@ static void rtl8169_vlan_rx_register(struct net_device *dev,
1042} 1058}
1043 1059
1044static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, 1060static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1045 struct sk_buff *skb) 1061 struct sk_buff *skb, int polling)
1046{ 1062{
1047 u32 opts2 = le32_to_cpu(desc->opts2); 1063 u32 opts2 = le32_to_cpu(desc->opts2);
1048 struct vlan_group *vlgrp = tp->vlgrp; 1064 struct vlan_group *vlgrp = tp->vlgrp;
1049 int ret; 1065 int ret;
1050 1066
1051 if (vlgrp && (opts2 & RxVlanTag)) { 1067 if (vlgrp && (opts2 & RxVlanTag)) {
1052 vlan_hwaccel_receive_skb(skb, vlgrp, swab16(opts2 & 0xffff)); 1068 __vlan_hwaccel_rx(skb, vlgrp, swab16(opts2 & 0xffff), polling);
1053 ret = 0; 1069 ret = 0;
1054 } else 1070 } else
1055 ret = -1; 1071 ret = -1;
@@ -1066,7 +1082,7 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1066} 1082}
1067 1083
1068static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, 1084static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1069 struct sk_buff *skb) 1085 struct sk_buff *skb, int polling)
1070{ 1086{
1071 return -1; 1087 return -1;
1072} 1088}
@@ -2759,6 +2775,7 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
2759{ 2775{
2760 iounmap(ioaddr); 2776 iounmap(ioaddr);
2761 pci_release_regions(pdev); 2777 pci_release_regions(pdev);
2778 pci_clear_mwi(pdev);
2762 pci_disable_device(pdev); 2779 pci_disable_device(pdev);
2763 free_netdev(dev); 2780 free_netdev(dev);
2764} 2781}
@@ -2825,8 +2842,13 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
2825 spin_lock_irq(&tp->lock); 2842 spin_lock_irq(&tp->lock);
2826 2843
2827 RTL_W8(Cfg9346, Cfg9346_Unlock); 2844 RTL_W8(Cfg9346, Cfg9346_Unlock);
2845
2828 RTL_W32(MAC4, high); 2846 RTL_W32(MAC4, high);
2847 RTL_R32(MAC4);
2848
2829 RTL_W32(MAC0, low); 2849 RTL_W32(MAC0, low);
2850 RTL_R32(MAC0);
2851
2830 RTL_W8(Cfg9346, Cfg9346_Lock); 2852 RTL_W8(Cfg9346, Cfg9346_Lock);
2831 2853
2832 spin_unlock_irq(&tp->lock); 2854 spin_unlock_irq(&tp->lock);
@@ -3014,9 +3036,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3014 goto err_out_free_dev_1; 3036 goto err_out_free_dev_1;
3015 } 3037 }
3016 3038
3017 rc = pci_set_mwi(pdev); 3039 if (pci_set_mwi(pdev) < 0)
3018 if (rc < 0) 3040 netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
3019 goto err_out_disable_2;
3020 3041
3021 /* make sure PCI base addr 1 is MMIO */ 3042 /* make sure PCI base addr 1 is MMIO */
3022 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { 3043 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
@@ -3024,7 +3045,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3024 "region #%d not an MMIO resource, aborting\n", 3045 "region #%d not an MMIO resource, aborting\n",
3025 region); 3046 region);
3026 rc = -ENODEV; 3047 rc = -ENODEV;
3027 goto err_out_mwi_3; 3048 goto err_out_mwi_2;
3028 } 3049 }
3029 3050
3030 /* check for weird/broken PCI region reporting */ 3051 /* check for weird/broken PCI region reporting */
@@ -3032,13 +3053,13 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3032 netif_err(tp, probe, dev, 3053 netif_err(tp, probe, dev,
3033 "Invalid PCI region size(s), aborting\n"); 3054 "Invalid PCI region size(s), aborting\n");
3034 rc = -ENODEV; 3055 rc = -ENODEV;
3035 goto err_out_mwi_3; 3056 goto err_out_mwi_2;
3036 } 3057 }
3037 3058
3038 rc = pci_request_regions(pdev, MODULENAME); 3059 rc = pci_request_regions(pdev, MODULENAME);
3039 if (rc < 0) { 3060 if (rc < 0) {
3040 netif_err(tp, probe, dev, "could not request regions\n"); 3061 netif_err(tp, probe, dev, "could not request regions\n");
3041 goto err_out_mwi_3; 3062 goto err_out_mwi_2;
3042 } 3063 }
3043 3064
3044 tp->cp_cmd = PCIMulRW | RxChkSum; 3065 tp->cp_cmd = PCIMulRW | RxChkSum;
@@ -3051,7 +3072,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3051 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 3072 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3052 if (rc < 0) { 3073 if (rc < 0) {
3053 netif_err(tp, probe, dev, "DMA configuration failed\n"); 3074 netif_err(tp, probe, dev, "DMA configuration failed\n");
3054 goto err_out_free_res_4; 3075 goto err_out_free_res_3;
3055 } 3076 }
3056 } 3077 }
3057 3078
@@ -3060,7 +3081,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3060 if (!ioaddr) { 3081 if (!ioaddr) {
3061 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); 3082 netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
3062 rc = -EIO; 3083 rc = -EIO;
3063 goto err_out_free_res_4; 3084 goto err_out_free_res_3;
3064 } 3085 }
3065 3086
3066 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); 3087 tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
@@ -3102,7 +3123,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3102 if (i == ARRAY_SIZE(rtl_chip_info)) { 3123 if (i == ARRAY_SIZE(rtl_chip_info)) {
3103 dev_err(&pdev->dev, 3124 dev_err(&pdev->dev,
3104 "driver bug, MAC version not found in rtl_chip_info\n"); 3125 "driver bug, MAC version not found in rtl_chip_info\n");
3105 goto err_out_msi_5; 3126 goto err_out_msi_4;
3106 } 3127 }
3107 tp->chipset = i; 3128 tp->chipset = i;
3108 3129
@@ -3167,7 +3188,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3167 3188
3168 rc = register_netdev(dev); 3189 rc = register_netdev(dev);
3169 if (rc < 0) 3190 if (rc < 0)
3170 goto err_out_msi_5; 3191 goto err_out_msi_4;
3171 3192
3172 pci_set_drvdata(pdev, dev); 3193 pci_set_drvdata(pdev, dev);
3173 3194
@@ -3187,17 +3208,22 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3187 3208
3188 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); 3209 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
3189 3210
3211 if (pci_dev_run_wake(pdev)) {
3212 pm_runtime_set_active(&pdev->dev);
3213 pm_runtime_enable(&pdev->dev);
3214 }
3215 pm_runtime_idle(&pdev->dev);
3216
3190out: 3217out:
3191 return rc; 3218 return rc;
3192 3219
3193err_out_msi_5: 3220err_out_msi_4:
3194 rtl_disable_msi(pdev, tp); 3221 rtl_disable_msi(pdev, tp);
3195 iounmap(ioaddr); 3222 iounmap(ioaddr);
3196err_out_free_res_4: 3223err_out_free_res_3:
3197 pci_release_regions(pdev); 3224 pci_release_regions(pdev);
3198err_out_mwi_3: 3225err_out_mwi_2:
3199 pci_clear_mwi(pdev); 3226 pci_clear_mwi(pdev);
3200err_out_disable_2:
3201 pci_disable_device(pdev); 3227 pci_disable_device(pdev);
3202err_out_free_dev_1: 3228err_out_free_dev_1:
3203 free_netdev(dev); 3229 free_netdev(dev);
@@ -3209,10 +3235,18 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3209 struct net_device *dev = pci_get_drvdata(pdev); 3235 struct net_device *dev = pci_get_drvdata(pdev);
3210 struct rtl8169_private *tp = netdev_priv(dev); 3236 struct rtl8169_private *tp = netdev_priv(dev);
3211 3237
3238 pm_runtime_get_sync(&pdev->dev);
3239
3212 flush_scheduled_work(); 3240 flush_scheduled_work();
3213 3241
3214 unregister_netdev(dev); 3242 unregister_netdev(dev);
3215 3243
3244 if (pci_dev_run_wake(pdev)) {
3245 pm_runtime_disable(&pdev->dev);
3246 pm_runtime_set_suspended(&pdev->dev);
3247 }
3248 pm_runtime_put_noidle(&pdev->dev);
3249
3216 /* restore original MAC address */ 3250 /* restore original MAC address */
3217 rtl_rar_set(tp, dev->perm_addr); 3251 rtl_rar_set(tp, dev->perm_addr);
3218 3252
@@ -3239,6 +3273,7 @@ static int rtl8169_open(struct net_device *dev)
3239 struct pci_dev *pdev = tp->pci_dev; 3273 struct pci_dev *pdev = tp->pci_dev;
3240 int retval = -ENOMEM; 3274 int retval = -ENOMEM;
3241 3275
3276 pm_runtime_get_sync(&pdev->dev);
3242 3277
3243 /* 3278 /*
3244 * Note that we use a magic value here, its wierd I know 3279 * Note that we use a magic value here, its wierd I know
@@ -3259,7 +3294,7 @@ static int rtl8169_open(struct net_device *dev)
3259 tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES, 3294 tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
3260 &tp->TxPhyAddr); 3295 &tp->TxPhyAddr);
3261 if (!tp->TxDescArray) 3296 if (!tp->TxDescArray)
3262 goto out; 3297 goto err_pm_runtime_put;
3263 3298
3264 tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES, 3299 tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
3265 &tp->RxPhyAddr); 3300 &tp->RxPhyAddr);
@@ -3286,6 +3321,9 @@ static int rtl8169_open(struct net_device *dev)
3286 3321
3287 rtl8169_request_timer(dev); 3322 rtl8169_request_timer(dev);
3288 3323
3324 tp->saved_wolopts = 0;
3325 pm_runtime_put_noidle(&pdev->dev);
3326
3289 rtl8169_check_link_status(dev, tp, tp->mmio_addr); 3327 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
3290out: 3328out:
3291 return retval; 3329 return retval;
@@ -3295,9 +3333,13 @@ err_release_ring_2:
3295err_free_rx_1: 3333err_free_rx_1:
3296 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, 3334 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
3297 tp->RxPhyAddr); 3335 tp->RxPhyAddr);
3336 tp->RxDescArray = NULL;
3298err_free_tx_0: 3337err_free_tx_0:
3299 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, 3338 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
3300 tp->TxPhyAddr); 3339 tp->TxPhyAddr);
3340 tp->TxDescArray = NULL;
3341err_pm_runtime_put:
3342 pm_runtime_put_noidle(&pdev->dev);
3301 goto out; 3343 goto out;
3302} 3344}
3303 3345
@@ -4441,12 +4483,20 @@ out:
4441 return done; 4483 return done;
4442} 4484}
4443 4485
4486/*
4487 * Warning : rtl8169_rx_interrupt() might be called :
4488 * 1) from NAPI (softirq) context
4489 * (polling = 1 : we should call netif_receive_skb())
4490 * 2) from process context (rtl8169_reset_task())
4491 * (polling = 0 : we must call netif_rx() instead)
4492 */
4444static int rtl8169_rx_interrupt(struct net_device *dev, 4493static int rtl8169_rx_interrupt(struct net_device *dev,
4445 struct rtl8169_private *tp, 4494 struct rtl8169_private *tp,
4446 void __iomem *ioaddr, u32 budget) 4495 void __iomem *ioaddr, u32 budget)
4447{ 4496{
4448 unsigned int cur_rx, rx_left; 4497 unsigned int cur_rx, rx_left;
4449 unsigned int delta, count; 4498 unsigned int delta, count;
4499 int polling = (budget != ~(u32)0) ? 1 : 0;
4450 4500
4451 cur_rx = tp->cur_rx; 4501 cur_rx = tp->cur_rx;
4452 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; 4502 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
@@ -4508,8 +4558,12 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4508 skb_put(skb, pkt_size); 4558 skb_put(skb, pkt_size);
4509 skb->protocol = eth_type_trans(skb, dev); 4559 skb->protocol = eth_type_trans(skb, dev);
4510 4560
4511 if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0) 4561 if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
4512 netif_receive_skb(skb); 4562 if (likely(polling))
4563 netif_receive_skb(skb);
4564 else
4565 netif_rx(skb);
4566 }
4513 4567
4514 dev->stats.rx_bytes += pkt_size; 4568 dev->stats.rx_bytes += pkt_size;
4515 dev->stats.rx_packets++; 4569 dev->stats.rx_packets++;
@@ -4704,6 +4758,8 @@ static int rtl8169_close(struct net_device *dev)
4704 struct rtl8169_private *tp = netdev_priv(dev); 4758 struct rtl8169_private *tp = netdev_priv(dev);
4705 struct pci_dev *pdev = tp->pci_dev; 4759 struct pci_dev *pdev = tp->pci_dev;
4706 4760
4761 pm_runtime_get_sync(&pdev->dev);
4762
4707 /* update counters before going down */ 4763 /* update counters before going down */
4708 rtl8169_update_counters(dev); 4764 rtl8169_update_counters(dev);
4709 4765
@@ -4718,6 +4774,8 @@ static int rtl8169_close(struct net_device *dev)
4718 tp->TxDescArray = NULL; 4774 tp->TxDescArray = NULL;
4719 tp->RxDescArray = NULL; 4775 tp->RxDescArray = NULL;
4720 4776
4777 pm_runtime_put_sync(&pdev->dev);
4778
4721 return 0; 4779 return 0;
4722} 4780}
4723 4781
@@ -4743,12 +4801,12 @@ static void rtl_set_rx_mode(struct net_device *dev)
4743 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 4801 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4744 mc_filter[1] = mc_filter[0] = 0xffffffff; 4802 mc_filter[1] = mc_filter[0] = 0xffffffff;
4745 } else { 4803 } else {
4746 struct dev_mc_list *mclist; 4804 struct netdev_hw_addr *ha;
4747 4805
4748 rx_mode = AcceptBroadcast | AcceptMyPhys; 4806 rx_mode = AcceptBroadcast | AcceptMyPhys;
4749 mc_filter[1] = mc_filter[0] = 0; 4807 mc_filter[1] = mc_filter[0] = 0;
4750 netdev_for_each_mc_addr(mclist, dev) { 4808 netdev_for_each_mc_addr(ha, dev) {
4751 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 4809 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4752 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 4810 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4753 rx_mode |= AcceptMulticast; 4811 rx_mode |= AcceptMulticast;
4754 } 4812 }
@@ -4816,21 +4874,74 @@ static int rtl8169_suspend(struct device *device)
4816 return 0; 4874 return 0;
4817} 4875}
4818 4876
4877static void __rtl8169_resume(struct net_device *dev)
4878{
4879 netif_device_attach(dev);
4880 rtl8169_schedule_work(dev, rtl8169_reset_task);
4881}
4882
4819static int rtl8169_resume(struct device *device) 4883static int rtl8169_resume(struct device *device)
4820{ 4884{
4821 struct pci_dev *pdev = to_pci_dev(device); 4885 struct pci_dev *pdev = to_pci_dev(device);
4822 struct net_device *dev = pci_get_drvdata(pdev); 4886 struct net_device *dev = pci_get_drvdata(pdev);
4823 4887
4824 if (!netif_running(dev)) 4888 if (netif_running(dev))
4825 goto out; 4889 __rtl8169_resume(dev);
4826 4890
4827 netif_device_attach(dev); 4891 return 0;
4892}
4893
4894static int rtl8169_runtime_suspend(struct device *device)
4895{
4896 struct pci_dev *pdev = to_pci_dev(device);
4897 struct net_device *dev = pci_get_drvdata(pdev);
4898 struct rtl8169_private *tp = netdev_priv(dev);
4899
4900 if (!tp->TxDescArray)
4901 return 0;
4902
4903 spin_lock_irq(&tp->lock);
4904 tp->saved_wolopts = __rtl8169_get_wol(tp);
4905 __rtl8169_set_wol(tp, WAKE_ANY);
4906 spin_unlock_irq(&tp->lock);
4907
4908 rtl8169_net_suspend(dev);
4828 4909
4829 rtl8169_schedule_work(dev, rtl8169_reset_task);
4830out:
4831 return 0; 4910 return 0;
4832} 4911}
4833 4912
4913static int rtl8169_runtime_resume(struct device *device)
4914{
4915 struct pci_dev *pdev = to_pci_dev(device);
4916 struct net_device *dev = pci_get_drvdata(pdev);
4917 struct rtl8169_private *tp = netdev_priv(dev);
4918
4919 if (!tp->TxDescArray)
4920 return 0;
4921
4922 spin_lock_irq(&tp->lock);
4923 __rtl8169_set_wol(tp, tp->saved_wolopts);
4924 tp->saved_wolopts = 0;
4925 spin_unlock_irq(&tp->lock);
4926
4927 __rtl8169_resume(dev);
4928
4929 return 0;
4930}
4931
4932static int rtl8169_runtime_idle(struct device *device)
4933{
4934 struct pci_dev *pdev = to_pci_dev(device);
4935 struct net_device *dev = pci_get_drvdata(pdev);
4936 struct rtl8169_private *tp = netdev_priv(dev);
4937
4938 if (!tp->TxDescArray)
4939 return 0;
4940
4941 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
4942 return -EBUSY;
4943}
4944
4834static const struct dev_pm_ops rtl8169_pm_ops = { 4945static const struct dev_pm_ops rtl8169_pm_ops = {
4835 .suspend = rtl8169_suspend, 4946 .suspend = rtl8169_suspend,
4836 .resume = rtl8169_resume, 4947 .resume = rtl8169_resume,
@@ -4838,6 +4949,9 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
4838 .thaw = rtl8169_resume, 4949 .thaw = rtl8169_resume,
4839 .poweroff = rtl8169_suspend, 4950 .poweroff = rtl8169_suspend,
4840 .restore = rtl8169_resume, 4951 .restore = rtl8169_resume,
4952 .runtime_suspend = rtl8169_runtime_suspend,
4953 .runtime_resume = rtl8169_runtime_resume,
4954 .runtime_idle = rtl8169_runtime_idle,
4841}; 4955};
4842 4956
4843#define RTL8169_PM_OPS (&rtl8169_pm_ops) 4957#define RTL8169_PM_OPS (&rtl8169_pm_ops)
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index f2e335f0d1b7..e26e107f93e0 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1467,7 +1467,6 @@ static netdev_tx_t rr_start_xmit(struct sk_buff *skb,
1467 1467
1468 spin_unlock_irqrestore(&rrpriv->lock, flags); 1468 spin_unlock_irqrestore(&rrpriv->lock, flags);
1469 1469
1470 dev->trans_start = jiffies;
1471 return NETDEV_TX_OK; 1470 return NETDEV_TX_OK;
1472} 1471}
1473 1472
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 92ae8d3de39b..668327ccd8d0 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2400,7 +2400,7 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2400 return NULL; 2400 return NULL;
2401 } 2401 }
2402 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer, 2402 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2403 skb->len - skb->data_len, PCI_DMA_TODEVICE); 2403 skb_headlen(skb), PCI_DMA_TODEVICE);
2404 frg_cnt = skb_shinfo(skb)->nr_frags; 2404 frg_cnt = skb_shinfo(skb)->nr_frags;
2405 if (frg_cnt) { 2405 if (frg_cnt) {
2406 txds++; 2406 txds++;
@@ -2943,7 +2943,6 @@ static void s2io_netpoll(struct net_device *dev)
2943 } 2943 }
2944 } 2944 }
2945 enable_irq(dev->irq); 2945 enable_irq(dev->irq);
2946 return;
2947} 2946}
2948#endif 2947#endif
2949 2948
@@ -4202,7 +4201,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4202 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag); 4201 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4203 } 4202 }
4204 4203
4205 frg_len = skb->len - skb->data_len; 4204 frg_len = skb_headlen(skb);
4206 if (offload_type == SKB_GSO_UDP) { 4205 if (offload_type == SKB_GSO_UDP) {
4207 int ufo_size; 4206 int ufo_size;
4208 4207
@@ -4756,7 +4755,6 @@ reset:
4756 s2io_stop_all_tx_queue(sp); 4755 s2io_stop_all_tx_queue(sp);
4757 schedule_work(&sp->rst_timer_task); 4756 schedule_work(&sp->rst_timer_task);
4758 sw_stat->soft_reset_cnt++; 4757 sw_stat->soft_reset_cnt++;
4759 return;
4760} 4758}
4761 4759
4762/** 4760/**
@@ -4965,7 +4963,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4965static void s2io_set_multicast(struct net_device *dev) 4963static void s2io_set_multicast(struct net_device *dev)
4966{ 4964{
4967 int i, j, prev_cnt; 4965 int i, j, prev_cnt;
4968 struct dev_mc_list *mclist; 4966 struct netdev_hw_addr *ha;
4969 struct s2io_nic *sp = netdev_priv(dev); 4967 struct s2io_nic *sp = netdev_priv(dev);
4970 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4968 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4971 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask = 4969 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
@@ -5094,12 +5092,12 @@ static void s2io_set_multicast(struct net_device *dev)
5094 5092
5095 /* Create the new Rx filter list and update the same in H/W. */ 5093 /* Create the new Rx filter list and update the same in H/W. */
5096 i = 0; 5094 i = 0;
5097 netdev_for_each_mc_addr(mclist, dev) { 5095 netdev_for_each_mc_addr(ha, dev) {
5098 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr, 5096 memcpy(sp->usr_addrs[i].addr, ha->addr,
5099 ETH_ALEN); 5097 ETH_ALEN);
5100 mac_addr = 0; 5098 mac_addr = 0;
5101 for (j = 0; j < ETH_ALEN; j++) { 5099 for (j = 0; j < ETH_ALEN; j++) {
5102 mac_addr |= mclist->dmi_addr[j]; 5100 mac_addr |= ha->addr[j];
5103 mac_addr <<= 8; 5101 mac_addr <<= 8;
5104 } 5102 }
5105 mac_addr >>= 8; 5103 mac_addr >>= 8;
@@ -8645,7 +8643,6 @@ static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8645 first->truesize += skb->truesize; 8643 first->truesize += skb->truesize;
8646 lro->last_frag = skb; 8644 lro->last_frag = skb;
8647 swstats->clubbed_frms_cnt++; 8645 swstats->clubbed_frms_cnt++;
8648 return;
8649} 8646}
8650 8647
8651/** 8648/**
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
index 45f26344b368..a7ff8ea342b4 100644
--- a/drivers/net/s6gmac.c
+++ b/drivers/net/s6gmac.c
@@ -396,7 +396,6 @@ static void s6gmac_rx_interrupt(struct net_device *dev)
396 } else { 396 } else {
397 skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN) 397 skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
398 & S6_GMAC_BURST_POSTRD_LEN_MASK); 398 & S6_GMAC_BURST_POSTRD_LEN_MASK);
399 skb->dev = dev;
400 skb->protocol = eth_type_trans(skb, dev); 399 skb->protocol = eth_type_trans(skb, dev);
401 skb->ip_summed = CHECKSUM_UNNECESSARY; 400 skb->ip_summed = CHECKSUM_UNNECESSARY;
402 netif_rx(skb); 401 netif_rx(skb);
@@ -853,8 +852,8 @@ static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
853{ 852{
854 struct s6gmac *pd = netdev_priv(dev); 853 struct s6gmac *pd = netdev_priv(dev);
855 unsigned long flags; 854 unsigned long flags;
855
856 spin_lock_irqsave(&pd->lock, flags); 856 spin_lock_irqsave(&pd->lock, flags);
857 dev->trans_start = jiffies;
858 writel(skb->len << S6_GMAC_BURST_PREWR_LEN | 857 writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
859 0 << S6_GMAC_BURST_PREWR_CFE | 858 0 << S6_GMAC_BURST_PREWR_CFE |
860 1 << S6_GMAC_BURST_PREWR_PPE | 859 1 << S6_GMAC_BURST_PREWR_PPE |
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index abc8eefdd4b6..a9ae505e1baf 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -426,7 +426,6 @@ sb1000_send_command(const int ioaddr[], const char* name,
426 if (sb1000_debug > 3) 426 if (sb1000_debug > 3)
427 printk(KERN_DEBUG "%s: sb1000_send_command out: %02x%02x%02x%02x" 427 printk(KERN_DEBUG "%s: sb1000_send_command out: %02x%02x%02x%02x"
428 "%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5]); 428 "%02x%02x\n", name, out[0], out[1], out[2], out[3], out[4], out[5]);
429 return;
430} 429}
431 430
432/* Card Read Status (to be used during frame rx) */ 431/* Card Read Status (to be used during frame rx) */
@@ -438,7 +437,6 @@ sb1000_read_status(const int ioaddr[], unsigned char in[])
438 in[3] = inb(ioaddr[0] + 3); 437 in[3] = inb(ioaddr[0] + 3);
439 in[4] = inb(ioaddr[0] + 4); 438 in[4] = inb(ioaddr[0] + 4);
440 in[0] = inb(ioaddr[0] + 5); 439 in[0] = inb(ioaddr[0] + 5);
441 return;
442} 440}
443 441
444/* Issue Read Command (to be used during frame rx) */ 442/* Issue Read Command (to be used during frame rx) */
@@ -450,7 +448,6 @@ sb1000_issue_read_command(const int ioaddr[], const char* name)
450 sb1000_wait_for_ready_clear(ioaddr, name); 448 sb1000_wait_for_ready_clear(ioaddr, name);
451 outb(0xa0, ioaddr[0] + 6); 449 outb(0xa0, ioaddr[0] + 6);
452 sb1000_send_command(ioaddr, name, Command0); 450 sb1000_send_command(ioaddr, name, Command0);
453 return;
454} 451}
455 452
456 453
@@ -733,7 +730,6 @@ sb1000_print_status_buffer(const char* name, unsigned char st[],
733 printk("\n"); 730 printk("\n");
734 } 731 }
735 } 732 }
736 return;
737} 733}
738 734
739/* 735/*
@@ -926,7 +922,6 @@ sb1000_error_dpc(struct net_device *dev)
926 sb1000_read_status(ioaddr, st); 922 sb1000_read_status(ioaddr, st);
927 if (st[1] & 0x10) 923 if (st[1] & 0x10)
928 lp->rx_error_dpc_count = ErrorDpcCounterInitialize; 924 lp->rx_error_dpc_count = ErrorDpcCounterInitialize;
929 return;
930} 925}
931 926
932 927
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 9944e5d662c0..1f3acc3a5dfd 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -48,23 +48,6 @@
48#include <asm/io.h> 48#include <asm/io.h>
49#include <asm/processor.h> /* Processor type for cache alignment. */ 49#include <asm/processor.h> /* Processor type for cache alignment. */
50 50
51/* This is only here until the firmware is ready. In that case,
52 the firmware leaves the ethernet address in the register for us. */
53#ifdef CONFIG_SIBYTE_STANDALONE
54#define SBMAC_ETH0_HWADDR "40:00:00:00:01:00"
55#define SBMAC_ETH1_HWADDR "40:00:00:00:01:01"
56#define SBMAC_ETH2_HWADDR "40:00:00:00:01:02"
57#define SBMAC_ETH3_HWADDR "40:00:00:00:01:03"
58#endif
59
60
61/* These identify the driver base version and may not be removed. */
62#if 0
63static char version1[] __initdata =
64"sb1250-mac.c:1.00 1/11/2001 Written by Mitch Lichtenberg\n";
65#endif
66
67
68/* Operational parameters that usually are not changed. */ 51/* Operational parameters that usually are not changed. */
69 52
70#define CONFIG_SBMAC_COALESCE 53#define CONFIG_SBMAC_COALESCE
@@ -349,7 +332,6 @@ static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
349 ********************************************************************* */ 332 ********************************************************************* */
350 333
351static char sbmac_string[] = "sb1250-mac"; 334static char sbmac_string[] = "sb1250-mac";
352static char sbmac_pretty[] = "SB1250 MAC";
353 335
354static char sbmac_mdio_string[] = "sb1250-mac-mdio"; 336static char sbmac_mdio_string[] = "sb1250-mac-mdio";
355 337
@@ -2086,8 +2068,6 @@ static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
2086 return NETDEV_TX_BUSY; 2068 return NETDEV_TX_BUSY;
2087 } 2069 }
2088 2070
2089 dev->trans_start = jiffies;
2090
2091 spin_unlock_irqrestore(&sc->sbm_lock, flags); 2071 spin_unlock_irqrestore(&sc->sbm_lock, flags);
2092 2072
2093 return NETDEV_TX_OK; 2073 return NETDEV_TX_OK;
@@ -2112,7 +2092,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
2112 uint64_t reg; 2092 uint64_t reg;
2113 void __iomem *port; 2093 void __iomem *port;
2114 int idx; 2094 int idx;
2115 struct dev_mc_list *mclist; 2095 struct netdev_hw_addr *ha;
2116 struct net_device *dev = sc->sbm_dev; 2096 struct net_device *dev = sc->sbm_dev;
2117 2097
2118 /* 2098 /*
@@ -2161,10 +2141,10 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
2161 * XXX if the table overflows */ 2141 * XXX if the table overflows */
2162 2142
2163 idx = 1; /* skip station address */ 2143 idx = 1; /* skip station address */
2164 netdev_for_each_mc_addr(mclist, dev) { 2144 netdev_for_each_mc_addr(ha, dev) {
2165 if (idx == MAC_ADDR_COUNT) 2145 if (idx == MAC_ADDR_COUNT)
2166 break; 2146 break;
2167 reg = sbmac_addr2reg(mclist->dmi_addr); 2147 reg = sbmac_addr2reg(ha->addr);
2168 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t)); 2148 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
2169 __raw_writeq(reg, port); 2149 __raw_writeq(reg, port);
2170 idx++; 2150 idx++;
@@ -2182,85 +2162,6 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
2182 } 2162 }
2183} 2163}
2184 2164
2185#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
2186/**********************************************************************
2187 * SBMAC_PARSE_XDIGIT(str)
2188 *
2189 * Parse a hex digit, returning its value
2190 *
2191 * Input parameters:
2192 * str - character
2193 *
2194 * Return value:
2195 * hex value, or -1 if invalid
2196 ********************************************************************* */
2197
2198static int sbmac_parse_xdigit(char str)
2199{
2200 int digit;
2201
2202 if ((str >= '0') && (str <= '9'))
2203 digit = str - '0';
2204 else if ((str >= 'a') && (str <= 'f'))
2205 digit = str - 'a' + 10;
2206 else if ((str >= 'A') && (str <= 'F'))
2207 digit = str - 'A' + 10;
2208 else
2209 return -1;
2210
2211 return digit;
2212}
2213
2214/**********************************************************************
2215 * SBMAC_PARSE_HWADDR(str,hwaddr)
2216 *
2217 * Convert a string in the form xx:xx:xx:xx:xx:xx into a 6-byte
2218 * Ethernet address.
2219 *
2220 * Input parameters:
2221 * str - string
2222 * hwaddr - pointer to hardware address
2223 *
2224 * Return value:
2225 * 0 if ok, else -1
2226 ********************************************************************* */
2227
2228static int sbmac_parse_hwaddr(char *str, unsigned char *hwaddr)
2229{
2230 int digit1,digit2;
2231 int idx = 6;
2232
2233 while (*str && (idx > 0)) {
2234 digit1 = sbmac_parse_xdigit(*str);
2235 if (digit1 < 0)
2236 return -1;
2237 str++;
2238 if (!*str)
2239 return -1;
2240
2241 if ((*str == ':') || (*str == '-')) {
2242 digit2 = digit1;
2243 digit1 = 0;
2244 }
2245 else {
2246 digit2 = sbmac_parse_xdigit(*str);
2247 if (digit2 < 0)
2248 return -1;
2249 str++;
2250 }
2251
2252 *hwaddr++ = (digit1 << 4) | digit2;
2253 idx--;
2254
2255 if (*str == '-')
2256 str++;
2257 if (*str == ':')
2258 str++;
2259 }
2260 return 0;
2261}
2262#endif
2263
2264static int sb1250_change_mtu(struct net_device *_dev, int new_mtu) 2165static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
2265{ 2166{
2266 if (new_mtu > ENET_PACKET_SIZE) 2167 if (new_mtu > ENET_PACKET_SIZE)
@@ -2353,17 +2254,36 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2353 2254
2354 sc->mii_bus = mdiobus_alloc(); 2255 sc->mii_bus = mdiobus_alloc();
2355 if (sc->mii_bus == NULL) { 2256 if (sc->mii_bus == NULL) {
2356 sbmac_uninitctx(sc); 2257 err = -ENOMEM;
2357 return -ENOMEM; 2258 goto uninit_ctx;
2358 } 2259 }
2359 2260
2261 sc->mii_bus->name = sbmac_mdio_string;
2262 snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
2263 sc->mii_bus->priv = sc;
2264 sc->mii_bus->read = sbmac_mii_read;
2265 sc->mii_bus->write = sbmac_mii_write;
2266 sc->mii_bus->irq = sc->phy_irq;
2267 for (i = 0; i < PHY_MAX_ADDR; ++i)
2268 sc->mii_bus->irq[i] = SBMAC_PHY_INT;
2269
2270 sc->mii_bus->parent = &pldev->dev;
2271 /*
2272 * Probe PHY address
2273 */
2274 err = mdiobus_register(sc->mii_bus);
2275 if (err) {
2276 printk(KERN_ERR "%s: unable to register MDIO bus\n",
2277 dev->name);
2278 goto free_mdio;
2279 }
2280 dev_set_drvdata(&pldev->dev, sc->mii_bus);
2281
2360 err = register_netdev(dev); 2282 err = register_netdev(dev);
2361 if (err) { 2283 if (err) {
2362 printk(KERN_ERR "%s.%d: unable to register netdev\n", 2284 printk(KERN_ERR "%s.%d: unable to register netdev\n",
2363 sbmac_string, idx); 2285 sbmac_string, idx);
2364 mdiobus_free(sc->mii_bus); 2286 goto unreg_mdio;
2365 sbmac_uninitctx(sc);
2366 return err;
2367 } 2287 }
2368 2288
2369 pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name); 2289 pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);
@@ -2379,19 +2299,15 @@ static int sbmac_init(struct platform_device *pldev, long long base)
2379 pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n", 2299 pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
2380 dev->name, base, eaddr); 2300 dev->name, base, eaddr);
2381 2301
2382 sc->mii_bus->name = sbmac_mdio_string;
2383 snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
2384 sc->mii_bus->priv = sc;
2385 sc->mii_bus->read = sbmac_mii_read;
2386 sc->mii_bus->write = sbmac_mii_write;
2387 sc->mii_bus->irq = sc->phy_irq;
2388 for (i = 0; i < PHY_MAX_ADDR; ++i)
2389 sc->mii_bus->irq[i] = SBMAC_PHY_INT;
2390
2391 sc->mii_bus->parent = &pldev->dev;
2392 dev_set_drvdata(&pldev->dev, sc->mii_bus);
2393
2394 return 0; 2302 return 0;
2303unreg_mdio:
2304 mdiobus_unregister(sc->mii_bus);
2305 dev_set_drvdata(&pldev->dev, NULL);
2306free_mdio:
2307 mdiobus_free(sc->mii_bus);
2308uninit_ctx:
2309 sbmac_uninitctx(sc);
2310 return err;
2395} 2311}
2396 2312
2397 2313
@@ -2417,16 +2333,6 @@ static int sbmac_open(struct net_device *dev)
2417 goto out_err; 2333 goto out_err;
2418 } 2334 }
2419 2335
2420 /*
2421 * Probe PHY address
2422 */
2423 err = mdiobus_register(sc->mii_bus);
2424 if (err) {
2425 printk(KERN_ERR "%s: unable to register MDIO bus\n",
2426 dev->name);
2427 goto out_unirq;
2428 }
2429
2430 sc->sbm_speed = sbmac_speed_none; 2336 sc->sbm_speed = sbmac_speed_none;
2431 sc->sbm_duplex = sbmac_duplex_none; 2337 sc->sbm_duplex = sbmac_duplex_none;
2432 sc->sbm_fc = sbmac_fc_none; 2338 sc->sbm_fc = sbmac_fc_none;
@@ -2457,11 +2363,7 @@ static int sbmac_open(struct net_device *dev)
2457 return 0; 2363 return 0;
2458 2364
2459out_unregister: 2365out_unregister:
2460 mdiobus_unregister(sc->mii_bus);
2461
2462out_unirq:
2463 free_irq(dev->irq, dev); 2366 free_irq(dev->irq, dev);
2464
2465out_err: 2367out_err:
2466 return err; 2368 return err;
2467} 2369}
@@ -2584,7 +2486,7 @@ static void sbmac_tx_timeout (struct net_device *dev)
2584 spin_lock_irqsave(&sc->sbm_lock, flags); 2486 spin_lock_irqsave(&sc->sbm_lock, flags);
2585 2487
2586 2488
2587 dev->trans_start = jiffies; 2489 dev->trans_start = jiffies; /* prevent tx timeout */
2588 dev->stats.tx_errors++; 2490 dev->stats.tx_errors++;
2589 2491
2590 spin_unlock_irqrestore(&sc->sbm_lock, flags); 2492 spin_unlock_irqrestore(&sc->sbm_lock, flags);
@@ -2650,9 +2552,6 @@ static int sbmac_close(struct net_device *dev)
2650 2552
2651 phy_disconnect(sc->phy_dev); 2553 phy_disconnect(sc->phy_dev);
2652 sc->phy_dev = NULL; 2554 sc->phy_dev = NULL;
2653
2654 mdiobus_unregister(sc->mii_bus);
2655
2656 free_irq(dev->irq, dev); 2555 free_irq(dev->irq, dev);
2657 2556
2658 sbdma_emptyring(&(sc->sbm_txdma)); 2557 sbdma_emptyring(&(sc->sbm_txdma));
@@ -2664,7 +2563,6 @@ static int sbmac_close(struct net_device *dev)
2664static int sbmac_poll(struct napi_struct *napi, int budget) 2563static int sbmac_poll(struct napi_struct *napi, int budget)
2665{ 2564{
2666 struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi); 2565 struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
2667 struct net_device *dev = sc->sbm_dev;
2668 int work_done; 2566 int work_done;
2669 2567
2670 work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1); 2568 work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
@@ -2760,6 +2658,7 @@ static int __exit sbmac_remove(struct platform_device *pldev)
2760 2658
2761 unregister_netdev(dev); 2659 unregister_netdev(dev);
2762 sbmac_uninitctx(sc); 2660 sbmac_uninitctx(sc);
2661 mdiobus_unregister(sc->mii_bus);
2763 mdiobus_free(sc->mii_bus); 2662 mdiobus_free(sc->mii_bus);
2764 iounmap(sc->sbm_base); 2663 iounmap(sc->sbm_base);
2765 free_netdev(dev); 2664 free_netdev(dev);
@@ -2767,162 +2666,6 @@ static int __exit sbmac_remove(struct platform_device *pldev)
2767 return 0; 2666 return 0;
2768} 2667}
2769 2668
2770
2771static struct platform_device **sbmac_pldev;
2772static int sbmac_max_units;
2773
2774#if defined(SBMAC_ETH0_HWADDR) || defined(SBMAC_ETH1_HWADDR) || defined(SBMAC_ETH2_HWADDR) || defined(SBMAC_ETH3_HWADDR)
2775static void __init sbmac_setup_hwaddr(int idx, char *addr)
2776{
2777 void __iomem *sbm_base;
2778 unsigned long start, end;
2779 uint8_t eaddr[6];
2780 uint64_t val;
2781
2782 if (idx >= sbmac_max_units)
2783 return;
2784
2785 start = A_MAC_CHANNEL_BASE(idx);
2786 end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
2787
2788 sbm_base = ioremap_nocache(start, end - start + 1);
2789 if (!sbm_base) {
2790 printk(KERN_ERR "%s: unable to map device registers\n",
2791 sbmac_string);
2792 return;
2793 }
2794
2795 sbmac_parse_hwaddr(addr, eaddr);
2796 val = sbmac_addr2reg(eaddr);
2797 __raw_writeq(val, sbm_base + R_MAC_ETHERNET_ADDR);
2798 val = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
2799
2800 iounmap(sbm_base);
2801}
2802#endif
2803
2804static int __init sbmac_platform_probe_one(int idx)
2805{
2806 struct platform_device *pldev;
2807 struct {
2808 struct resource r;
2809 char name[strlen(sbmac_pretty) + 4];
2810 } *res;
2811 int err;
2812
2813 res = kzalloc(sizeof(*res), GFP_KERNEL);
2814 if (!res) {
2815 printk(KERN_ERR "%s.%d: unable to allocate memory\n",
2816 sbmac_string, idx);
2817 err = -ENOMEM;
2818 goto out_err;
2819 }
2820
2821 /*
2822 * This is the base address of the MAC.
2823 */
2824 snprintf(res->name, sizeof(res->name), "%s %d", sbmac_pretty, idx);
2825 res->r.name = res->name;
2826 res->r.flags = IORESOURCE_MEM;
2827 res->r.start = A_MAC_CHANNEL_BASE(idx);
2828 res->r.end = A_MAC_CHANNEL_BASE(idx + 1) - 1;
2829
2830 pldev = platform_device_register_simple(sbmac_string, idx, &res->r, 1);
2831 if (IS_ERR(pldev)) {
2832 printk(KERN_ERR "%s.%d: unable to register platform device\n",
2833 sbmac_string, idx);
2834 err = PTR_ERR(pldev);
2835 goto out_kfree;
2836 }
2837
2838 if (!pldev->dev.driver) {
2839 err = 0; /* No hardware at this address. */
2840 goto out_unregister;
2841 }
2842
2843 sbmac_pldev[idx] = pldev;
2844 return 0;
2845
2846out_unregister:
2847 platform_device_unregister(pldev);
2848
2849out_kfree:
2850 kfree(res);
2851
2852out_err:
2853 return err;
2854}
2855
2856static void __init sbmac_platform_probe(void)
2857{
2858 int i;
2859
2860 /* Set the number of available units based on the SOC type. */
2861 switch (soc_type) {
2862 case K_SYS_SOC_TYPE_BCM1250:
2863 case K_SYS_SOC_TYPE_BCM1250_ALT:
2864 sbmac_max_units = 3;
2865 break;
2866 case K_SYS_SOC_TYPE_BCM1120:
2867 case K_SYS_SOC_TYPE_BCM1125:
2868 case K_SYS_SOC_TYPE_BCM1125H:
2869 case K_SYS_SOC_TYPE_BCM1250_ALT2: /* Hybrid */
2870 sbmac_max_units = 2;
2871 break;
2872 case K_SYS_SOC_TYPE_BCM1x55:
2873 case K_SYS_SOC_TYPE_BCM1x80:
2874 sbmac_max_units = 4;
2875 break;
2876 default:
2877 return; /* none */
2878 }
2879
2880 /*
2881 * For bringup when not using the firmware, we can pre-fill
2882 * the MAC addresses using the environment variables
2883 * specified in this file (or maybe from the config file?)
2884 */
2885#ifdef SBMAC_ETH0_HWADDR
2886 sbmac_setup_hwaddr(0, SBMAC_ETH0_HWADDR);
2887#endif
2888#ifdef SBMAC_ETH1_HWADDR
2889 sbmac_setup_hwaddr(1, SBMAC_ETH1_HWADDR);
2890#endif
2891#ifdef SBMAC_ETH2_HWADDR
2892 sbmac_setup_hwaddr(2, SBMAC_ETH2_HWADDR);
2893#endif
2894#ifdef SBMAC_ETH3_HWADDR
2895 sbmac_setup_hwaddr(3, SBMAC_ETH3_HWADDR);
2896#endif
2897
2898 sbmac_pldev = kcalloc(sbmac_max_units, sizeof(*sbmac_pldev),
2899 GFP_KERNEL);
2900 if (!sbmac_pldev) {
2901 printk(KERN_ERR "%s: unable to allocate memory\n",
2902 sbmac_string);
2903 return;
2904 }
2905
2906 /*
2907 * Walk through the Ethernet controllers and find
2908 * those who have their MAC addresses set.
2909 */
2910 for (i = 0; i < sbmac_max_units; i++)
2911 if (sbmac_platform_probe_one(i))
2912 break;
2913}
2914
2915
2916static void __exit sbmac_platform_cleanup(void)
2917{
2918 int i;
2919
2920 for (i = 0; i < sbmac_max_units; i++)
2921 platform_device_unregister(sbmac_pldev[i]);
2922 kfree(sbmac_pldev);
2923}
2924
2925
2926static struct platform_driver sbmac_driver = { 2669static struct platform_driver sbmac_driver = {
2927 .probe = sbmac_probe, 2670 .probe = sbmac_probe,
2928 .remove = __exit_p(sbmac_remove), 2671 .remove = __exit_p(sbmac_remove),
@@ -2933,20 +2676,11 @@ static struct platform_driver sbmac_driver = {
2933 2676
2934static int __init sbmac_init_module(void) 2677static int __init sbmac_init_module(void)
2935{ 2678{
2936 int err; 2679 return platform_driver_register(&sbmac_driver);
2937
2938 err = platform_driver_register(&sbmac_driver);
2939 if (err)
2940 return err;
2941
2942 sbmac_platform_probe();
2943
2944 return err;
2945} 2680}
2946 2681
2947static void __exit sbmac_cleanup_module(void) 2682static void __exit sbmac_cleanup_module(void)
2948{ 2683{
2949 sbmac_platform_cleanup();
2950 platform_driver_unregister(&sbmac_driver); 2684 platform_driver_unregister(&sbmac_driver);
2951} 2685}
2952 2686
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index d87c4787fffa..8c4067af32b0 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -433,13 +433,13 @@ static void _sc92031_set_mar(struct net_device *dev)
433 (dev->flags & IFF_ALLMULTI)) 433 (dev->flags & IFF_ALLMULTI))
434 mar0 = mar1 = 0xffffffff; 434 mar0 = mar1 = 0xffffffff;
435 else if (dev->flags & IFF_MULTICAST) { 435 else if (dev->flags & IFF_MULTICAST) {
436 struct dev_mc_list *mc_list; 436 struct netdev_hw_addr *ha;
437 437
438 netdev_for_each_mc_addr(mc_list, dev) { 438 netdev_for_each_mc_addr(ha, dev) {
439 u32 crc; 439 u32 crc;
440 unsigned bit = 0; 440 unsigned bit = 0;
441 441
442 crc = ~ether_crc(ETH_ALEN, mc_list->dmi_addr); 442 crc = ~ether_crc(ETH_ALEN, ha->addr);
443 crc >>= 24; 443 crc >>= 24;
444 444
445 if (crc & 0x01) bit |= 0x02; 445 if (crc & 0x01) bit |= 0x02;
@@ -987,8 +987,6 @@ static netdev_tx_t sc92031_start_xmit(struct sk_buff *skb,
987 iowrite32(tx_status, port_base + TxStatus0 + entry * 4); 987 iowrite32(tx_status, port_base + TxStatus0 + entry * 4);
988 mmiowb(); 988 mmiowb();
989 989
990 dev->trans_start = jiffies;
991
992 if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC) 990 if (priv->tx_head - priv->tx_tail >= NUM_TX_DESC)
993 netif_stop_queue(dev); 991 netif_stop_queue(dev);
994 992
diff --git a/drivers/net/seeq8005.c b/drivers/net/seeq8005.c
index 374832cca11f..d2fce98f557f 100644
--- a/drivers/net/seeq8005.c
+++ b/drivers/net/seeq8005.c
@@ -390,7 +390,7 @@ static void seeq8005_timeout(struct net_device *dev)
390 tx_done(dev) ? "IRQ conflict" : "network cable problem"); 390 tx_done(dev) ? "IRQ conflict" : "network cable problem");
391 /* Try to restart the adaptor. */ 391 /* Try to restart the adaptor. */
392 seeq8005_init(dev, 1); 392 seeq8005_init(dev, 1);
393 dev->trans_start = jiffies; 393 dev->trans_start = jiffies; /* prevent tx timeout */
394 netif_wake_queue(dev); 394 netif_wake_queue(dev);
395} 395}
396 396
@@ -411,7 +411,6 @@ static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
411 netif_stop_queue(dev); 411 netif_stop_queue(dev);
412 412
413 hardware_send_packet(dev, buf, length); 413 hardware_send_packet(dev, buf, length);
414 dev->trans_start = jiffies;
415 dev->stats.tx_bytes += length; 414 dev->stats.tx_bytes += length;
416 dev_kfree_skb (skb); 415 dev_kfree_skb (skb);
417 /* You might need to clean up and record Tx statistics here. */ 416 /* You might need to clean up and record Tx statistics here. */
@@ -579,7 +578,6 @@ static void seeq8005_rx(struct net_device *dev)
579 /* If any worth-while packets have been received, netif_rx() 578 /* If any worth-while packets have been received, netif_rx()
580 has done a mark_bh(NET_BH) for us and will work on them 579 has done a mark_bh(NET_BH) for us and will work on them
581 when we get to the bottom-half routine. */ 580 when we get to the bottom-half routine. */
582 return;
583} 581}
584 582
585/* The inverse routine to net_open(). */ 583/* The inverse routine to net_open(). */
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 6486657c47b8..156460527231 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -225,17 +225,17 @@ static void efx_fini_channels(struct efx_nic *efx);
225 * never be concurrently called more than once on the same channel, 225 * never be concurrently called more than once on the same channel,
226 * though different channels may be being processed concurrently. 226 * though different channels may be being processed concurrently.
227 */ 227 */
228static int efx_process_channel(struct efx_channel *channel, int rx_quota) 228static int efx_process_channel(struct efx_channel *channel, int budget)
229{ 229{
230 struct efx_nic *efx = channel->efx; 230 struct efx_nic *efx = channel->efx;
231 int rx_packets; 231 int spent;
232 232
233 if (unlikely(efx->reset_pending != RESET_TYPE_NONE || 233 if (unlikely(efx->reset_pending != RESET_TYPE_NONE ||
234 !channel->enabled)) 234 !channel->enabled))
235 return 0; 235 return 0;
236 236
237 rx_packets = efx_nic_process_eventq(channel, rx_quota); 237 spent = efx_nic_process_eventq(channel, budget);
238 if (rx_packets == 0) 238 if (spent == 0)
239 return 0; 239 return 0;
240 240
241 /* Deliver last RX packet. */ 241 /* Deliver last RX packet. */
@@ -249,7 +249,7 @@ static int efx_process_channel(struct efx_channel *channel, int rx_quota)
249 249
250 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]); 250 efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
251 251
252 return rx_packets; 252 return spent;
253} 253}
254 254
255/* Mark channel as finished processing 255/* Mark channel as finished processing
@@ -278,17 +278,17 @@ static int efx_poll(struct napi_struct *napi, int budget)
278{ 278{
279 struct efx_channel *channel = 279 struct efx_channel *channel =
280 container_of(napi, struct efx_channel, napi_str); 280 container_of(napi, struct efx_channel, napi_str);
281 int rx_packets; 281 int spent;
282 282
283 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n", 283 EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
284 channel->channel, raw_smp_processor_id()); 284 channel->channel, raw_smp_processor_id());
285 285
286 rx_packets = efx_process_channel(channel, budget); 286 spent = efx_process_channel(channel, budget);
287 287
288 if (rx_packets < budget) { 288 if (spent < budget) {
289 struct efx_nic *efx = channel->efx; 289 struct efx_nic *efx = channel->efx;
290 290
291 if (channel->used_flags & EFX_USED_BY_RX && 291 if (channel->channel < efx->n_rx_channels &&
292 efx->irq_rx_adaptive && 292 efx->irq_rx_adaptive &&
293 unlikely(++channel->irq_count == 1000)) { 293 unlikely(++channel->irq_count == 1000)) {
294 if (unlikely(channel->irq_mod_score < 294 if (unlikely(channel->irq_mod_score <
@@ -318,7 +318,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
318 efx_channel_processed(channel); 318 efx_channel_processed(channel);
319 } 319 }
320 320
321 return rx_packets; 321 return spent;
322} 322}
323 323
324/* Process the eventq of the specified channel immediately on this CPU 324/* Process the eventq of the specified channel immediately on this CPU
@@ -333,7 +333,6 @@ void efx_process_channel_now(struct efx_channel *channel)
333{ 333{
334 struct efx_nic *efx = channel->efx; 334 struct efx_nic *efx = channel->efx;
335 335
336 BUG_ON(!channel->used_flags);
337 BUG_ON(!channel->enabled); 336 BUG_ON(!channel->enabled);
338 337
339 /* Disable interrupts and wait for ISRs to complete */ 338 /* Disable interrupts and wait for ISRs to complete */
@@ -446,12 +445,12 @@ static void efx_set_channel_names(struct efx_nic *efx)
446 445
447 efx_for_each_channel(channel, efx) { 446 efx_for_each_channel(channel, efx) {
448 number = channel->channel; 447 number = channel->channel;
449 if (efx->n_channels > efx->n_rx_queues) { 448 if (efx->n_channels > efx->n_rx_channels) {
450 if (channel->channel < efx->n_rx_queues) { 449 if (channel->channel < efx->n_rx_channels) {
451 type = "-rx"; 450 type = "-rx";
452 } else { 451 } else {
453 type = "-tx"; 452 type = "-tx";
454 number -= efx->n_rx_queues; 453 number -= efx->n_rx_channels;
455 } 454 }
456 } 455 }
457 snprintf(channel->name, sizeof(channel->name), 456 snprintf(channel->name, sizeof(channel->name),
@@ -585,8 +584,6 @@ static void efx_remove_channel(struct efx_channel *channel)
585 efx_for_each_channel_tx_queue(tx_queue, channel) 584 efx_for_each_channel_tx_queue(tx_queue, channel)
586 efx_remove_tx_queue(tx_queue); 585 efx_remove_tx_queue(tx_queue);
587 efx_remove_eventq(channel); 586 efx_remove_eventq(channel);
588
589 channel->used_flags = 0;
590} 587}
591 588
592void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay) 589void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
@@ -956,10 +953,9 @@ static void efx_fini_io(struct efx_nic *efx)
956 pci_disable_device(efx->pci_dev); 953 pci_disable_device(efx->pci_dev);
957} 954}
958 955
959/* Get number of RX queues wanted. Return number of online CPU 956/* Get number of channels wanted. Each channel will have its own IRQ,
960 * packages in the expectation that an IRQ balancer will spread 957 * 1 RX queue and/or 2 TX queues. */
961 * interrupts across them. */ 958static int efx_wanted_channels(void)
962static int efx_wanted_rx_queues(void)
963{ 959{
964 cpumask_var_t core_mask; 960 cpumask_var_t core_mask;
965 int count; 961 int count;
@@ -995,34 +991,39 @@ static void efx_probe_interrupts(struct efx_nic *efx)
995 991
996 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { 992 if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
997 struct msix_entry xentries[EFX_MAX_CHANNELS]; 993 struct msix_entry xentries[EFX_MAX_CHANNELS];
998 int wanted_ints; 994 int n_channels;
999 int rx_queues;
1000 995
1001 /* We want one RX queue and interrupt per CPU package 996 n_channels = efx_wanted_channels();
1002 * (or as specified by the rss_cpus module parameter). 997 if (separate_tx_channels)
1003 * We will need one channel per interrupt. 998 n_channels *= 2;
1004 */ 999 n_channels = min(n_channels, max_channels);
1005 rx_queues = rss_cpus ? rss_cpus : efx_wanted_rx_queues();
1006 wanted_ints = rx_queues + (separate_tx_channels ? 1 : 0);
1007 wanted_ints = min(wanted_ints, max_channels);
1008 1000
1009 for (i = 0; i < wanted_ints; i++) 1001 for (i = 0; i < n_channels; i++)
1010 xentries[i].entry = i; 1002 xentries[i].entry = i;
1011 rc = pci_enable_msix(efx->pci_dev, xentries, wanted_ints); 1003 rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
1012 if (rc > 0) { 1004 if (rc > 0) {
1013 EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors" 1005 EFX_ERR(efx, "WARNING: Insufficient MSI-X vectors"
1014 " available (%d < %d).\n", rc, wanted_ints); 1006 " available (%d < %d).\n", rc, n_channels);
1015 EFX_ERR(efx, "WARNING: Performance may be reduced.\n"); 1007 EFX_ERR(efx, "WARNING: Performance may be reduced.\n");
1016 EFX_BUG_ON_PARANOID(rc >= wanted_ints); 1008 EFX_BUG_ON_PARANOID(rc >= n_channels);
1017 wanted_ints = rc; 1009 n_channels = rc;
1018 rc = pci_enable_msix(efx->pci_dev, xentries, 1010 rc = pci_enable_msix(efx->pci_dev, xentries,
1019 wanted_ints); 1011 n_channels);
1020 } 1012 }
1021 1013
1022 if (rc == 0) { 1014 if (rc == 0) {
1023 efx->n_rx_queues = min(rx_queues, wanted_ints); 1015 efx->n_channels = n_channels;
1024 efx->n_channels = wanted_ints; 1016 if (separate_tx_channels) {
1025 for (i = 0; i < wanted_ints; i++) 1017 efx->n_tx_channels =
1018 max(efx->n_channels / 2, 1U);
1019 efx->n_rx_channels =
1020 max(efx->n_channels -
1021 efx->n_tx_channels, 1U);
1022 } else {
1023 efx->n_tx_channels = efx->n_channels;
1024 efx->n_rx_channels = efx->n_channels;
1025 }
1026 for (i = 0; i < n_channels; i++)
1026 efx->channel[i].irq = xentries[i].vector; 1027 efx->channel[i].irq = xentries[i].vector;
1027 } else { 1028 } else {
1028 /* Fall back to single channel MSI */ 1029 /* Fall back to single channel MSI */
@@ -1033,8 +1034,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1033 1034
1034 /* Try single interrupt MSI */ 1035 /* Try single interrupt MSI */
1035 if (efx->interrupt_mode == EFX_INT_MODE_MSI) { 1036 if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
1036 efx->n_rx_queues = 1;
1037 efx->n_channels = 1; 1037 efx->n_channels = 1;
1038 efx->n_rx_channels = 1;
1039 efx->n_tx_channels = 1;
1038 rc = pci_enable_msi(efx->pci_dev); 1040 rc = pci_enable_msi(efx->pci_dev);
1039 if (rc == 0) { 1041 if (rc == 0) {
1040 efx->channel[0].irq = efx->pci_dev->irq; 1042 efx->channel[0].irq = efx->pci_dev->irq;
@@ -1046,8 +1048,9 @@ static void efx_probe_interrupts(struct efx_nic *efx)
1046 1048
1047 /* Assume legacy interrupts */ 1049 /* Assume legacy interrupts */
1048 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { 1050 if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
1049 efx->n_rx_queues = 1;
1050 efx->n_channels = 1 + (separate_tx_channels ? 1 : 0); 1051 efx->n_channels = 1 + (separate_tx_channels ? 1 : 0);
1052 efx->n_rx_channels = 1;
1053 efx->n_tx_channels = 1;
1051 efx->legacy_irq = efx->pci_dev->irq; 1054 efx->legacy_irq = efx->pci_dev->irq;
1052 } 1055 }
1053} 1056}
@@ -1068,21 +1071,24 @@ static void efx_remove_interrupts(struct efx_nic *efx)
1068 1071
1069static void efx_set_channels(struct efx_nic *efx) 1072static void efx_set_channels(struct efx_nic *efx)
1070{ 1073{
1074 struct efx_channel *channel;
1071 struct efx_tx_queue *tx_queue; 1075 struct efx_tx_queue *tx_queue;
1072 struct efx_rx_queue *rx_queue; 1076 struct efx_rx_queue *rx_queue;
1077 unsigned tx_channel_offset =
1078 separate_tx_channels ? efx->n_channels - efx->n_tx_channels : 0;
1073 1079
1074 efx_for_each_tx_queue(tx_queue, efx) { 1080 efx_for_each_channel(channel, efx) {
1075 if (separate_tx_channels) 1081 if (channel->channel - tx_channel_offset < efx->n_tx_channels) {
1076 tx_queue->channel = &efx->channel[efx->n_channels-1]; 1082 channel->tx_queue = &efx->tx_queue[
1077 else 1083 (channel->channel - tx_channel_offset) *
1078 tx_queue->channel = &efx->channel[0]; 1084 EFX_TXQ_TYPES];
1079 tx_queue->channel->used_flags |= EFX_USED_BY_TX; 1085 efx_for_each_channel_tx_queue(tx_queue, channel)
1086 tx_queue->channel = channel;
1087 }
1080 } 1088 }
1081 1089
1082 efx_for_each_rx_queue(rx_queue, efx) { 1090 efx_for_each_rx_queue(rx_queue, efx)
1083 rx_queue->channel = &efx->channel[rx_queue->queue]; 1091 rx_queue->channel = &efx->channel[rx_queue->queue];
1084 rx_queue->channel->used_flags |= EFX_USED_BY_RX;
1085 }
1086} 1092}
1087 1093
1088static int efx_probe_nic(struct efx_nic *efx) 1094static int efx_probe_nic(struct efx_nic *efx)
@@ -1096,11 +1102,12 @@ static int efx_probe_nic(struct efx_nic *efx)
1096 if (rc) 1102 if (rc)
1097 return rc; 1103 return rc;
1098 1104
1099 /* Determine the number of channels and RX queues by trying to hook 1105 /* Determine the number of channels and queues by trying to hook
1100 * in MSI-X interrupts. */ 1106 * in MSI-X interrupts. */
1101 efx_probe_interrupts(efx); 1107 efx_probe_interrupts(efx);
1102 1108
1103 efx_set_channels(efx); 1109 efx_set_channels(efx);
1110 efx->net_dev->real_num_tx_queues = efx->n_tx_channels;
1104 1111
1105 /* Initialise the interrupt moderation settings */ 1112 /* Initialise the interrupt moderation settings */
1106 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true); 1113 efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec, true);
@@ -1187,11 +1194,12 @@ static void efx_start_all(struct efx_nic *efx)
1187 /* Mark the port as enabled so port reconfigurations can start, then 1194 /* Mark the port as enabled so port reconfigurations can start, then
1188 * restart the transmit interface early so the watchdog timer stops */ 1195 * restart the transmit interface early so the watchdog timer stops */
1189 efx_start_port(efx); 1196 efx_start_port(efx);
1190 if (efx_dev_registered(efx))
1191 efx_wake_queue(efx);
1192 1197
1193 efx_for_each_channel(channel, efx) 1198 efx_for_each_channel(channel, efx) {
1199 if (efx_dev_registered(efx))
1200 efx_wake_queue(channel);
1194 efx_start_channel(channel); 1201 efx_start_channel(channel);
1202 }
1195 1203
1196 efx_nic_enable_interrupts(efx); 1204 efx_nic_enable_interrupts(efx);
1197 1205
@@ -1282,7 +1290,9 @@ static void efx_stop_all(struct efx_nic *efx)
1282 /* Stop the kernel transmit interface late, so the watchdog 1290 /* Stop the kernel transmit interface late, so the watchdog
1283 * timer isn't ticking over the flush */ 1291 * timer isn't ticking over the flush */
1284 if (efx_dev_registered(efx)) { 1292 if (efx_dev_registered(efx)) {
1285 efx_stop_queue(efx); 1293 struct efx_channel *channel;
1294 efx_for_each_channel(channel, efx)
1295 efx_stop_queue(channel);
1286 netif_tx_lock_bh(efx->net_dev); 1296 netif_tx_lock_bh(efx->net_dev);
1287 netif_tx_unlock_bh(efx->net_dev); 1297 netif_tx_unlock_bh(efx->net_dev);
1288 } 1298 }
@@ -1537,9 +1547,8 @@ static void efx_watchdog(struct net_device *net_dev)
1537{ 1547{
1538 struct efx_nic *efx = netdev_priv(net_dev); 1548 struct efx_nic *efx = netdev_priv(net_dev);
1539 1549
1540 EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d:" 1550 EFX_ERR(efx, "TX stuck with port_enabled=%d: resetting channels\n",
1541 " resetting channels\n", 1551 efx->port_enabled);
1542 atomic_read(&efx->netif_stop_count), efx->port_enabled);
1543 1552
1544 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG); 1553 efx_schedule_reset(efx, RESET_TYPE_TX_WATCHDOG);
1545} 1554}
@@ -1603,7 +1612,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1603static void efx_set_multicast_list(struct net_device *net_dev) 1612static void efx_set_multicast_list(struct net_device *net_dev)
1604{ 1613{
1605 struct efx_nic *efx = netdev_priv(net_dev); 1614 struct efx_nic *efx = netdev_priv(net_dev);
1606 struct dev_mc_list *mc_list; 1615 struct netdev_hw_addr *ha;
1607 union efx_multicast_hash *mc_hash = &efx->multicast_hash; 1616 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1608 u32 crc; 1617 u32 crc;
1609 int bit; 1618 int bit;
@@ -1615,8 +1624,8 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1615 memset(mc_hash, 0xff, sizeof(*mc_hash)); 1624 memset(mc_hash, 0xff, sizeof(*mc_hash));
1616 } else { 1625 } else {
1617 memset(mc_hash, 0x00, sizeof(*mc_hash)); 1626 memset(mc_hash, 0x00, sizeof(*mc_hash));
1618 netdev_for_each_mc_addr(mc_list, net_dev) { 1627 netdev_for_each_mc_addr(ha, net_dev) {
1619 crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr); 1628 crc = ether_crc_le(ETH_ALEN, ha->addr);
1620 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); 1629 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
1621 set_bit_le(bit, mc_hash->byte); 1630 set_bit_le(bit, mc_hash->byte);
1622 } 1631 }
@@ -1861,6 +1870,7 @@ out:
1861 } 1870 }
1862 1871
1863 if (disabled) { 1872 if (disabled) {
1873 dev_close(efx->net_dev);
1864 EFX_ERR(efx, "has been disabled\n"); 1874 EFX_ERR(efx, "has been disabled\n");
1865 efx->state = STATE_DISABLED; 1875 efx->state = STATE_DISABLED;
1866 } else { 1876 } else {
@@ -1884,8 +1894,7 @@ static void efx_reset_work(struct work_struct *data)
1884 } 1894 }
1885 1895
1886 rtnl_lock(); 1896 rtnl_lock();
1887 if (efx_reset(efx, efx->reset_pending)) 1897 (void)efx_reset(efx, efx->reset_pending);
1888 dev_close(efx->net_dev);
1889 rtnl_unlock(); 1898 rtnl_unlock();
1890} 1899}
1891 1900
@@ -2014,22 +2023,22 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2014 2023
2015 efx->net_dev = net_dev; 2024 efx->net_dev = net_dev;
2016 efx->rx_checksum_enabled = true; 2025 efx->rx_checksum_enabled = true;
2017 spin_lock_init(&efx->netif_stop_lock);
2018 spin_lock_init(&efx->stats_lock); 2026 spin_lock_init(&efx->stats_lock);
2019 mutex_init(&efx->mac_lock); 2027 mutex_init(&efx->mac_lock);
2020 efx->mac_op = type->default_mac_ops; 2028 efx->mac_op = type->default_mac_ops;
2021 efx->phy_op = &efx_dummy_phy_operations; 2029 efx->phy_op = &efx_dummy_phy_operations;
2022 efx->mdio.dev = net_dev; 2030 efx->mdio.dev = net_dev;
2023 INIT_WORK(&efx->mac_work, efx_mac_work); 2031 INIT_WORK(&efx->mac_work, efx_mac_work);
2024 atomic_set(&efx->netif_stop_count, 1);
2025 2032
2026 for (i = 0; i < EFX_MAX_CHANNELS; i++) { 2033 for (i = 0; i < EFX_MAX_CHANNELS; i++) {
2027 channel = &efx->channel[i]; 2034 channel = &efx->channel[i];
2028 channel->efx = efx; 2035 channel->efx = efx;
2029 channel->channel = i; 2036 channel->channel = i;
2030 channel->work_pending = false; 2037 channel->work_pending = false;
2038 spin_lock_init(&channel->tx_stop_lock);
2039 atomic_set(&channel->tx_stop_count, 1);
2031 } 2040 }
2032 for (i = 0; i < EFX_TX_QUEUE_COUNT; i++) { 2041 for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
2033 tx_queue = &efx->tx_queue[i]; 2042 tx_queue = &efx->tx_queue[i];
2034 tx_queue->efx = efx; 2043 tx_queue->efx = efx;
2035 tx_queue->queue = i; 2044 tx_queue->queue = i;
@@ -2201,7 +2210,7 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2201 int i, rc; 2210 int i, rc;
2202 2211
2203 /* Allocate and initialise a struct net_device and struct efx_nic */ 2212 /* Allocate and initialise a struct net_device and struct efx_nic */
2204 net_dev = alloc_etherdev(sizeof(*efx)); 2213 net_dev = alloc_etherdev_mq(sizeof(*efx), EFX_MAX_CORE_TX_QUEUES);
2205 if (!net_dev) 2214 if (!net_dev)
2206 return -ENOMEM; 2215 return -ENOMEM;
2207 net_dev->features |= (type->offload_features | NETIF_F_SG | 2216 net_dev->features |= (type->offload_features | NETIF_F_SG |
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 7eff0a615cb3..ffd708c5304a 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -35,8 +35,8 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
35extern netdev_tx_t 35extern netdev_tx_t
36efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); 36efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
37extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 37extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
38extern void efx_stop_queue(struct efx_nic *efx); 38extern void efx_stop_queue(struct efx_channel *channel);
39extern void efx_wake_queue(struct efx_nic *efx); 39extern void efx_wake_queue(struct efx_channel *channel);
40#define EFX_TXQ_SIZE 1024 40#define EFX_TXQ_SIZE 1024
41#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1) 41#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
42 42
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index d9f9c02a928e..22026bfbc4c1 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -304,7 +304,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
304{ 304{
305 struct efx_tx_queue *tx_queue; 305 struct efx_tx_queue *tx_queue;
306 306
307 efx_for_each_tx_queue(tx_queue, efx) { 307 efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
308 efx_fill_test(test_index++, strings, data, 308 efx_fill_test(test_index++, strings, data,
309 &lb_tests->tx_sent[tx_queue->queue], 309 &lb_tests->tx_sent[tx_queue->queue],
310 EFX_TX_QUEUE_NAME(tx_queue), 310 EFX_TX_QUEUE_NAME(tx_queue),
@@ -647,7 +647,7 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
647 efx_for_each_tx_queue(tx_queue, efx) { 647 efx_for_each_tx_queue(tx_queue, efx) {
648 channel = tx_queue->channel; 648 channel = tx_queue->channel;
649 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { 649 if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
650 if (channel->used_flags != EFX_USED_BY_RX_TX) 650 if (channel->channel < efx->n_rx_channels)
651 coalesce->tx_coalesce_usecs_irq = 651 coalesce->tx_coalesce_usecs_irq =
652 channel->irq_moderation; 652 channel->irq_moderation;
653 else 653 else
@@ -690,7 +690,7 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
690 690
691 /* If the channel is shared only allow RX parameters to be set */ 691 /* If the channel is shared only allow RX parameters to be set */
692 efx_for_each_tx_queue(tx_queue, efx) { 692 efx_for_each_tx_queue(tx_queue, efx) {
693 if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) && 693 if ((tx_queue->channel->channel < efx->n_rx_channels) &&
694 tx_usecs) { 694 tx_usecs) {
695 EFX_ERR(efx, "Channel is shared. " 695 EFX_ERR(efx, "Channel is shared. "
696 "Only RX coalescing may be set\n"); 696 "Only RX coalescing may be set\n");
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index d294d66fd600..655b697b45b2 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -175,16 +175,19 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
175 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", 175 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
176 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 176 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
177 177
178 /* Check to see if we have a serious error condition */
179 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
180 if (unlikely(syserr))
181 return efx_nic_fatal_interrupt(efx);
182
183 /* Determine interrupting queues, clear interrupt status 178 /* Determine interrupting queues, clear interrupt status
184 * register and acknowledge the device interrupt. 179 * register and acknowledge the device interrupt.
185 */ 180 */
186 BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS); 181 BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS);
187 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q); 182 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q);
183
184 /* Check to see if we have a serious error condition */
185 if (queues & (1U << efx->fatal_irq_level)) {
186 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
187 if (unlikely(syserr))
188 return efx_nic_fatal_interrupt(efx);
189 }
190
188 EFX_ZERO_OWORD(*int_ker); 191 EFX_ZERO_OWORD(*int_ker);
189 wmb(); /* Ensure the vector is cleared before interrupt ack */ 192 wmb(); /* Ensure the vector is cleared before interrupt ack */
190 falcon_irq_ack_a1(efx); 193 falcon_irq_ack_a1(efx);
@@ -504,6 +507,9 @@ static void falcon_reset_macs(struct efx_nic *efx)
504 /* Ensure the correct MAC is selected before statistics 507 /* Ensure the correct MAC is selected before statistics
505 * are re-enabled by the caller */ 508 * are re-enabled by the caller */
506 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); 509 efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL);
510
511 /* This can run even when the GMAC is selected */
512 falcon_setup_xaui(efx);
507} 513}
508 514
509void falcon_drain_tx_fifo(struct efx_nic *efx) 515void falcon_drain_tx_fifo(struct efx_nic *efx)
@@ -1320,7 +1326,9 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1320 1326
1321 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); 1327 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
1322 1328
1323 falcon_probe_board(efx, board_rev); 1329 rc = falcon_probe_board(efx, board_rev);
1330 if (rc)
1331 goto fail2;
1324 1332
1325 kfree(nvconfig); 1333 kfree(nvconfig);
1326 return 0; 1334 return 0;
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index 5712fddd72f2..c7a933a3292e 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -728,15 +728,7 @@ static const struct falcon_board_type board_types[] = {
728 }, 728 },
729}; 729};
730 730
731static const struct falcon_board_type falcon_dummy_board = { 731int falcon_probe_board(struct efx_nic *efx, u16 revision_info)
732 .init = efx_port_dummy_op_int,
733 .init_phy = efx_port_dummy_op_void,
734 .fini = efx_port_dummy_op_void,
735 .set_id_led = efx_port_dummy_op_set_id_led,
736 .monitor = efx_port_dummy_op_int,
737};
738
739void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
740{ 732{
741 struct falcon_board *board = falcon_board(efx); 733 struct falcon_board *board = falcon_board(efx);
742 u8 type_id = FALCON_BOARD_TYPE(revision_info); 734 u8 type_id = FALCON_BOARD_TYPE(revision_info);
@@ -754,8 +746,9 @@ void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
754 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) 746 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
755 ? board->type->ref_model : board->type->gen_type, 747 ? board->type->ref_model : board->type->gen_type,
756 'A' + board->major, board->minor); 748 'A' + board->major, board->minor);
749 return 0;
757 } else { 750 } else {
758 EFX_ERR(efx, "unknown board type %d\n", type_id); 751 EFX_ERR(efx, "unknown board type %d\n", type_id);
759 board->type = &falcon_dummy_board; 752 return -ENODEV;
760 } 753 }
761} 754}
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index 8ccab2c67a20..c84a2ce2ccbb 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -26,7 +26,7 @@
26 *************************************************************************/ 26 *************************************************************************/
27 27
28/* Configure the XAUI driver that is an output from Falcon */ 28/* Configure the XAUI driver that is an output from Falcon */
29static void falcon_setup_xaui(struct efx_nic *efx) 29void falcon_setup_xaui(struct efx_nic *efx)
30{ 30{
31 efx_oword_t sdctl, txdrv; 31 efx_oword_t sdctl, txdrv;
32 32
@@ -85,14 +85,14 @@ int falcon_reset_xaui(struct efx_nic *efx)
85 return -ETIMEDOUT; 85 return -ETIMEDOUT;
86} 86}
87 87
88static void falcon_mask_status_intr(struct efx_nic *efx, bool enable) 88static void falcon_ack_status_intr(struct efx_nic *efx)
89{ 89{
90 efx_oword_t reg; 90 efx_oword_t reg;
91 91
92 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) 92 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
93 return; 93 return;
94 94
95 /* We expect xgmii faults if the wireside link is up */ 95 /* We expect xgmii faults if the wireside link is down */
96 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up) 96 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
97 return; 97 return;
98 98
@@ -101,14 +101,7 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
101 if (efx->xmac_poll_required) 101 if (efx->xmac_poll_required)
102 return; 102 return;
103 103
104 /* Flush the ISR */ 104 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
105 if (enable)
106 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
107
108 EFX_POPULATE_OWORD_2(reg,
109 FRF_AB_XM_MSK_RMTFLT, !enable,
110 FRF_AB_XM_MSK_LCLFLT, !enable);
111 efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
112} 105}
113 106
114static bool falcon_xgxs_link_ok(struct efx_nic *efx) 107static bool falcon_xgxs_link_ok(struct efx_nic *efx)
@@ -283,15 +276,13 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
283 276
284static int falcon_reconfigure_xmac(struct efx_nic *efx) 277static int falcon_reconfigure_xmac(struct efx_nic *efx)
285{ 278{
286 falcon_mask_status_intr(efx, false);
287
288 falcon_reconfigure_xgxs_core(efx); 279 falcon_reconfigure_xgxs_core(efx);
289 falcon_reconfigure_xmac_core(efx); 280 falcon_reconfigure_xmac_core(efx);
290 281
291 falcon_reconfigure_mac_wrapper(efx); 282 falcon_reconfigure_mac_wrapper(efx);
292 283
293 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); 284 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
294 falcon_mask_status_intr(efx, true); 285 falcon_ack_status_intr(efx);
295 286
296 return 0; 287 return 0;
297} 288}
@@ -362,9 +353,8 @@ void falcon_poll_xmac(struct efx_nic *efx)
362 !efx->xmac_poll_required) 353 !efx->xmac_poll_required)
363 return; 354 return;
364 355
365 falcon_mask_status_intr(efx, false);
366 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); 356 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
367 falcon_mask_status_intr(efx, true); 357 falcon_ack_status_intr(efx);
368} 358}
369 359
370struct efx_mac_operations falcon_xmac_operations = { 360struct efx_mac_operations falcon_xmac_operations = {
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index c48669c77414..93cc3c1b9450 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -613,7 +613,7 @@ int efx_mcdi_fwver(struct efx_nic *efx, u64 *version, u32 *build)
613 } 613 }
614 614
615 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) { 615 if (outlength < MC_CMD_GET_VERSION_V1_OUT_LEN) {
616 rc = -EMSGSIZE; 616 rc = -EIO;
617 goto fail; 617 goto fail;
618 } 618 }
619 619
@@ -647,8 +647,10 @@ int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
647 outbuf, sizeof(outbuf), &outlen); 647 outbuf, sizeof(outbuf), &outlen);
648 if (rc) 648 if (rc)
649 goto fail; 649 goto fail;
650 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) 650 if (outlen < MC_CMD_DRV_ATTACH_OUT_LEN) {
651 rc = -EIO;
651 goto fail; 652 goto fail;
653 }
652 654
653 if (was_attached != NULL) 655 if (was_attached != NULL)
654 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE); 656 *was_attached = MCDI_DWORD(outbuf, DRV_ATTACH_OUT_OLD_STATE);
@@ -676,7 +678,7 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
676 goto fail; 678 goto fail;
677 679
678 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) { 680 if (outlen < MC_CMD_GET_BOARD_CFG_OUT_LEN) {
679 rc = -EMSGSIZE; 681 rc = -EIO;
680 goto fail; 682 goto fail;
681 } 683 }
682 684
@@ -738,8 +740,10 @@ int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
738 outbuf, sizeof(outbuf), &outlen); 740 outbuf, sizeof(outbuf), &outlen);
739 if (rc) 741 if (rc)
740 goto fail; 742 goto fail;
741 if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) 743 if (outlen < MC_CMD_NVRAM_TYPES_OUT_LEN) {
744 rc = -EIO;
742 goto fail; 745 goto fail;
746 }
743 747
744 *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES); 748 *nvram_types_out = MCDI_DWORD(outbuf, NVRAM_TYPES_OUT_TYPES);
745 return 0; 749 return 0;
@@ -765,8 +769,10 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
765 outbuf, sizeof(outbuf), &outlen); 769 outbuf, sizeof(outbuf), &outlen);
766 if (rc) 770 if (rc)
767 goto fail; 771 goto fail;
768 if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) 772 if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN) {
773 rc = -EIO;
769 goto fail; 774 goto fail;
775 }
770 776
771 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE); 777 *size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
772 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE); 778 *erase_size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_ERASESIZE);
@@ -926,20 +932,26 @@ int efx_mcdi_nvram_test_all(struct efx_nic *efx)
926 932
927 rc = efx_mcdi_nvram_types(efx, &nvram_types); 933 rc = efx_mcdi_nvram_types(efx, &nvram_types);
928 if (rc) 934 if (rc)
929 return rc; 935 goto fail1;
930 936
931 type = 0; 937 type = 0;
932 while (nvram_types != 0) { 938 while (nvram_types != 0) {
933 if (nvram_types & 1) { 939 if (nvram_types & 1) {
934 rc = efx_mcdi_nvram_test(efx, type); 940 rc = efx_mcdi_nvram_test(efx, type);
935 if (rc) 941 if (rc)
936 return rc; 942 goto fail2;
937 } 943 }
938 type++; 944 type++;
939 nvram_types >>= 1; 945 nvram_types >>= 1;
940 } 946 }
941 947
942 return 0; 948 return 0;
949
950fail2:
951 EFX_ERR(efx, "%s: failed type=%u\n", __func__, type);
952fail1:
953 EFX_ERR(efx, "%s: failed rc=%d\n", __func__, rc);
954 return rc;
943} 955}
944 956
945static int efx_mcdi_read_assertion(struct efx_nic *efx) 957static int efx_mcdi_read_assertion(struct efx_nic *efx)
@@ -968,7 +980,7 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
968 if (rc) 980 if (rc)
969 return rc; 981 return rc;
970 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN) 982 if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
971 return -EINVAL; 983 return -EIO;
972 984
973 /* Print out any recorded assertion state */ 985 /* Print out any recorded assertion state */
974 flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS); 986 flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
@@ -1086,7 +1098,7 @@ int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
1086 goto fail; 1098 goto fail;
1087 1099
1088 if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) { 1100 if (outlen < MC_CMD_WOL_FILTER_SET_OUT_LEN) {
1089 rc = -EMSGSIZE; 1101 rc = -EIO;
1090 goto fail; 1102 goto fail;
1091 } 1103 }
1092 1104
@@ -1121,7 +1133,7 @@ int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
1121 goto fail; 1133 goto fail;
1122 1134
1123 if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) { 1135 if (outlen < MC_CMD_WOL_FILTER_GET_OUT_LEN) {
1124 rc = -EMSGSIZE; 1136 rc = -EIO;
1125 goto fail; 1137 goto fail;
1126 } 1138 }
1127 1139
diff --git a/drivers/net/sfc/mcdi_mac.c b/drivers/net/sfc/mcdi_mac.c
index 06d24a1e412a..39182631ac92 100644
--- a/drivers/net/sfc/mcdi_mac.c
+++ b/drivers/net/sfc/mcdi_mac.c
@@ -80,7 +80,7 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
80 u8 inbuf[MC_CMD_MAC_STATS_IN_LEN]; 80 u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
81 int rc; 81 int rc;
82 efx_dword_t *cmd_ptr; 82 efx_dword_t *cmd_ptr;
83 int period = 1000; 83 int period = enable ? 1000 : 0;
84 u32 addr_hi; 84 u32 addr_hi;
85 u32 addr_lo; 85 u32 addr_lo;
86 86
@@ -92,21 +92,14 @@ int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
92 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo); 92 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
93 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi); 93 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
94 cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD); 94 cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
95 if (enable) 95 EFX_POPULATE_DWORD_7(*cmd_ptr,
96 EFX_POPULATE_DWORD_6(*cmd_ptr, 96 MC_CMD_MAC_STATS_CMD_DMA, !!enable,
97 MC_CMD_MAC_STATS_CMD_DMA, 1, 97 MC_CMD_MAC_STATS_CMD_CLEAR, clear,
98 MC_CMD_MAC_STATS_CMD_CLEAR, clear, 98 MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
99 MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1, 99 MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, !!enable,
100 MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 1, 100 MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0,
101 MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0, 101 MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT, 1,
102 MC_CMD_MAC_STATS_CMD_PERIOD_MS, period); 102 MC_CMD_MAC_STATS_CMD_PERIOD_MS, period);
103 else
104 EFX_POPULATE_DWORD_5(*cmd_ptr,
105 MC_CMD_MAC_STATS_CMD_DMA, 0,
106 MC_CMD_MAC_STATS_CMD_CLEAR, clear,
107 MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE, 1,
108 MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE, 0,
109 MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR, 0);
110 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len); 103 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
111 104
112 rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf), 105 rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index bd59302695b3..90359e644006 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -863,7 +863,7 @@
863 * bist output. The driver should only consume the BIST output 863 * bist output. The driver should only consume the BIST output
864 * after validating OUTLEN and PHY_CFG.PHY_TYPE. 864 * after validating OUTLEN and PHY_CFG.PHY_TYPE.
865 * 865 *
866 * If a driver can't succesfully parse the BIST output, it should 866 * If a driver can't successfully parse the BIST output, it should
867 * still respect the pass/Fail in OUT.RESULT 867 * still respect the pass/Fail in OUT.RESULT
868 * 868 *
869 * Locks required: PHY_LOCK if doing a PHY BIST 869 * Locks required: PHY_LOCK if doing a PHY BIST
@@ -872,7 +872,7 @@
872#define MC_CMD_POLL_BIST 0x26 872#define MC_CMD_POLL_BIST 0x26
873#define MC_CMD_POLL_BIST_IN_LEN 0 873#define MC_CMD_POLL_BIST_IN_LEN 0
874#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN 874#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
875#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40 875#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
876#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8 876#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
877#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 877#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
878#define MC_CMD_POLL_BIST_RUNNING 1 878#define MC_CMD_POLL_BIST_RUNNING 1
@@ -882,15 +882,14 @@
882/* Generic: */ 882/* Generic: */
883#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4 883#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
884/* SFT9001-specific: */ 884/* SFT9001-specific: */
885/* (offset 4 unused?) */ 885#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 4
886#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8 886#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
887#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12 887#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
888#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16 888#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
889#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20 889#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
890#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24 890#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
891#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28 891#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
892#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32 892#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
893#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
894#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1 893#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
895#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2 894#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
896#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3 895#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
@@ -1054,9 +1053,13 @@
1054/* MC_CMD_PHY_STATS: 1053/* MC_CMD_PHY_STATS:
1055 * Get generic PHY statistics 1054 * Get generic PHY statistics
1056 * 1055 *
1057 * This call returns the statistics for a generic PHY, by direct DMA 1056 * This call returns the statistics for a generic PHY in a sparse
1058 * into host memory, in a sparse array (indexed by the enumerate). 1057 * array (indexed by the enumerate). Each value is represented by
1059 * Each value is represented by a 32bit number. 1058 * a 32bit number.
1059 *
1060 * If the DMA_ADDR is 0, then no DMA is performed, and the statistics
1061 * may be read directly out of shared memory. If DMA_ADDR != 0, then
1062 * the statistics are dmad to that (page-aligned location)
1060 * 1063 *
1061 * Locks required: None 1064 * Locks required: None
1062 * Returns: 0, ETIME 1065 * Returns: 0, ETIME
@@ -1066,7 +1069,8 @@
1066#define MC_CMD_PHY_STATS_IN_LEN 8 1069#define MC_CMD_PHY_STATS_IN_LEN 8
1067#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0 1070#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
1068#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4 1071#define MC_CMD_PHY_STATS_IN_DMA_ADDR_HI_OFST 4
1069#define MC_CMD_PHY_STATS_OUT_LEN 0 1072#define MC_CMD_PHY_STATS_OUT_DMA_LEN 0
1073#define MC_CMD_PHY_STATS_OUT_NO_DMA_LEN (MC_CMD_PHY_NSTATS * 4)
1070 1074
1071/* Unified MAC statistics enumeration */ 1075/* Unified MAC statistics enumeration */
1072#define MC_CMD_MAC_GENERATION_START 0 1076#define MC_CMD_MAC_GENERATION_START 0
@@ -1158,11 +1162,13 @@
1158#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1 1162#define MC_CMD_MAC_STATS_CMD_CLEAR_WIDTH 1
1159#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2 1163#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_LBN 2
1160#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1 1164#define MC_CMD_MAC_STATS_CMD_PERIODIC_CHANGE_WIDTH 1
1161/* Fields only relevent when PERIODIC_CHANGE is set */ 1165/* Remaining PERIOD* fields only relevent when PERIODIC_CHANGE is set */
1162#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3 1166#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_LBN 3
1163#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1 1167#define MC_CMD_MAC_STATS_CMD_PERIODIC_ENABLE_WIDTH 1
1164#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4 1168#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_LBN 4
1165#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1 1169#define MC_CMD_MAC_STATS_CMD_PERIODIC_CLEAR_WIDTH 1
1170#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_LBN 5
1171#define MC_CMD_MAC_STATS_CMD_PERIODIC_NOEVENT_WIDTH 1
1166#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16 1172#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_LBN 16
1167#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16 1173#define MC_CMD_MAC_STATS_CMD_PERIOD_MS_WIDTH 16
1168#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12 1174#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
@@ -1729,6 +1735,39 @@
1729#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */ 1735#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
1730#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */ 1736#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
1731 1737
1738/* MC_CMD_TEST_HACK: (debug (unsurprisingly))
1739 * Change bits of network port state for test purposes in ways that would never be
1740 * useful in normal operation and so need a special command to change. */
1741#define MC_CMD_TEST_HACK 0x2f
1742#define MC_CMD_TEST_HACK_IN_LEN 8
1743#define MC_CMD_TEST_HACK_IN_TXPAD_OFST 0
1744#define MC_CMD_TEST_HACK_IN_TXPAD_AUTO 0 /* Let the MC manage things */
1745#define MC_CMD_TEST_HACK_IN_TXPAD_ON 1 /* Force on */
1746#define MC_CMD_TEST_HACK_IN_TXPAD_OFF 2 /* Force on */
1747#define MC_CMD_TEST_HACK_IN_IPG_OFST 4 /* Takes a value in bits */
1748#define MC_CMD_TEST_HACK_IN_IPG_AUTO 0 /* The MC picks the value */
1749#define MC_CMD_TEST_HACK_OUT_LEN 0
1750
1751/* MC_CMD_SENSOR_SET_LIMS: (debug) (mostly) adjust the sensor limits. This
1752 * is a warranty-voiding operation.
1753 *
1754 * IN: sensor identifier (one of the enumeration starting with MC_CMD_SENSOR_CONTROLLER_TEMP
1755 * followed by 4 32-bit values: min(warning) max(warning), min(fatal), max(fatal). Which
1756 * of these limits are meaningful and what their interpretation is is sensor-specific.
1757 *
1758 * OUT: nothing
1759 *
1760 * Returns: ENOENT if the sensor specified does not exist, EINVAL if the limits are
1761 * out of range.
1762 */
1763#define MC_CMD_SENSOR_SET_LIMS 0x4e
1764#define MC_CMD_SENSOR_SET_LIMS_IN_LEN 20
1765#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
1766#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
1767#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
1768#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
1769#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
1770
1732/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be 1771/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
1733 * used for post-3.0 extensions. If you run out of space, look for gaps or 1772 * used for post-3.0 extensions. If you run out of space, look for gaps or
1734 * commands that are unused in the existing range. */ 1773 * commands that are unused in the existing range. */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index 2f2354696663..6032c0e1f1f8 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -17,6 +17,8 @@
17#include "mcdi.h" 17#include "mcdi.h"
18#include "mcdi_pcol.h" 18#include "mcdi_pcol.h"
19#include "mdio_10g.h" 19#include "mdio_10g.h"
20#include "nic.h"
21#include "selftest.h"
20 22
21struct efx_mcdi_phy_cfg { 23struct efx_mcdi_phy_cfg {
22 u32 flags; 24 u32 flags;
@@ -48,7 +50,7 @@ efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_cfg *cfg)
48 goto fail; 50 goto fail;
49 51
50 if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) { 52 if (outlen < MC_CMD_GET_PHY_CFG_OUT_LEN) {
51 rc = -EMSGSIZE; 53 rc = -EIO;
52 goto fail; 54 goto fail;
53 } 55 }
54 56
@@ -111,7 +113,7 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
111 goto fail; 113 goto fail;
112 114
113 if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) { 115 if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
114 rc = -EMSGSIZE; 116 rc = -EIO;
115 goto fail; 117 goto fail;
116 } 118 }
117 119
@@ -587,13 +589,153 @@ static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
587 return rc; 589 return rc;
588 590
589 if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN) 591 if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
590 return -EMSGSIZE; 592 return -EIO;
591 if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK) 593 if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
592 return -EINVAL; 594 return -EINVAL;
593 595
594 return 0; 596 return 0;
595} 597}
596 598
599static const char *const mcdi_sft9001_cable_diag_names[] = {
600 "cable.pairA.length",
601 "cable.pairB.length",
602 "cable.pairC.length",
603 "cable.pairD.length",
604 "cable.pairA.status",
605 "cable.pairB.status",
606 "cable.pairC.status",
607 "cable.pairD.status",
608};
609
610static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
611 int *results)
612{
613 unsigned int retry, i, count = 0;
614 size_t outlen;
615 u32 status;
616 u8 *buf, *ptr;
617 int rc;
618
619 buf = kzalloc(0x100, GFP_KERNEL);
620 if (buf == NULL)
621 return -ENOMEM;
622
623 BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
624 MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
625 rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
626 NULL, 0, NULL);
627 if (rc)
628 goto out;
629
630 /* Wait up to 10s for BIST to finish */
631 for (retry = 0; retry < 100; ++retry) {
632 BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
633 rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
634 buf, 0x100, &outlen);
635 if (rc)
636 goto out;
637
638 status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
639 if (status != MC_CMD_POLL_BIST_RUNNING)
640 goto finished;
641
642 msleep(100);
643 }
644
645 rc = -ETIMEDOUT;
646 goto out;
647
648finished:
649 results[count++] = (status == MC_CMD_POLL_BIST_PASSED) ? 1 : -1;
650
651 /* SFT9001 specific cable diagnostics output */
652 if (efx->phy_type == PHY_TYPE_SFT9001B &&
653 (bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
654 bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
655 ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
656 if (status == MC_CMD_POLL_BIST_PASSED &&
657 outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
658 for (i = 0; i < 8; i++) {
659 results[count + i] =
660 EFX_DWORD_FIELD(((efx_dword_t *)ptr)[i],
661 EFX_DWORD_0);
662 }
663 }
664 count += 8;
665 }
666 rc = count;
667
668out:
669 kfree(buf);
670
671 return rc;
672}
673
674static int efx_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
675 unsigned flags)
676{
677 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
678 u32 mode;
679 int rc;
680
681 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
682 rc = efx_mcdi_bist(efx, MC_CMD_PHY_BIST, results);
683 if (rc < 0)
684 return rc;
685
686 results += rc;
687 }
688
689 /* If we support both LONG and SHORT, then run each in response to
690 * break or not. Otherwise, run the one we support */
691 mode = 0;
692 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN)) {
693 if ((flags & ETH_TEST_FL_OFFLINE) &&
694 (phy_cfg->flags &
695 (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN)))
696 mode = MC_CMD_PHY_BIST_CABLE_LONG;
697 else
698 mode = MC_CMD_PHY_BIST_CABLE_SHORT;
699 } else if (phy_cfg->flags &
700 (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))
701 mode = MC_CMD_PHY_BIST_CABLE_LONG;
702
703 if (mode != 0) {
704 rc = efx_mcdi_bist(efx, mode, results);
705 if (rc < 0)
706 return rc;
707 results += rc;
708 }
709
710 return 0;
711}
712
713const char *efx_mcdi_phy_test_name(struct efx_nic *efx, unsigned int index)
714{
715 struct efx_mcdi_phy_cfg *phy_cfg = efx->phy_data;
716
717 if (phy_cfg->flags & (1 << MC_CMD_GET_PHY_CFG_BIST_LBN)) {
718 if (index == 0)
719 return "bist";
720 --index;
721 }
722
723 if (phy_cfg->flags & ((1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN) |
724 (1 << MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN))) {
725 if (index == 0)
726 return "cable";
727 --index;
728
729 if (efx->phy_type == PHY_TYPE_SFT9001B) {
730 if (index < ARRAY_SIZE(mcdi_sft9001_cable_diag_names))
731 return mcdi_sft9001_cable_diag_names[index];
732 index -= ARRAY_SIZE(mcdi_sft9001_cable_diag_names);
733 }
734 }
735
736 return NULL;
737}
738
597struct efx_phy_operations efx_mcdi_phy_ops = { 739struct efx_phy_operations efx_mcdi_phy_ops = {
598 .probe = efx_mcdi_phy_probe, 740 .probe = efx_mcdi_phy_probe,
599 .init = efx_port_dummy_op_int, 741 .init = efx_port_dummy_op_int,
@@ -604,6 +746,6 @@ struct efx_phy_operations efx_mcdi_phy_ops = {
604 .get_settings = efx_mcdi_phy_get_settings, 746 .get_settings = efx_mcdi_phy_get_settings,
605 .set_settings = efx_mcdi_phy_set_settings, 747 .set_settings = efx_mcdi_phy_set_settings,
606 .test_alive = efx_mcdi_phy_test_alive, 748 .test_alive = efx_mcdi_phy_test_alive,
607 .run_tests = NULL, 749 .run_tests = efx_mcdi_phy_run_tests,
608 .test_name = NULL, 750 .test_name = efx_mcdi_phy_test_name,
609}; 751};
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index cb018e272097..2e6fd89f2a72 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -85,9 +85,13 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
85#define EFX_MAX_CHANNELS 32 85#define EFX_MAX_CHANNELS 32
86#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS 86#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
87 87
88#define EFX_TX_QUEUE_OFFLOAD_CSUM 0 88/* Checksum generation is a per-queue option in hardware, so each
89#define EFX_TX_QUEUE_NO_CSUM 1 89 * queue visible to the networking core is backed by two hardware TX
90#define EFX_TX_QUEUE_COUNT 2 90 * queues. */
91#define EFX_MAX_CORE_TX_QUEUES EFX_MAX_CHANNELS
92#define EFX_TXQ_TYPE_OFFLOAD 1
93#define EFX_TXQ_TYPES 2
94#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CORE_TX_QUEUES)
91 95
92/** 96/**
93 * struct efx_special_buffer - An Efx special buffer 97 * struct efx_special_buffer - An Efx special buffer
@@ -187,7 +191,7 @@ struct efx_tx_buffer {
187struct efx_tx_queue { 191struct efx_tx_queue {
188 /* Members which don't change on the fast path */ 192 /* Members which don't change on the fast path */
189 struct efx_nic *efx ____cacheline_aligned_in_smp; 193 struct efx_nic *efx ____cacheline_aligned_in_smp;
190 int queue; 194 unsigned queue;
191 struct efx_channel *channel; 195 struct efx_channel *channel;
192 struct efx_nic *nic; 196 struct efx_nic *nic;
193 struct efx_tx_buffer *buffer; 197 struct efx_tx_buffer *buffer;
@@ -306,11 +310,6 @@ struct efx_buffer {
306}; 310};
307 311
308 312
309/* Flags for channel->used_flags */
310#define EFX_USED_BY_RX 1
311#define EFX_USED_BY_TX 2
312#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
313
314enum efx_rx_alloc_method { 313enum efx_rx_alloc_method {
315 RX_ALLOC_METHOD_AUTO = 0, 314 RX_ALLOC_METHOD_AUTO = 0,
316 RX_ALLOC_METHOD_SKB = 1, 315 RX_ALLOC_METHOD_SKB = 1,
@@ -327,7 +326,6 @@ enum efx_rx_alloc_method {
327 * @efx: Associated Efx NIC 326 * @efx: Associated Efx NIC
328 * @channel: Channel instance number 327 * @channel: Channel instance number
329 * @name: Name for channel and IRQ 328 * @name: Name for channel and IRQ
330 * @used_flags: Channel is used by net driver
331 * @enabled: Channel enabled indicator 329 * @enabled: Channel enabled indicator
332 * @irq: IRQ number (MSI and MSI-X only) 330 * @irq: IRQ number (MSI and MSI-X only)
333 * @irq_moderation: IRQ moderation value (in hardware ticks) 331 * @irq_moderation: IRQ moderation value (in hardware ticks)
@@ -352,12 +350,14 @@ enum efx_rx_alloc_method {
352 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors 350 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
353 * @n_rx_overlength: Count of RX_OVERLENGTH errors 351 * @n_rx_overlength: Count of RX_OVERLENGTH errors
354 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 352 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
353 * @tx_queue: Pointer to first TX queue, or %NULL if not used for TX
354 * @tx_stop_count: Core TX queue stop count
355 * @tx_stop_lock: Core TX queue stop lock
355 */ 356 */
356struct efx_channel { 357struct efx_channel {
357 struct efx_nic *efx; 358 struct efx_nic *efx;
358 int channel; 359 int channel;
359 char name[IFNAMSIZ + 6]; 360 char name[IFNAMSIZ + 6];
360 int used_flags;
361 bool enabled; 361 bool enabled;
362 int irq; 362 int irq;
363 unsigned int irq_moderation; 363 unsigned int irq_moderation;
@@ -389,6 +389,9 @@ struct efx_channel {
389 struct efx_rx_buffer *rx_pkt; 389 struct efx_rx_buffer *rx_pkt;
390 bool rx_pkt_csummed; 390 bool rx_pkt_csummed;
391 391
392 struct efx_tx_queue *tx_queue;
393 atomic_t tx_stop_count;
394 spinlock_t tx_stop_lock;
392}; 395};
393 396
394enum efx_led_mode { 397enum efx_led_mode {
@@ -661,8 +664,9 @@ union efx_multicast_hash {
661 * @rx_queue: RX DMA queues 664 * @rx_queue: RX DMA queues
662 * @channel: Channels 665 * @channel: Channels
663 * @next_buffer_table: First available buffer table id 666 * @next_buffer_table: First available buffer table id
664 * @n_rx_queues: Number of RX queues
665 * @n_channels: Number of channels in use 667 * @n_channels: Number of channels in use
668 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
669 * @n_tx_channels: Number of channels used for TX
666 * @rx_buffer_len: RX buffer length 670 * @rx_buffer_len: RX buffer length
667 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 671 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
668 * @int_error_count: Number of internal errors seen recently 672 * @int_error_count: Number of internal errors seen recently
@@ -672,6 +676,8 @@ union efx_multicast_hash {
672 * This register is written with the SMP processor ID whenever an 676 * This register is written with the SMP processor ID whenever an
673 * interrupt is handled. It is used by efx_nic_test_interrupt() 677 * interrupt is handled. It is used by efx_nic_test_interrupt()
674 * to verify that an interrupt has occurred. 678 * to verify that an interrupt has occurred.
679 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
680 * @fatal_irq_level: IRQ level (bit number) used for serious errors
675 * @spi_flash: SPI flash device 681 * @spi_flash: SPI flash device
676 * This field will be %NULL if no flash device is present (or for Siena). 682 * This field will be %NULL if no flash device is present (or for Siena).
677 * @spi_eeprom: SPI EEPROM device 683 * @spi_eeprom: SPI EEPROM device
@@ -691,8 +697,6 @@ union efx_multicast_hash {
691 * @port_initialized: Port initialized? 697 * @port_initialized: Port initialized?
692 * @net_dev: Operating system network device. Consider holding the rtnl lock 698 * @net_dev: Operating system network device. Consider holding the rtnl lock
693 * @rx_checksum_enabled: RX checksumming enabled 699 * @rx_checksum_enabled: RX checksumming enabled
694 * @netif_stop_count: Port stop count
695 * @netif_stop_lock: Port stop lock
696 * @mac_stats: MAC statistics. These include all statistics the MACs 700 * @mac_stats: MAC statistics. These include all statistics the MACs
697 * can provide. Generic code converts these into a standard 701 * can provide. Generic code converts these into a standard
698 * &struct net_device_stats. 702 * &struct net_device_stats.
@@ -740,13 +744,14 @@ struct efx_nic {
740 enum nic_state state; 744 enum nic_state state;
741 enum reset_type reset_pending; 745 enum reset_type reset_pending;
742 746
743 struct efx_tx_queue tx_queue[EFX_TX_QUEUE_COUNT]; 747 struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
744 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; 748 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
745 struct efx_channel channel[EFX_MAX_CHANNELS]; 749 struct efx_channel channel[EFX_MAX_CHANNELS];
746 750
747 unsigned next_buffer_table; 751 unsigned next_buffer_table;
748 int n_rx_queues; 752 unsigned n_channels;
749 int n_channels; 753 unsigned n_rx_channels;
754 unsigned n_tx_channels;
750 unsigned int rx_buffer_len; 755 unsigned int rx_buffer_len;
751 unsigned int rx_buffer_order; 756 unsigned int rx_buffer_order;
752 757
@@ -755,7 +760,8 @@ struct efx_nic {
755 760
756 struct efx_buffer irq_status; 761 struct efx_buffer irq_status;
757 volatile signed int last_irq_cpu; 762 volatile signed int last_irq_cpu;
758 unsigned long irq_zero_count; 763 unsigned irq_zero_count;
764 unsigned fatal_irq_level;
759 765
760 struct efx_spi_device *spi_flash; 766 struct efx_spi_device *spi_flash;
761 struct efx_spi_device *spi_eeprom; 767 struct efx_spi_device *spi_eeprom;
@@ -777,9 +783,6 @@ struct efx_nic {
777 struct net_device *net_dev; 783 struct net_device *net_dev;
778 bool rx_checksum_enabled; 784 bool rx_checksum_enabled;
779 785
780 atomic_t netif_stop_count;
781 spinlock_t netif_stop_lock;
782
783 struct efx_mac_stats mac_stats; 786 struct efx_mac_stats mac_stats;
784 struct efx_buffer stats_buffer; 787 struct efx_buffer stats_buffer;
785 spinlock_t stats_lock; 788 spinlock_t stats_lock;
@@ -924,40 +927,35 @@ struct efx_nic_type {
924 927
925/* Iterate over all used channels */ 928/* Iterate over all used channels */
926#define efx_for_each_channel(_channel, _efx) \ 929#define efx_for_each_channel(_channel, _efx) \
927 for (_channel = &_efx->channel[0]; \ 930 for (_channel = &((_efx)->channel[0]); \
928 _channel < &_efx->channel[EFX_MAX_CHANNELS]; \ 931 _channel < &((_efx)->channel[(efx)->n_channels]); \
929 _channel++) \ 932 _channel++)
930 if (!_channel->used_flags) \
931 continue; \
932 else
933 933
934/* Iterate over all used TX queues */ 934/* Iterate over all used TX queues */
935#define efx_for_each_tx_queue(_tx_queue, _efx) \ 935#define efx_for_each_tx_queue(_tx_queue, _efx) \
936 for (_tx_queue = &_efx->tx_queue[0]; \ 936 for (_tx_queue = &((_efx)->tx_queue[0]); \
937 _tx_queue < &_efx->tx_queue[EFX_TX_QUEUE_COUNT]; \ 937 _tx_queue < &((_efx)->tx_queue[EFX_TXQ_TYPES * \
938 (_efx)->n_tx_channels]); \
938 _tx_queue++) 939 _tx_queue++)
939 940
940/* Iterate over all TX queues belonging to a channel */ 941/* Iterate over all TX queues belonging to a channel */
941#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ 942#define efx_for_each_channel_tx_queue(_tx_queue, _channel) \
942 for (_tx_queue = &_channel->efx->tx_queue[0]; \ 943 for (_tx_queue = (_channel)->tx_queue; \
943 _tx_queue < &_channel->efx->tx_queue[EFX_TX_QUEUE_COUNT]; \ 944 _tx_queue && _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES; \
944 _tx_queue++) \ 945 _tx_queue++)
945 if (_tx_queue->channel != _channel) \
946 continue; \
947 else
948 946
949/* Iterate over all used RX queues */ 947/* Iterate over all used RX queues */
950#define efx_for_each_rx_queue(_rx_queue, _efx) \ 948#define efx_for_each_rx_queue(_rx_queue, _efx) \
951 for (_rx_queue = &_efx->rx_queue[0]; \ 949 for (_rx_queue = &((_efx)->rx_queue[0]); \
952 _rx_queue < &_efx->rx_queue[_efx->n_rx_queues]; \ 950 _rx_queue < &((_efx)->rx_queue[(_efx)->n_rx_channels]); \
953 _rx_queue++) 951 _rx_queue++)
954 952
955/* Iterate over all RX queues belonging to a channel */ 953/* Iterate over all RX queues belonging to a channel */
956#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ 954#define efx_for_each_channel_rx_queue(_rx_queue, _channel) \
957 for (_rx_queue = &_channel->efx->rx_queue[_channel->channel]; \ 955 for (_rx_queue = &((_channel)->efx->rx_queue[(_channel)->channel]); \
958 _rx_queue; \ 956 _rx_queue; \
959 _rx_queue = NULL) \ 957 _rx_queue = NULL) \
960 if (_rx_queue->channel != _channel) \ 958 if (_rx_queue->channel != (_channel)) \
961 continue; \ 959 continue; \
962 else 960 else
963 961
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index b06f8e348307..5d3aaec58556 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -418,7 +418,7 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
418 FRF_BZ_TX_NON_IP_DROP_DIS, 1); 418 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
419 419
420 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 420 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
421 int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM; 421 int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
422 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum); 422 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
423 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS, 423 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
424 !csum); 424 !csum);
@@ -431,10 +431,10 @@ void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
431 efx_oword_t reg; 431 efx_oword_t reg;
432 432
433 /* Only 128 bits in this register */ 433 /* Only 128 bits in this register */
434 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128); 434 BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
435 435
436 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG); 436 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
437 if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM) 437 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
438 clear_bit_le(tx_queue->queue, (void *)&reg); 438 clear_bit_le(tx_queue->queue, (void *)&reg);
439 else 439 else
440 set_bit_le(tx_queue->queue, (void *)&reg); 440 set_bit_le(tx_queue->queue, (void *)&reg);
@@ -654,22 +654,23 @@ void efx_generate_event(struct efx_channel *channel, efx_qword_t *event)
654 * The NIC batches TX completion events; the message we receive is of 654 * The NIC batches TX completion events; the message we receive is of
655 * the form "complete all TX events up to this index". 655 * the form "complete all TX events up to this index".
656 */ 656 */
657static void 657static int
658efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) 658efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
659{ 659{
660 unsigned int tx_ev_desc_ptr; 660 unsigned int tx_ev_desc_ptr;
661 unsigned int tx_ev_q_label; 661 unsigned int tx_ev_q_label;
662 struct efx_tx_queue *tx_queue; 662 struct efx_tx_queue *tx_queue;
663 struct efx_nic *efx = channel->efx; 663 struct efx_nic *efx = channel->efx;
664 int tx_packets = 0;
664 665
665 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { 666 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
666 /* Transmit completion */ 667 /* Transmit completion */
667 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR); 668 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
668 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL); 669 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
669 tx_queue = &efx->tx_queue[tx_ev_q_label]; 670 tx_queue = &efx->tx_queue[tx_ev_q_label];
670 channel->irq_mod_score += 671 tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
671 (tx_ev_desc_ptr - tx_queue->read_count) & 672 EFX_TXQ_MASK);
672 EFX_TXQ_MASK; 673 channel->irq_mod_score += tx_packets;
673 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 674 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
674 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) { 675 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
675 /* Rewrite the FIFO write pointer */ 676 /* Rewrite the FIFO write pointer */
@@ -689,6 +690,8 @@ efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
689 EFX_QWORD_FMT"\n", channel->channel, 690 EFX_QWORD_FMT"\n", channel->channel,
690 EFX_QWORD_VAL(*event)); 691 EFX_QWORD_VAL(*event));
691 } 692 }
693
694 return tx_packets;
692} 695}
693 696
694/* Detect errors included in the rx_evt_pkt_ok bit. */ 697/* Detect errors included in the rx_evt_pkt_ok bit. */
@@ -947,16 +950,17 @@ efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
947 } 950 }
948} 951}
949 952
950int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota) 953int efx_nic_process_eventq(struct efx_channel *channel, int budget)
951{ 954{
952 unsigned int read_ptr; 955 unsigned int read_ptr;
953 efx_qword_t event, *p_event; 956 efx_qword_t event, *p_event;
954 int ev_code; 957 int ev_code;
955 int rx_packets = 0; 958 int tx_packets = 0;
959 int spent = 0;
956 960
957 read_ptr = channel->eventq_read_ptr; 961 read_ptr = channel->eventq_read_ptr;
958 962
959 do { 963 for (;;) {
960 p_event = efx_event(channel, read_ptr); 964 p_event = efx_event(channel, read_ptr);
961 event = *p_event; 965 event = *p_event;
962 966
@@ -970,15 +974,23 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
970 /* Clear this event by marking it all ones */ 974 /* Clear this event by marking it all ones */
971 EFX_SET_QWORD(*p_event); 975 EFX_SET_QWORD(*p_event);
972 976
977 /* Increment read pointer */
978 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
979
973 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE); 980 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
974 981
975 switch (ev_code) { 982 switch (ev_code) {
976 case FSE_AZ_EV_CODE_RX_EV: 983 case FSE_AZ_EV_CODE_RX_EV:
977 efx_handle_rx_event(channel, &event); 984 efx_handle_rx_event(channel, &event);
978 ++rx_packets; 985 if (++spent == budget)
986 goto out;
979 break; 987 break;
980 case FSE_AZ_EV_CODE_TX_EV: 988 case FSE_AZ_EV_CODE_TX_EV:
981 efx_handle_tx_event(channel, &event); 989 tx_packets += efx_handle_tx_event(channel, &event);
990 if (tx_packets >= EFX_TXQ_SIZE) {
991 spent = budget;
992 goto out;
993 }
982 break; 994 break;
983 case FSE_AZ_EV_CODE_DRV_GEN_EV: 995 case FSE_AZ_EV_CODE_DRV_GEN_EV:
984 channel->eventq_magic = EFX_QWORD_FIELD( 996 channel->eventq_magic = EFX_QWORD_FIELD(
@@ -1001,14 +1013,11 @@ int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota)
1001 " (data " EFX_QWORD_FMT ")\n", channel->channel, 1013 " (data " EFX_QWORD_FMT ")\n", channel->channel,
1002 ev_code, EFX_QWORD_VAL(event)); 1014 ev_code, EFX_QWORD_VAL(event));
1003 } 1015 }
1016 }
1004 1017
1005 /* Increment read pointer */ 1018out:
1006 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
1007
1008 } while (rx_packets < rx_quota);
1009
1010 channel->eventq_read_ptr = read_ptr; 1019 channel->eventq_read_ptr = read_ptr;
1011 return rx_packets; 1020 return spent;
1012} 1021}
1013 1022
1014 1023
@@ -1123,7 +1132,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1123 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) { 1132 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
1124 ev_queue = EFX_QWORD_FIELD(*event, 1133 ev_queue = EFX_QWORD_FIELD(*event,
1125 FSF_AZ_DRIVER_EV_SUBDATA); 1134 FSF_AZ_DRIVER_EV_SUBDATA);
1126 if (ev_queue < EFX_TX_QUEUE_COUNT) { 1135 if (ev_queue < EFX_TXQ_TYPES * efx->n_tx_channels) {
1127 tx_queue = efx->tx_queue + ev_queue; 1136 tx_queue = efx->tx_queue + ev_queue;
1128 tx_queue->flushed = FLUSH_DONE; 1137 tx_queue->flushed = FLUSH_DONE;
1129 } 1138 }
@@ -1133,7 +1142,7 @@ static void efx_poll_flush_events(struct efx_nic *efx)
1133 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 1142 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1134 ev_failed = EFX_QWORD_FIELD( 1143 ev_failed = EFX_QWORD_FIELD(
1135 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 1144 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1136 if (ev_queue < efx->n_rx_queues) { 1145 if (ev_queue < efx->n_rx_channels) {
1137 rx_queue = efx->rx_queue + ev_queue; 1146 rx_queue = efx->rx_queue + ev_queue;
1138 rx_queue->flushed = 1147 rx_queue->flushed =
1139 ev_failed ? FLUSH_FAILED : FLUSH_DONE; 1148 ev_failed ? FLUSH_FAILED : FLUSH_DONE;
@@ -1229,15 +1238,9 @@ static inline void efx_nic_interrupts(struct efx_nic *efx,
1229 bool enabled, bool force) 1238 bool enabled, bool force)
1230{ 1239{
1231 efx_oword_t int_en_reg_ker; 1240 efx_oword_t int_en_reg_ker;
1232 unsigned int level = 0;
1233
1234 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1235 /* Set the level always even if we're generating a test
1236 * interrupt, because our legacy interrupt handler is safe */
1237 level = 0x1f;
1238 1241
1239 EFX_POPULATE_OWORD_3(int_en_reg_ker, 1242 EFX_POPULATE_OWORD_3(int_en_reg_ker,
1240 FRF_AZ_KER_INT_LEVE_SEL, level, 1243 FRF_AZ_KER_INT_LEVE_SEL, efx->fatal_irq_level,
1241 FRF_AZ_KER_INT_KER, force, 1244 FRF_AZ_KER_INT_KER, force,
1242 FRF_AZ_DRV_INT_EN_KER, enabled); 1245 FRF_AZ_DRV_INT_EN_KER, enabled);
1243 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER); 1246 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
@@ -1291,11 +1294,10 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1291 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1294 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
1292 EFX_OWORD_VAL(fatal_intr), 1295 EFX_OWORD_VAL(fatal_intr),
1293 error ? "disabling bus mastering" : "no recognised error"); 1296 error ? "disabling bus mastering" : "no recognised error");
1294 if (error == 0)
1295 goto out;
1296 1297
1297 /* If this is a memory parity error dump which blocks are offending */ 1298 /* If this is a memory parity error dump which blocks are offending */
1298 mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER); 1299 mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
1300 EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
1299 if (mem_perr) { 1301 if (mem_perr) {
1300 efx_oword_t reg; 1302 efx_oword_t reg;
1301 efx_reado(efx, &reg, FR_AZ_MEM_STAT); 1303 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
@@ -1324,7 +1326,7 @@ irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
1324 "NIC will be disabled\n"); 1326 "NIC will be disabled\n");
1325 efx_schedule_reset(efx, RESET_TYPE_DISABLE); 1327 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
1326 } 1328 }
1327out: 1329
1328 return IRQ_HANDLED; 1330 return IRQ_HANDLED;
1329} 1331}
1330 1332
@@ -1346,9 +1348,11 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1346 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1348 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1347 1349
1348 /* Check to see if we have a serious error condition */ 1350 /* Check to see if we have a serious error condition */
1349 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1351 if (queues & (1U << efx->fatal_irq_level)) {
1350 if (unlikely(syserr)) 1352 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1351 return efx_nic_fatal_interrupt(efx); 1353 if (unlikely(syserr))
1354 return efx_nic_fatal_interrupt(efx);
1355 }
1352 1356
1353 if (queues != 0) { 1357 if (queues != 0) {
1354 if (EFX_WORKAROUND_15783(efx)) 1358 if (EFX_WORKAROUND_15783(efx))
@@ -1362,33 +1366,28 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
1362 } 1366 }
1363 result = IRQ_HANDLED; 1367 result = IRQ_HANDLED;
1364 1368
1365 } else if (EFX_WORKAROUND_15783(efx) && 1369 } else if (EFX_WORKAROUND_15783(efx)) {
1366 efx->irq_zero_count++ == 0) {
1367 efx_qword_t *event; 1370 efx_qword_t *event;
1368 1371
1369 /* Ensure we rearm all event queues */ 1372 /* We can't return IRQ_HANDLED more than once on seeing ISR=0
1373 * because this might be a shared interrupt. */
1374 if (efx->irq_zero_count++ == 0)
1375 result = IRQ_HANDLED;
1376
1377 /* Ensure we schedule or rearm all event queues */
1370 efx_for_each_channel(channel, efx) { 1378 efx_for_each_channel(channel, efx) {
1371 event = efx_event(channel, channel->eventq_read_ptr); 1379 event = efx_event(channel, channel->eventq_read_ptr);
1372 if (efx_event_present(event)) 1380 if (efx_event_present(event))
1373 efx_schedule_channel(channel); 1381 efx_schedule_channel(channel);
1382 else
1383 efx_nic_eventq_read_ack(channel);
1374 } 1384 }
1375
1376 result = IRQ_HANDLED;
1377 } 1385 }
1378 1386
1379 if (result == IRQ_HANDLED) { 1387 if (result == IRQ_HANDLED) {
1380 efx->last_irq_cpu = raw_smp_processor_id(); 1388 efx->last_irq_cpu = raw_smp_processor_id();
1381 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", 1389 EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1382 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); 1390 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1383 } else if (EFX_WORKAROUND_15783(efx)) {
1384 /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
1385 * because this might be a shared interrupt, but we do need to
1386 * check the channel every time and preemptively rearm it if
1387 * it's idle. */
1388 efx_for_each_channel(channel, efx) {
1389 if (!channel->work_pending)
1390 efx_nic_eventq_read_ack(channel);
1391 }
1392 } 1391 }
1393 1392
1394 return result; 1393 return result;
@@ -1413,9 +1412,11 @@ static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
1413 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1412 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1414 1413
1415 /* Check to see if we have a serious error condition */ 1414 /* Check to see if we have a serious error condition */
1416 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); 1415 if (channel->channel == efx->fatal_irq_level) {
1417 if (unlikely(syserr)) 1416 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1418 return efx_nic_fatal_interrupt(efx); 1417 if (unlikely(syserr))
1418 return efx_nic_fatal_interrupt(efx);
1419 }
1419 1420
1420 /* Schedule processing of the channel */ 1421 /* Schedule processing of the channel */
1421 efx_schedule_channel(channel); 1422 efx_schedule_channel(channel);
@@ -1440,7 +1441,7 @@ static void efx_setup_rss_indir_table(struct efx_nic *efx)
1440 offset < FR_BZ_RX_INDIRECTION_TBL + 0x800; 1441 offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
1441 offset += 0x10) { 1442 offset += 0x10) {
1442 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE, 1443 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1443 i % efx->n_rx_queues); 1444 i % efx->n_rx_channels);
1444 efx_writed(efx, &dword, offset); 1445 efx_writed(efx, &dword, offset);
1445 i++; 1446 i++;
1446 } 1447 }
@@ -1553,6 +1554,13 @@ void efx_nic_init_common(struct efx_nic *efx)
1553 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr); 1554 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1554 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER); 1555 efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
1555 1556
1557 if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
1558 /* Use an interrupt level unused by event queues */
1559 efx->fatal_irq_level = 0x1f;
1560 else
1561 /* Use a valid MSI-X vector */
1562 efx->fatal_irq_level = 0;
1563
1556 /* Enable all the genuinely fatal interrupts. (They are still 1564 /* Enable all the genuinely fatal interrupts. (They are still
1557 * masked by the overall interrupt mask, controlled by 1565 * masked by the overall interrupt mask, controlled by
1558 * falcon_interrupts()). 1566 * falcon_interrupts()).
@@ -1563,6 +1571,8 @@ void efx_nic_init_common(struct efx_nic *efx)
1563 FRF_AZ_ILL_ADR_INT_KER_EN, 1, 1571 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
1564 FRF_AZ_RBUF_OWN_INT_KER_EN, 1, 1572 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
1565 FRF_AZ_TBUF_OWN_INT_KER_EN, 1); 1573 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
1574 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
1575 EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
1566 EFX_INVERT_OWORD(temp); 1576 EFX_INVERT_OWORD(temp);
1567 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER); 1577 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
1568 1578
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 9351c0331a47..bbc2c0c2f843 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -135,12 +135,14 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
135 * @fw_build: Firmware build number 135 * @fw_build: Firmware build number
136 * @mcdi: Management-Controller-to-Driver Interface 136 * @mcdi: Management-Controller-to-Driver Interface
137 * @wol_filter_id: Wake-on-LAN packet filter id 137 * @wol_filter_id: Wake-on-LAN packet filter id
138 * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
138 */ 139 */
139struct siena_nic_data { 140struct siena_nic_data {
140 u64 fw_version; 141 u64 fw_version;
141 u32 fw_build; 142 u32 fw_build;
142 struct efx_mcdi_iface mcdi; 143 struct efx_mcdi_iface mcdi;
143 int wol_filter_id; 144 int wol_filter_id;
145 u8 ipv6_rss_key[40];
144}; 146};
145 147
146extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len); 148extern void siena_print_fwver(struct efx_nic *efx, char *buf, size_t len);
@@ -156,7 +158,7 @@ extern struct efx_nic_type siena_a0_nic_type;
156 ************************************************************************** 158 **************************************************************************
157 */ 159 */
158 160
159extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info); 161extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
160 162
161/* TX data path */ 163/* TX data path */
162extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue); 164extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
@@ -203,6 +205,7 @@ extern void falcon_irq_ack_a1(struct efx_nic *efx);
203extern int efx_nic_flush_queues(struct efx_nic *efx); 205extern int efx_nic_flush_queues(struct efx_nic *efx);
204extern void falcon_start_nic_stats(struct efx_nic *efx); 206extern void falcon_start_nic_stats(struct efx_nic *efx);
205extern void falcon_stop_nic_stats(struct efx_nic *efx); 207extern void falcon_stop_nic_stats(struct efx_nic *efx);
208extern void falcon_setup_xaui(struct efx_nic *efx);
206extern int falcon_reset_xaui(struct efx_nic *efx); 209extern int falcon_reset_xaui(struct efx_nic *efx);
207extern void efx_nic_init_common(struct efx_nic *efx); 210extern void efx_nic_init_common(struct efx_nic *efx);
208 211
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 0106b1d9aae2..371e86cc090f 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -616,10 +616,10 @@ static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
616 goto out; 616 goto out;
617 } 617 }
618 618
619 /* Test every TX queue */ 619 /* Test both types of TX queue */
620 efx_for_each_tx_queue(tx_queue, efx) { 620 efx_for_each_channel_tx_queue(tx_queue, &efx->channel[0]) {
621 state->offload_csum = (tx_queue->queue == 621 state->offload_csum = (tx_queue->queue &
622 EFX_TX_QUEUE_OFFLOAD_CSUM); 622 EFX_TXQ_TYPE_OFFLOAD);
623 rc = efx_test_loopback(tx_queue, 623 rc = efx_test_loopback(tx_queue,
624 &tests->loopback[mode]); 624 &tests->loopback[mode]);
625 if (rc) 625 if (rc)
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index 643bef72b99d..aed495a4dad7 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -18,8 +18,8 @@
18 */ 18 */
19 19
20struct efx_loopback_self_tests { 20struct efx_loopback_self_tests {
21 int tx_sent[EFX_TX_QUEUE_COUNT]; 21 int tx_sent[EFX_TXQ_TYPES];
22 int tx_done[EFX_TX_QUEUE_COUNT]; 22 int tx_done[EFX_TXQ_TYPES];
23 int rx_good; 23 int rx_good;
24 int rx_bad; 24 int rx_bad;
25}; 25};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 38dcc42c4f79..727b4228e081 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -13,6 +13,7 @@
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/random.h>
16#include "net_driver.h" 17#include "net_driver.h"
17#include "bitfield.h" 18#include "bitfield.h"
18#include "efx.h" 19#include "efx.h"
@@ -274,6 +275,9 @@ static int siena_probe_nic(struct efx_nic *efx)
274 goto fail5; 275 goto fail5;
275 } 276 }
276 277
278 get_random_bytes(&nic_data->ipv6_rss_key,
279 sizeof(nic_data->ipv6_rss_key));
280
277 return 0; 281 return 0;
278 282
279fail5: 283fail5:
@@ -293,6 +297,7 @@ fail1:
293 */ 297 */
294static int siena_init_nic(struct efx_nic *efx) 298static int siena_init_nic(struct efx_nic *efx)
295{ 299{
300 struct siena_nic_data *nic_data = efx->nic_data;
296 efx_oword_t temp; 301 efx_oword_t temp;
297 int rc; 302 int rc;
298 303
@@ -319,6 +324,20 @@ static int siena_init_nic(struct efx_nic *efx)
319 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1); 324 EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_INGR_EN, 1);
320 efx_writeo(efx, &temp, FR_AZ_RX_CFG); 325 efx_writeo(efx, &temp, FR_AZ_RX_CFG);
321 326
327 /* Enable IPv6 RSS */
328 BUILD_BUG_ON(sizeof(nic_data->ipv6_rss_key) !=
329 2 * sizeof(temp) + FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8 ||
330 FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN != 0);
331 memcpy(&temp, nic_data->ipv6_rss_key, sizeof(temp));
332 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
333 memcpy(&temp, nic_data->ipv6_rss_key + sizeof(temp), sizeof(temp));
334 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
335 EFX_POPULATE_OWORD_2(temp, FRF_CZ_RX_RSS_IPV6_THASH_ENABLE, 1,
336 FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE, 1);
337 memcpy(&temp, nic_data->ipv6_rss_key + 2 * sizeof(temp),
338 FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
339 efx_writeo(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
340
322 if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0) 341 if (efx_nic_rx_xoff_thresh >= 0 || efx_nic_rx_xon_thresh >= 0)
323 /* No MCDI operation has been defined to set thresholds */ 342 /* No MCDI operation has been defined to set thresholds */
324 EFX_ERR(efx, "ignoring RX flow control thresholds\n"); 343 EFX_ERR(efx, "ignoring RX flow control thresholds\n");
@@ -456,8 +475,17 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
456 475
457static void siena_update_nic_stats(struct efx_nic *efx) 476static void siena_update_nic_stats(struct efx_nic *efx)
458{ 477{
459 while (siena_try_update_nic_stats(efx) == -EAGAIN) 478 int retry;
460 cpu_relax(); 479
480 /* If we're unlucky enough to read statistics wduring the DMA, wait
481 * up to 10ms for it to finish (typically takes <500us) */
482 for (retry = 0; retry < 100; ++retry) {
483 if (siena_try_update_nic_stats(efx) == 0)
484 return;
485 udelay(100);
486 }
487
488 /* Use the old values instead */
461} 489}
462 490
463static void siena_start_nic_stats(struct efx_nic *efx) 491static void siena_start_nic_stats(struct efx_nic *efx)
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index be0e110a1f73..6bb12a87ef2d 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,32 +30,46 @@
30 */ 30 */
31#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u) 31#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
32 32
33/* We want to be able to nest calls to netif_stop_queue(), since each 33/* We need to be able to nest calls to netif_tx_stop_queue(), partly
34 * channel can have an individual stop on the queue. 34 * because of the 2 hardware queues associated with each core queue,
35 */ 35 * but also so that we can inhibit TX for reasons other than a full
36void efx_stop_queue(struct efx_nic *efx) 36 * hardware queue. */
37void efx_stop_queue(struct efx_channel *channel)
37{ 38{
38 spin_lock_bh(&efx->netif_stop_lock); 39 struct efx_nic *efx = channel->efx;
40
41 if (!channel->tx_queue)
42 return;
43
44 spin_lock_bh(&channel->tx_stop_lock);
39 EFX_TRACE(efx, "stop TX queue\n"); 45 EFX_TRACE(efx, "stop TX queue\n");
40 46
41 atomic_inc(&efx->netif_stop_count); 47 atomic_inc(&channel->tx_stop_count);
42 netif_stop_queue(efx->net_dev); 48 netif_tx_stop_queue(
49 netdev_get_tx_queue(
50 efx->net_dev,
51 channel->tx_queue->queue / EFX_TXQ_TYPES));
43 52
44 spin_unlock_bh(&efx->netif_stop_lock); 53 spin_unlock_bh(&channel->tx_stop_lock);
45} 54}
46 55
47/* Wake netif's TX queue 56/* Decrement core TX queue stop count and wake it if the count is 0 */
48 * We want to be able to nest calls to netif_stop_queue(), since each 57void efx_wake_queue(struct efx_channel *channel)
49 * channel can have an individual stop on the queue.
50 */
51void efx_wake_queue(struct efx_nic *efx)
52{ 58{
59 struct efx_nic *efx = channel->efx;
60
61 if (!channel->tx_queue)
62 return;
63
53 local_bh_disable(); 64 local_bh_disable();
54 if (atomic_dec_and_lock(&efx->netif_stop_count, 65 if (atomic_dec_and_lock(&channel->tx_stop_count,
55 &efx->netif_stop_lock)) { 66 &channel->tx_stop_lock)) {
56 EFX_TRACE(efx, "waking TX queue\n"); 67 EFX_TRACE(efx, "waking TX queue\n");
57 netif_wake_queue(efx->net_dev); 68 netif_tx_wake_queue(
58 spin_unlock(&efx->netif_stop_lock); 69 netdev_get_tx_queue(
70 efx->net_dev,
71 channel->tx_queue->queue / EFX_TXQ_TYPES));
72 spin_unlock(&channel->tx_stop_lock);
59 } 73 }
60 local_bh_enable(); 74 local_bh_enable();
61} 75}
@@ -298,7 +312,7 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
298 rc = NETDEV_TX_BUSY; 312 rc = NETDEV_TX_BUSY;
299 313
300 if (tx_queue->stopped == 1) 314 if (tx_queue->stopped == 1)
301 efx_stop_queue(efx); 315 efx_stop_queue(tx_queue->channel);
302 316
303 unwind: 317 unwind:
304 /* Work backwards until we hit the original insert pointer value */ 318 /* Work backwards until we hit the original insert pointer value */
@@ -374,10 +388,9 @@ netdev_tx_t efx_hard_start_xmit(struct sk_buff *skb,
374 if (unlikely(efx->port_inhibited)) 388 if (unlikely(efx->port_inhibited))
375 return NETDEV_TX_BUSY; 389 return NETDEV_TX_BUSY;
376 390
391 tx_queue = &efx->tx_queue[EFX_TXQ_TYPES * skb_get_queue_mapping(skb)];
377 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) 392 if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
378 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_OFFLOAD_CSUM]; 393 tx_queue += EFX_TXQ_TYPE_OFFLOAD;
379 else
380 tx_queue = &efx->tx_queue[EFX_TX_QUEUE_NO_CSUM];
381 394
382 return efx_enqueue_skb(tx_queue, skb); 395 return efx_enqueue_skb(tx_queue, skb);
383} 396}
@@ -405,7 +418,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
405 netif_tx_lock(efx->net_dev); 418 netif_tx_lock(efx->net_dev);
406 if (tx_queue->stopped) { 419 if (tx_queue->stopped) {
407 tx_queue->stopped = 0; 420 tx_queue->stopped = 0;
408 efx_wake_queue(efx); 421 efx_wake_queue(tx_queue->channel);
409 } 422 }
410 netif_tx_unlock(efx->net_dev); 423 netif_tx_unlock(efx->net_dev);
411 } 424 }
@@ -488,7 +501,7 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
488 /* Release queue's stop on port, if any */ 501 /* Release queue's stop on port, if any */
489 if (tx_queue->stopped) { 502 if (tx_queue->stopped) {
490 tx_queue->stopped = 0; 503 tx_queue->stopped = 0;
491 efx_wake_queue(tx_queue->efx); 504 efx_wake_queue(tx_queue->channel);
492 } 505 }
493} 506}
494 507
@@ -1120,7 +1133,7 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1120 1133
1121 /* Stop the queue if it wasn't stopped before. */ 1134 /* Stop the queue if it wasn't stopped before. */
1122 if (tx_queue->stopped == 1) 1135 if (tx_queue->stopped == 1)
1123 efx_stop_queue(efx); 1136 efx_stop_queue(tx_queue->channel);
1124 1137
1125 unwind: 1138 unwind:
1126 /* Free the DMA mapping we were in the process of writing out */ 1139 /* Free the DMA mapping we were in the process of writing out */
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index acd9c734e483..518f7fc91473 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -37,7 +37,7 @@
37/* Truncated IPv4 packets can confuse the TX packet parser */ 37/* Truncated IPv4 packets can confuse the TX packet parser */
38#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB 38#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
39/* Legacy ISR read can return zero once */ 39/* Legacy ISR read can return zero once */
40#define EFX_WORKAROUND_15783 EFX_WORKAROUND_SIENA 40#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
41/* Legacy interrupt storm when interrupt fifo fills */ 41/* Legacy interrupt storm when interrupt fifo fills */
42#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA 42#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
43 43
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index c8fc896fc460..cc4bd8c65f8b 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -574,7 +574,7 @@ static inline int sgiseeq_reset(struct net_device *dev)
574 if (err) 574 if (err)
575 return err; 575 return err;
576 576
577 dev->trans_start = jiffies; 577 dev->trans_start = jiffies; /* prevent tx timeout */
578 netif_wake_queue(dev); 578 netif_wake_queue(dev);
579 579
580 return 0; 580 return 0;
@@ -638,8 +638,6 @@ static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
638 if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE)) 638 if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
639 kick_tx(dev, sp, hregs); 639 kick_tx(dev, sp, hregs);
640 640
641 dev->trans_start = jiffies;
642
643 if (!TX_BUFFS_AVAIL(sp)) 641 if (!TX_BUFFS_AVAIL(sp))
644 netif_stop_queue(dev); 642 netif_stop_queue(dev);
645 spin_unlock_irqrestore(&sp->tx_lock, flags); 643 spin_unlock_irqrestore(&sp->tx_lock, flags);
@@ -652,7 +650,7 @@ static void timeout(struct net_device *dev)
652 printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name); 650 printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
653 sgiseeq_reset(dev); 651 sgiseeq_reset(dev);
654 652
655 dev->trans_start = jiffies; 653 dev->trans_start = jiffies; /* prevent tx timeout */
656 netif_wake_queue(dev); 654 netif_wake_queue(dev);
657} 655}
658 656
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 6242b85d5d15..586ed0915a29 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1148,8 +1148,6 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1148 if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS)) 1148 if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
1149 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR); 1149 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
1150 1150
1151 ndev->trans_start = jiffies;
1152
1153 return NETDEV_TX_OK; 1151 return NETDEV_TX_OK;
1154} 1152}
1155 1153
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index b30ce752bbf3..a5d6a6bd0c1a 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -849,13 +849,13 @@ static void sis190_set_rx_mode(struct net_device *dev)
849 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 849 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
850 mc_filter[1] = mc_filter[0] = 0xffffffff; 850 mc_filter[1] = mc_filter[0] = 0xffffffff;
851 } else { 851 } else {
852 struct dev_mc_list *mclist; 852 struct netdev_hw_addr *ha;
853 853
854 rx_mode = AcceptBroadcast | AcceptMyPhys; 854 rx_mode = AcceptBroadcast | AcceptMyPhys;
855 mc_filter[1] = mc_filter[0] = 0; 855 mc_filter[1] = mc_filter[0] = 0;
856 netdev_for_each_mc_addr(mclist, dev) { 856 netdev_for_each_mc_addr(ha, dev) {
857 int bit_nr = 857 int bit_nr =
858 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f; 858 ether_crc(ETH_ALEN, ha->addr) & 0x3f;
859 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 859 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
860 rx_mode |= AcceptMulticast; 860 rx_mode |= AcceptMulticast;
861 } 861 }
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index cc0c731c4f09..bbbded76ff14 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -858,7 +858,6 @@ static void mdio_reset(long mdio_addr)
858 outl(MDDIR | MDIO | MDC, mdio_addr); 858 outl(MDDIR | MDIO | MDC, mdio_addr);
859 mdio_delay(); 859 mdio_delay();
860 } 860 }
861 return;
862} 861}
863 862
864/** 863/**
@@ -953,8 +952,6 @@ static void mdio_write(struct net_device *net_dev, int phy_id, int location,
953 mdio_delay(); 952 mdio_delay();
954 } 953 }
955 outl(0x00, mdio_addr); 954 outl(0x00, mdio_addr);
956
957 return;
958} 955}
959 956
960 957
@@ -1264,7 +1261,6 @@ static void sis630_set_eq(struct net_device *net_dev, u8 revision)
1264 mdio_write(net_dev, sis_priv->cur_phy, MII_RESV, 1261 mdio_write(net_dev, sis_priv->cur_phy, MII_RESV,
1265 (reg14h | 0x2000) & 0xBFFF); 1262 (reg14h | 0x2000) & 0xBFFF);
1266 } 1263 }
1267 return;
1268} 1264}
1269 1265
1270/** 1266/**
@@ -1499,7 +1495,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
1499 } 1495 }
1500 1496
1501 if(netif_msg_link(sis_priv)) 1497 if(netif_msg_link(sis_priv))
1502 printk(KERN_INFO "%s: Media Link On %s %s-duplex \n", 1498 printk(KERN_INFO "%s: Media Link On %s %s-duplex\n",
1503 net_dev->name, 1499 net_dev->name,
1504 *speed == HW_SPEED_100_MBPS ? 1500 *speed == HW_SPEED_100_MBPS ?
1505 "100mbps" : "10mbps", 1501 "100mbps" : "10mbps",
@@ -1523,7 +1519,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
1523 int i; 1519 int i;
1524 1520
1525 if(netif_msg_tx_err(sis_priv)) 1521 if(netif_msg_tx_err(sis_priv))
1526 printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x \n", 1522 printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
1527 net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr)); 1523 net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
1528 1524
1529 /* Disable interrupts by clearing the interrupt mask. */ 1525 /* Disable interrupts by clearing the interrupt mask. */
@@ -1553,14 +1549,13 @@ static void sis900_tx_timeout(struct net_device *net_dev)
1553 1549
1554 spin_unlock_irqrestore(&sis_priv->lock, flags); 1550 spin_unlock_irqrestore(&sis_priv->lock, flags);
1555 1551
1556 net_dev->trans_start = jiffies; 1552 net_dev->trans_start = jiffies; /* prevent tx timeout */
1557 1553
1558 /* load Transmit Descriptor Register */ 1554 /* load Transmit Descriptor Register */
1559 outl(sis_priv->tx_ring_dma, ioaddr + txdp); 1555 outl(sis_priv->tx_ring_dma, ioaddr + txdp);
1560 1556
1561 /* Enable all known interrupts by setting the interrupt mask. */ 1557 /* Enable all known interrupts by setting the interrupt mask. */
1562 outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr); 1558 outl((RxSOVR|RxORN|RxERR|RxOK|TxURN|TxERR|TxIDLE), ioaddr + imr);
1563 return;
1564} 1559}
1565 1560
1566/** 1561/**
@@ -1623,8 +1618,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
1623 1618
1624 spin_unlock_irqrestore(&sis_priv->lock, flags); 1619 spin_unlock_irqrestore(&sis_priv->lock, flags);
1625 1620
1626 net_dev->trans_start = jiffies;
1627
1628 if (netif_msg_tx_queued(sis_priv)) 1621 if (netif_msg_tx_queued(sis_priv))
1629 printk(KERN_DEBUG "%s: Queued Tx packet at %p size %d " 1622 printk(KERN_DEBUG "%s: Queued Tx packet at %p size %d "
1630 "to slot %d.\n", 1623 "to slot %d.\n",
@@ -2298,12 +2291,14 @@ static void set_rx_mode(struct net_device *net_dev)
2298 /* Accept Broadcast packet, destination address matchs our 2291 /* Accept Broadcast packet, destination address matchs our
2299 * MAC address, use Receive Filter to reject unwanted MCAST 2292 * MAC address, use Receive Filter to reject unwanted MCAST
2300 * packets */ 2293 * packets */
2301 struct dev_mc_list *mclist; 2294 struct netdev_hw_addr *ha;
2302 rx_mode = RFAAB; 2295 rx_mode = RFAAB;
2303 2296
2304 netdev_for_each_mc_addr(mclist, net_dev) { 2297 netdev_for_each_mc_addr(ha, net_dev) {
2305 unsigned int bit_nr = 2298 unsigned int bit_nr;
2306 sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev); 2299
2300 bit_nr = sis900_mcast_bitnr(ha->addr,
2301 sis_priv->chipset_rev);
2307 mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf)); 2302 mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
2308 } 2303 }
2309 } 2304 }
@@ -2330,8 +2325,6 @@ static void set_rx_mode(struct net_device *net_dev)
2330 /* restore cr */ 2325 /* restore cr */
2331 outl(cr_saved, ioaddr + cr); 2326 outl(cr_saved, ioaddr + cr);
2332 } 2327 }
2333
2334 return;
2335} 2328}
2336 2329
2337/** 2330/**
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 6028bbb3b28a..9d8d1ac48176 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -1352,7 +1352,7 @@ void rtm_set_timer(struct s_smc *smc)
1352 /* 1352 /*
1353 * MIB timer and hardware timer have the same resolution of 80nS 1353 * MIB timer and hardware timer have the same resolution of 80nS
1354 */ 1354 */
1355 DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns \n", 1355 DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns\n",
1356 (int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ; 1356 (int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ;
1357 outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ; 1357 outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ;
1358} 1358}
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
index e6b33ee05ede..ba45bc794d77 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/skfp/pcmplc.c
@@ -1277,7 +1277,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
1277 1277
1278 mib = phy->mib ; 1278 mib = phy->mib ;
1279 1279
1280 DB_PCMN(1,"SIG rec %x %x: \n", bit,phy->r_val[bit] ) ; 1280 DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ;
1281 bit++ ; 1281 bit++ ;
1282 1282
1283 switch(bit) { 1283 switch(bit) {
@@ -1580,7 +1580,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy
1580 mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ; 1580 mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ;
1581 break ; 1581 break ;
1582 } 1582 }
1583 DB_PCMN(1,"SIG snd %x %x: \n", bit,phy->t_val[bit] ) ; 1583 DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ;
1584} 1584}
1585 1585
1586/* 1586/*
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index d9016b75abc2..31b2dabf094c 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -844,7 +844,6 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
844 spin_lock_irqsave(&bp->DriverLock, Flags); 844 spin_lock_irqsave(&bp->DriverLock, Flags);
845 skfp_ctl_set_multicast_list_wo_lock(dev); 845 skfp_ctl_set_multicast_list_wo_lock(dev);
846 spin_unlock_irqrestore(&bp->DriverLock, Flags); 846 spin_unlock_irqrestore(&bp->DriverLock, Flags);
847 return;
848} // skfp_ctl_set_multicast_list 847} // skfp_ctl_set_multicast_list
849 848
850 849
@@ -852,7 +851,7 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
852static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev) 851static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
853{ 852{
854 struct s_smc *smc = netdev_priv(dev); 853 struct s_smc *smc = netdev_priv(dev);
855 struct dev_mc_list *dmi; 854 struct netdev_hw_addr *ha;
856 855
857 /* Enable promiscuous mode, if necessary */ 856 /* Enable promiscuous mode, if necessary */
858 if (dev->flags & IFF_PROMISC) { 857 if (dev->flags & IFF_PROMISC) {
@@ -876,13 +875,13 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
876 /* use exact filtering */ 875 /* use exact filtering */
877 876
878 // point to first multicast addr 877 // point to first multicast addr
879 netdev_for_each_mc_addr(dmi, dev) { 878 netdev_for_each_mc_addr(ha, dev) {
880 mac_add_multicast(smc, 879 mac_add_multicast(smc,
881 (struct fddi_addr *)dmi->dmi_addr, 880 (struct fddi_addr *)ha->addr,
882 1); 881 1);
883 882
884 pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n", 883 pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
885 dmi->dmi_addr); 884 ha->addr);
886 } 885 }
887 886
888 } else { // more MC addresses than HW supports 887 } else { // more MC addresses than HW supports
@@ -898,7 +897,6 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
898 /* Update adapter filters */ 897 /* Update adapter filters */
899 mac_update_multicast(smc); 898 mac_update_multicast(smc);
900 } 899 }
901 return;
902} // skfp_ctl_set_multicast_list_wo_lock 900} // skfp_ctl_set_multicast_list_wo_lock
903 901
904 902
@@ -1076,7 +1074,6 @@ static netdev_tx_t skfp_send_pkt(struct sk_buff *skb,
1076 if (bp->QueueSkb == 0) { 1074 if (bp->QueueSkb == 0) {
1077 netif_stop_queue(dev); 1075 netif_stop_queue(dev);
1078 } 1076 }
1079 dev->trans_start = jiffies;
1080 return NETDEV_TX_OK; 1077 return NETDEV_TX_OK;
1081 1078
1082} // skfp_send_pkt 1079} // skfp_send_pkt
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 83d16fecfac4..6f35bb77595f 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -574,7 +574,7 @@ void smt_received_pack(struct s_smc *smc, SMbuf *mb, int fs)
574 if (smt_check_para(smc,sm,plist_nif)) { 574 if (smt_check_para(smc,sm,plist_nif)) {
575 DB_SMT("SMT: NIF with para problem, ignoring\n",0,0) ; 575 DB_SMT("SMT: NIF with para problem, ignoring\n",0,0) ;
576 break ; 576 break ;
577 } ; 577 }
578 switch (sm->smt_type) { 578 switch (sm->smt_type) {
579 case SMT_ANNOUNCE : 579 case SMT_ANNOUNCE :
580 case SMT_REQUEST : 580 case SMT_REQUEST :
diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c
index 6caf713b744c..40882b3faba6 100644
--- a/drivers/net/skfp/srf.c
+++ b/drivers/net/skfp/srf.c
@@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc)
414 smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ; 414 smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
415 mb->sm_len = smt->smt_len + sizeof(struct smt_header) ; 415 mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
416 416
417 DB_SMT("SRF: sending SRF at %x, len %d \n",smt,mb->sm_len) ; 417 DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ;
418 DB_SMT("SRF: state SR%d Threshold %d\n", 418 DB_SMT("SRF: state SR%d Threshold %d\n",
419 smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ; 419 smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
420#ifdef DEBUG 420#ifdef DEBUG
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 50eb70609f20..40e5c46e7571 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -984,8 +984,8 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
984 wmb(); 984 wmb();
985 985
986 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; 986 rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
987 pci_unmap_addr_set(e, mapaddr, map); 987 dma_unmap_addr_set(e, mapaddr, map);
988 pci_unmap_len_set(e, maplen, bufsize); 988 dma_unmap_len_set(e, maplen, bufsize);
989} 989}
990 990
991/* Resume receiving using existing skb, 991/* Resume receiving using existing skb,
@@ -1018,8 +1018,8 @@ static void skge_rx_clean(struct skge_port *skge)
1018 rd->control = 0; 1018 rd->control = 0;
1019 if (e->skb) { 1019 if (e->skb) {
1020 pci_unmap_single(hw->pdev, 1020 pci_unmap_single(hw->pdev,
1021 pci_unmap_addr(e, mapaddr), 1021 dma_unmap_addr(e, mapaddr),
1022 pci_unmap_len(e, maplen), 1022 dma_unmap_len(e, maplen),
1023 PCI_DMA_FROMDEVICE); 1023 PCI_DMA_FROMDEVICE);
1024 dev_kfree_skb(e->skb); 1024 dev_kfree_skb(e->skb);
1025 e->skb = NULL; 1025 e->skb = NULL;
@@ -2756,8 +2756,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2756 e->skb = skb; 2756 e->skb = skb;
2757 len = skb_headlen(skb); 2757 len = skb_headlen(skb);
2758 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); 2758 map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
2759 pci_unmap_addr_set(e, mapaddr, map); 2759 dma_unmap_addr_set(e, mapaddr, map);
2760 pci_unmap_len_set(e, maplen, len); 2760 dma_unmap_len_set(e, maplen, len);
2761 2761
2762 td->dma_lo = map; 2762 td->dma_lo = map;
2763 td->dma_hi = map >> 32; 2763 td->dma_hi = map >> 32;
@@ -2799,8 +2799,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2799 2799
2800 tf->dma_lo = map; 2800 tf->dma_lo = map;
2801 tf->dma_hi = (u64) map >> 32; 2801 tf->dma_hi = (u64) map >> 32;
2802 pci_unmap_addr_set(e, mapaddr, map); 2802 dma_unmap_addr_set(e, mapaddr, map);
2803 pci_unmap_len_set(e, maplen, frag->size); 2803 dma_unmap_len_set(e, maplen, frag->size);
2804 2804
2805 tf->control = BMU_OWN | BMU_SW | control | frag->size; 2805 tf->control = BMU_OWN | BMU_SW | control | frag->size;
2806 } 2806 }
@@ -2837,12 +2837,12 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
2837 2837
2838 /* skb header vs. fragment */ 2838 /* skb header vs. fragment */
2839 if (control & BMU_STF) 2839 if (control & BMU_STF)
2840 pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr), 2840 pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
2841 pci_unmap_len(e, maplen), 2841 dma_unmap_len(e, maplen),
2842 PCI_DMA_TODEVICE); 2842 PCI_DMA_TODEVICE);
2843 else 2843 else
2844 pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr), 2844 pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
2845 pci_unmap_len(e, maplen), 2845 dma_unmap_len(e, maplen),
2846 PCI_DMA_TODEVICE); 2846 PCI_DMA_TODEVICE);
2847 2847
2848 if (control & BMU_EOF) { 2848 if (control & BMU_EOF) {
@@ -2918,7 +2918,7 @@ static void genesis_set_multicast(struct net_device *dev)
2918 struct skge_port *skge = netdev_priv(dev); 2918 struct skge_port *skge = netdev_priv(dev);
2919 struct skge_hw *hw = skge->hw; 2919 struct skge_hw *hw = skge->hw;
2920 int port = skge->port; 2920 int port = skge->port;
2921 struct dev_mc_list *list; 2921 struct netdev_hw_addr *ha;
2922 u32 mode; 2922 u32 mode;
2923 u8 filter[8]; 2923 u8 filter[8];
2924 2924
@@ -2938,8 +2938,8 @@ static void genesis_set_multicast(struct net_device *dev)
2938 skge->flow_status == FLOW_STAT_SYMMETRIC) 2938 skge->flow_status == FLOW_STAT_SYMMETRIC)
2939 genesis_add_filter(filter, pause_mc_addr); 2939 genesis_add_filter(filter, pause_mc_addr);
2940 2940
2941 netdev_for_each_mc_addr(list, dev) 2941 netdev_for_each_mc_addr(ha, dev)
2942 genesis_add_filter(filter, list->dmi_addr); 2942 genesis_add_filter(filter, ha->addr);
2943 } 2943 }
2944 2944
2945 xm_write32(hw, port, XM_MODE, mode); 2945 xm_write32(hw, port, XM_MODE, mode);
@@ -2957,7 +2957,7 @@ static void yukon_set_multicast(struct net_device *dev)
2957 struct skge_port *skge = netdev_priv(dev); 2957 struct skge_port *skge = netdev_priv(dev);
2958 struct skge_hw *hw = skge->hw; 2958 struct skge_hw *hw = skge->hw;
2959 int port = skge->port; 2959 int port = skge->port;
2960 struct dev_mc_list *list; 2960 struct netdev_hw_addr *ha;
2961 int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND || 2961 int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
2962 skge->flow_status == FLOW_STAT_SYMMETRIC); 2962 skge->flow_status == FLOW_STAT_SYMMETRIC);
2963 u16 reg; 2963 u16 reg;
@@ -2980,8 +2980,8 @@ static void yukon_set_multicast(struct net_device *dev)
2980 if (rx_pause) 2980 if (rx_pause)
2981 yukon_add_filter(filter, pause_mc_addr); 2981 yukon_add_filter(filter, pause_mc_addr);
2982 2982
2983 netdev_for_each_mc_addr(list, dev) 2983 netdev_for_each_mc_addr(ha, dev)
2984 yukon_add_filter(filter, list->dmi_addr); 2984 yukon_add_filter(filter, ha->addr);
2985 } 2985 }
2986 2986
2987 2987
@@ -3060,11 +3060,11 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3060 goto resubmit; 3060 goto resubmit;
3061 3061
3062 pci_dma_sync_single_for_cpu(skge->hw->pdev, 3062 pci_dma_sync_single_for_cpu(skge->hw->pdev,
3063 pci_unmap_addr(e, mapaddr), 3063 dma_unmap_addr(e, mapaddr),
3064 len, PCI_DMA_FROMDEVICE); 3064 len, PCI_DMA_FROMDEVICE);
3065 skb_copy_from_linear_data(e->skb, skb->data, len); 3065 skb_copy_from_linear_data(e->skb, skb->data, len);
3066 pci_dma_sync_single_for_device(skge->hw->pdev, 3066 pci_dma_sync_single_for_device(skge->hw->pdev,
3067 pci_unmap_addr(e, mapaddr), 3067 dma_unmap_addr(e, mapaddr),
3068 len, PCI_DMA_FROMDEVICE); 3068 len, PCI_DMA_FROMDEVICE);
3069 skge_rx_reuse(e, skge->rx_buf_size); 3069 skge_rx_reuse(e, skge->rx_buf_size);
3070 } else { 3070 } else {
@@ -3075,8 +3075,8 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3075 goto resubmit; 3075 goto resubmit;
3076 3076
3077 pci_unmap_single(skge->hw->pdev, 3077 pci_unmap_single(skge->hw->pdev,
3078 pci_unmap_addr(e, mapaddr), 3078 dma_unmap_addr(e, mapaddr),
3079 pci_unmap_len(e, maplen), 3079 dma_unmap_len(e, maplen),
3080 PCI_DMA_FROMDEVICE); 3080 PCI_DMA_FROMDEVICE);
3081 skb = e->skb; 3081 skb = e->skb;
3082 prefetch(skb->data); 3082 prefetch(skb->data);
@@ -3667,7 +3667,7 @@ static int skge_debug_show(struct seq_file *seq, void *v)
3667 t->csum_offs, t->csum_write, t->csum_start); 3667 t->csum_offs, t->csum_write, t->csum_start);
3668 } 3668 }
3669 3669
3670 seq_printf(seq, "\nRx Ring: \n"); 3670 seq_printf(seq, "\nRx Ring:\n");
3671 for (e = skge->rx_ring.to_clean; ; e = e->next) { 3671 for (e = skge->rx_ring.to_clean; ; e = e->next) {
3672 const struct skge_rx_desc *r = e->desc; 3672 const struct skge_rx_desc *r = e->desc;
3673 3673
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 831de1b6e96e..507addcaffa3 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2393,8 +2393,8 @@ struct skge_element {
2393 struct skge_element *next; 2393 struct skge_element *next;
2394 void *desc; 2394 void *desc;
2395 struct sk_buff *skb; 2395 struct sk_buff *skb;
2396 DECLARE_PCI_UNMAP_ADDR(mapaddr); 2396 DEFINE_DMA_UNMAP_ADDR(mapaddr);
2397 DECLARE_PCI_UNMAP_LEN(maplen); 2397 DEFINE_DMA_UNMAP_LEN(maplen);
2398}; 2398};
2399 2399
2400struct skge_ring { 2400struct skge_ring {
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 088c797eb73b..2111c7bbf578 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -53,7 +53,7 @@
53#include "sky2.h" 53#include "sky2.h"
54 54
55#define DRV_NAME "sky2" 55#define DRV_NAME "sky2"
56#define DRV_VERSION "1.27" 56#define DRV_VERSION "1.28"
57 57
58/* 58/*
59 * The Yukon II chipset takes 64 bit command blocks (called list elements) 59 * The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -70,18 +70,15 @@
70 VLAN:GSO + CKSUM + Data + skb_frags * DMA */ 70 VLAN:GSO + CKSUM + Data + skb_frags * DMA */
71#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1)) 71#define MAX_SKB_TX_LE (2 + (sizeof(dma_addr_t)/sizeof(u32))*(MAX_SKB_FRAGS+1))
72#define TX_MIN_PENDING (MAX_SKB_TX_LE+1) 72#define TX_MIN_PENDING (MAX_SKB_TX_LE+1)
73#define TX_MAX_PENDING 4096 73#define TX_MAX_PENDING 1024
74#define TX_DEF_PENDING 127 74#define TX_DEF_PENDING 127
75 75
76#define STATUS_RING_SIZE 2048 /* 2 ports * (TX + 2*RX) */
77#define STATUS_LE_BYTES (STATUS_RING_SIZE*sizeof(struct sky2_status_le))
78#define TX_WATCHDOG (5 * HZ) 76#define TX_WATCHDOG (5 * HZ)
79#define NAPI_WEIGHT 64 77#define NAPI_WEIGHT 64
80#define PHY_RETRIES 1000 78#define PHY_RETRIES 1000
81 79
82#define SKY2_EEPROM_MAGIC 0x9955aabb 80#define SKY2_EEPROM_MAGIC 0x9955aabb
83 81
84
85#define RING_NEXT(x,s) (((x)+1) & ((s)-1)) 82#define RING_NEXT(x,s) (((x)+1) & ((s)-1))
86 83
87static const u32 default_msg = 84static const u32 default_msg =
@@ -227,7 +224,7 @@ static void sky2_power_on(struct sky2_hw *hw)
227 /* disable Core Clock Division, */ 224 /* disable Core Clock Division, */
228 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); 225 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
229 226
230 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 227 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
231 /* enable bits are inverted */ 228 /* enable bits are inverted */
232 sky2_write8(hw, B2_Y2_CLK_GATE, 229 sky2_write8(hw, B2_Y2_CLK_GATE,
233 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 230 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
@@ -269,7 +266,7 @@ static void sky2_power_on(struct sky2_hw *hw)
269 266
270static void sky2_power_aux(struct sky2_hw *hw) 267static void sky2_power_aux(struct sky2_hw *hw)
271{ 268{
272 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 269 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
273 sky2_write8(hw, B2_Y2_CLK_GATE, 0); 270 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
274 else 271 else
275 /* enable bits are inverted */ 272 /* enable bits are inverted */
@@ -652,7 +649,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
652 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 649 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
653 reg1 &= ~phy_power[port]; 650 reg1 &= ~phy_power[port];
654 651
655 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 652 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
656 reg1 |= coma_mode[port]; 653 reg1 |= coma_mode[port];
657 654
658 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 655 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
@@ -824,7 +821,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
824 821
825 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); 822 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
826 823
827 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) { 824 if (hw->chip_id == CHIP_ID_YUKON_XL &&
825 hw->chip_rev == CHIP_REV_YU_XL_A0 &&
826 port == 1) {
828 /* WA DEV_472 -- looks like crossed wires on port 2 */ 827 /* WA DEV_472 -- looks like crossed wires on port 2 */
829 /* clear GMAC 1 Control reset */ 828 /* clear GMAC 1 Control reset */
830 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR); 829 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
@@ -878,6 +877,10 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
878 if (hw->dev[port]->mtu > ETH_DATA_LEN) 877 if (hw->dev[port]->mtu > ETH_DATA_LEN)
879 reg |= GM_SMOD_JUMBO_ENA; 878 reg |= GM_SMOD_JUMBO_ENA;
880 879
880 if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
881 hw->chip_rev == CHIP_REV_YU_EC_U_B1)
882 reg |= GM_NEW_FLOW_CTRL;
883
881 gma_write16(hw, port, GM_SERIAL_MODE, reg); 884 gma_write16(hw, port, GM_SERIAL_MODE, reg);
882 885
883 /* virtual address for data */ 886 /* virtual address for data */
@@ -1126,7 +1129,7 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
1126 if (pci_dma_mapping_error(pdev, re->data_addr)) 1129 if (pci_dma_mapping_error(pdev, re->data_addr))
1127 goto mapping_error; 1130 goto mapping_error;
1128 1131
1129 pci_unmap_len_set(re, data_size, size); 1132 dma_unmap_len_set(re, data_size, size);
1130 1133
1131 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1134 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1132 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1135 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1148,7 +1151,7 @@ map_page_error:
1148 PCI_DMA_FROMDEVICE); 1151 PCI_DMA_FROMDEVICE);
1149 } 1152 }
1150 1153
1151 pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size), 1154 pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
1152 PCI_DMA_FROMDEVICE); 1155 PCI_DMA_FROMDEVICE);
1153 1156
1154mapping_error: 1157mapping_error:
@@ -1163,7 +1166,7 @@ static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
1163 struct sk_buff *skb = re->skb; 1166 struct sk_buff *skb = re->skb;
1164 int i; 1167 int i;
1165 1168
1166 pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size), 1169 pci_unmap_single(pdev, re->data_addr, dma_unmap_len(re, data_size),
1167 PCI_DMA_FROMDEVICE); 1170 PCI_DMA_FROMDEVICE);
1168 1171
1169 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1172 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
@@ -1190,6 +1193,39 @@ static void rx_set_checksum(struct sky2_port *sky2)
1190 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 1193 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
1191} 1194}
1192 1195
1196/* Enable/disable receive hash calculation (RSS) */
1197static void rx_set_rss(struct net_device *dev)
1198{
1199 struct sky2_port *sky2 = netdev_priv(dev);
1200 struct sky2_hw *hw = sky2->hw;
1201 int i, nkeys = 4;
1202
1203 /* Supports IPv6 and other modes */
1204 if (hw->flags & SKY2_HW_NEW_LE) {
1205 nkeys = 10;
1206 sky2_write32(hw, SK_REG(sky2->port, RSS_CFG), HASH_ALL);
1207 }
1208
1209 /* Program RSS initial values */
1210 if (dev->features & NETIF_F_RXHASH) {
1211 u32 key[nkeys];
1212
1213 get_random_bytes(key, nkeys * sizeof(u32));
1214 for (i = 0; i < nkeys; i++)
1215 sky2_write32(hw, SK_REG(sky2->port, RSS_KEY + i * 4),
1216 key[i]);
1217
1218 /* Need to turn on (undocumented) flag to make hashing work */
1219 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T),
1220 RX_STFW_ENA);
1221
1222 sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
1223 BMU_ENA_RX_RSS_HASH);
1224 } else
1225 sky2_write32(hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
1226 BMU_DIS_RX_RSS_HASH);
1227}
1228
1193/* 1229/*
1194 * The RX Stop command will not work for Yukon-2 if the BMU does not 1230 * The RX Stop command will not work for Yukon-2 if the BMU does not
1195 * reach the end of packet and since we can't make sure that we have 1231 * reach the end of packet and since we can't make sure that we have
@@ -1414,8 +1450,7 @@ static void sky2_rx_start(struct sky2_port *sky2)
1414 /* These chips have no ram buffer? 1450 /* These chips have no ram buffer?
1415 * MAC Rx RAM Read is controlled by hardware */ 1451 * MAC Rx RAM Read is controlled by hardware */
1416 if (hw->chip_id == CHIP_ID_YUKON_EC_U && 1452 if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
1417 (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || 1453 hw->chip_rev > CHIP_REV_YU_EC_U_A0)
1418 hw->chip_rev == CHIP_REV_YU_EC_U_B0))
1419 sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS); 1454 sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
1420 1455
1421 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); 1456 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
@@ -1423,6 +1458,9 @@ static void sky2_rx_start(struct sky2_port *sky2)
1423 if (!(hw->flags & SKY2_HW_NEW_LE)) 1458 if (!(hw->flags & SKY2_HW_NEW_LE))
1424 rx_set_checksum(sky2); 1459 rx_set_checksum(sky2);
1425 1460
1461 if (!(hw->flags & SKY2_HW_RSS_BROKEN))
1462 rx_set_rss(sky2->netdev);
1463
1426 /* submit Rx ring */ 1464 /* submit Rx ring */
1427 for (i = 0; i < sky2->rx_pending; i++) { 1465 for (i = 0; i < sky2->rx_pending; i++) {
1428 re = sky2->rx_ring + i; 1466 re = sky2->rx_ring + i;
@@ -1657,12 +1695,12 @@ static unsigned tx_le_req(const struct sk_buff *skb)
1657static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re) 1695static void sky2_tx_unmap(struct pci_dev *pdev, struct tx_ring_info *re)
1658{ 1696{
1659 if (re->flags & TX_MAP_SINGLE) 1697 if (re->flags & TX_MAP_SINGLE)
1660 pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr), 1698 pci_unmap_single(pdev, dma_unmap_addr(re, mapaddr),
1661 pci_unmap_len(re, maplen), 1699 dma_unmap_len(re, maplen),
1662 PCI_DMA_TODEVICE); 1700 PCI_DMA_TODEVICE);
1663 else if (re->flags & TX_MAP_PAGE) 1701 else if (re->flags & TX_MAP_PAGE)
1664 pci_unmap_page(pdev, pci_unmap_addr(re, mapaddr), 1702 pci_unmap_page(pdev, dma_unmap_addr(re, mapaddr),
1665 pci_unmap_len(re, maplen), 1703 dma_unmap_len(re, maplen),
1666 PCI_DMA_TODEVICE); 1704 PCI_DMA_TODEVICE);
1667 re->flags = 0; 1705 re->flags = 0;
1668} 1706}
@@ -1773,8 +1811,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1773 1811
1774 re = sky2->tx_ring + slot; 1812 re = sky2->tx_ring + slot;
1775 re->flags = TX_MAP_SINGLE; 1813 re->flags = TX_MAP_SINGLE;
1776 pci_unmap_addr_set(re, mapaddr, mapping); 1814 dma_unmap_addr_set(re, mapaddr, mapping);
1777 pci_unmap_len_set(re, maplen, len); 1815 dma_unmap_len_set(re, maplen, len);
1778 1816
1779 le = get_tx_le(sky2, &slot); 1817 le = get_tx_le(sky2, &slot);
1780 le->addr = cpu_to_le32(lower_32_bits(mapping)); 1818 le->addr = cpu_to_le32(lower_32_bits(mapping));
@@ -1802,8 +1840,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1802 1840
1803 re = sky2->tx_ring + slot; 1841 re = sky2->tx_ring + slot;
1804 re->flags = TX_MAP_PAGE; 1842 re->flags = TX_MAP_PAGE;
1805 pci_unmap_addr_set(re, mapaddr, mapping); 1843 dma_unmap_addr_set(re, mapaddr, mapping);
1806 pci_unmap_len_set(re, maplen, frag->size); 1844 dma_unmap_len_set(re, maplen, frag->size);
1807 1845
1808 le = get_tx_le(sky2, &slot); 1846 le = get_tx_le(sky2, &slot);
1809 le->addr = cpu_to_le32(lower_32_bits(mapping)); 1847 le->addr = cpu_to_le32(lower_32_bits(mapping));
@@ -2142,7 +2180,8 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
2142 istatus, phystat); 2180 istatus, phystat);
2143 2181
2144 if (istatus & PHY_M_IS_AN_COMPL) { 2182 if (istatus & PHY_M_IS_AN_COMPL) {
2145 if (sky2_autoneg_done(sky2, phystat) == 0) 2183 if (sky2_autoneg_done(sky2, phystat) == 0 &&
2184 !netif_carrier_ok(dev))
2146 sky2_link_up(sky2); 2185 sky2_link_up(sky2);
2147 goto out; 2186 goto out;
2148 } 2187 }
@@ -2236,8 +2275,8 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
2236 sky2_write32(hw, B0_IMSK, 0); 2275 sky2_write32(hw, B0_IMSK, 0);
2237 2276
2238 dev->trans_start = jiffies; /* prevent tx timeout */ 2277 dev->trans_start = jiffies; /* prevent tx timeout */
2239 netif_stop_queue(dev);
2240 napi_disable(&hw->napi); 2278 napi_disable(&hw->napi);
2279 netif_tx_disable(dev);
2241 2280
2242 synchronize_irq(hw->pdev->irq); 2281 synchronize_irq(hw->pdev->irq);
2243 2282
@@ -2531,6 +2570,14 @@ static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
2531 } 2570 }
2532} 2571}
2533 2572
2573static void sky2_rx_hash(struct sky2_port *sky2, u32 status)
2574{
2575 struct sk_buff *skb;
2576
2577 skb = sky2->rx_ring[sky2->rx_next].skb;
2578 skb->rxhash = le32_to_cpu(status);
2579}
2580
2534/* Process status response ring */ 2581/* Process status response ring */
2535static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx) 2582static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2536{ 2583{
@@ -2552,7 +2599,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2552 if (!(opcode & HW_OWNER)) 2599 if (!(opcode & HW_OWNER))
2553 break; 2600 break;
2554 2601
2555 hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE); 2602 hw->st_idx = RING_NEXT(hw->st_idx, hw->st_size);
2556 2603
2557 port = le->css & CSS_LINK_BIT; 2604 port = le->css & CSS_LINK_BIT;
2558 dev = hw->dev[port]; 2605 dev = hw->dev[port];
@@ -2603,6 +2650,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2603 sky2_rx_checksum(sky2, status); 2650 sky2_rx_checksum(sky2, status);
2604 break; 2651 break;
2605 2652
2653 case OP_RSS_HASH:
2654 sky2_rx_hash(sky2, status);
2655 break;
2656
2606 case OP_TXINDEXLE: 2657 case OP_TXINDEXLE:
2607 /* TX index reports status for both ports */ 2658 /* TX index reports status for both ports */
2608 sky2_tx_done(hw->dev[0], status & 0xfff); 2659 sky2_tx_done(hw->dev[0], status & 0xfff);
@@ -2957,6 +3008,8 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2957 switch(hw->chip_id) { 3008 switch(hw->chip_id) {
2958 case CHIP_ID_YUKON_XL: 3009 case CHIP_ID_YUKON_XL:
2959 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY; 3010 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_NEWER_PHY;
3011 if (hw->chip_rev < CHIP_REV_YU_XL_A2)
3012 hw->flags |= SKY2_HW_RSS_BROKEN;
2960 break; 3013 break;
2961 3014
2962 case CHIP_ID_YUKON_EC_U: 3015 case CHIP_ID_YUKON_EC_U:
@@ -2982,10 +3035,11 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2982 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n"); 3035 dev_err(&hw->pdev->dev, "unsupported revision Yukon-EC rev A1\n");
2983 return -EOPNOTSUPP; 3036 return -EOPNOTSUPP;
2984 } 3037 }
2985 hw->flags = SKY2_HW_GIGABIT; 3038 hw->flags = SKY2_HW_GIGABIT | SKY2_HW_RSS_BROKEN;
2986 break; 3039 break;
2987 3040
2988 case CHIP_ID_YUKON_FE: 3041 case CHIP_ID_YUKON_FE:
3042 hw->flags = SKY2_HW_RSS_BROKEN;
2989 break; 3043 break;
2990 3044
2991 case CHIP_ID_YUKON_FE_P: 3045 case CHIP_ID_YUKON_FE_P:
@@ -3192,7 +3246,7 @@ static void sky2_reset(struct sky2_hw *hw)
3192 for (i = 0; i < hw->ports; i++) 3246 for (i = 0; i < hw->ports; i++)
3193 sky2_gmac_reset(hw, i); 3247 sky2_gmac_reset(hw, i);
3194 3248
3195 memset(hw->st_le, 0, STATUS_LE_BYTES); 3249 memset(hw->st_le, 0, hw->st_size * sizeof(struct sky2_status_le));
3196 hw->st_idx = 0; 3250 hw->st_idx = 0;
3197 3251
3198 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET); 3252 sky2_write32(hw, STAT_CTRL, SC_STAT_RST_SET);
@@ -3202,7 +3256,7 @@ static void sky2_reset(struct sky2_hw *hw)
3202 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32); 3256 sky2_write32(hw, STAT_LIST_ADDR_HI, (u64) hw->st_dma >> 32);
3203 3257
3204 /* Set the list last index */ 3258 /* Set the list last index */
3205 sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1); 3259 sky2_write16(hw, STAT_LAST_IDX, hw->st_size - 1);
3206 3260
3207 sky2_write16(hw, STAT_TX_IDX_TH, 10); 3261 sky2_write16(hw, STAT_TX_IDX_TH, 10);
3208 sky2_write8(hw, STAT_FIFO_WM, 16); 3262 sky2_write8(hw, STAT_FIFO_WM, 16);
@@ -3258,18 +3312,14 @@ static int sky2_reattach(struct net_device *dev)
3258 return err; 3312 return err;
3259} 3313}
3260 3314
3261static void sky2_restart(struct work_struct *work) 3315static void sky2_all_down(struct sky2_hw *hw)
3262{ 3316{
3263 struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
3264 u32 imask;
3265 int i; 3317 int i;
3266 3318
3267 rtnl_lock(); 3319 sky2_read32(hw, B0_IMSK);
3268
3269 napi_disable(&hw->napi);
3270 synchronize_irq(hw->pdev->irq);
3271 imask = sky2_read32(hw, B0_IMSK);
3272 sky2_write32(hw, B0_IMSK, 0); 3320 sky2_write32(hw, B0_IMSK, 0);
3321 synchronize_irq(hw->pdev->irq);
3322 napi_disable(&hw->napi);
3273 3323
3274 for (i = 0; i < hw->ports; i++) { 3324 for (i = 0; i < hw->ports; i++) {
3275 struct net_device *dev = hw->dev[i]; 3325 struct net_device *dev = hw->dev[i];
@@ -3282,8 +3332,12 @@ static void sky2_restart(struct work_struct *work)
3282 netif_tx_disable(dev); 3332 netif_tx_disable(dev);
3283 sky2_hw_down(sky2); 3333 sky2_hw_down(sky2);
3284 } 3334 }
3335}
3285 3336
3286 sky2_reset(hw); 3337static void sky2_all_up(struct sky2_hw *hw)
3338{
3339 u32 imask = Y2_IS_BASE;
3340 int i;
3287 3341
3288 for (i = 0; i < hw->ports; i++) { 3342 for (i = 0; i < hw->ports; i++) {
3289 struct net_device *dev = hw->dev[i]; 3343 struct net_device *dev = hw->dev[i];
@@ -3293,6 +3347,8 @@ static void sky2_restart(struct work_struct *work)
3293 continue; 3347 continue;
3294 3348
3295 sky2_hw_up(sky2); 3349 sky2_hw_up(sky2);
3350 sky2_set_multicast(dev);
3351 imask |= portirq_msk[i];
3296 netif_wake_queue(dev); 3352 netif_wake_queue(dev);
3297 } 3353 }
3298 3354
@@ -3301,6 +3357,17 @@ static void sky2_restart(struct work_struct *work)
3301 3357
3302 sky2_read32(hw, B0_Y2_SP_LISR); 3358 sky2_read32(hw, B0_Y2_SP_LISR);
3303 napi_enable(&hw->napi); 3359 napi_enable(&hw->napi);
3360}
3361
3362static void sky2_restart(struct work_struct *work)
3363{
3364 struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
3365
3366 rtnl_lock();
3367
3368 sky2_all_down(hw);
3369 sky2_reset(hw);
3370 sky2_all_up(hw);
3304 3371
3305 rtnl_unlock(); 3372 rtnl_unlock();
3306} 3373}
@@ -3622,7 +3689,7 @@ static void sky2_set_multicast(struct net_device *dev)
3622 struct sky2_port *sky2 = netdev_priv(dev); 3689 struct sky2_port *sky2 = netdev_priv(dev);
3623 struct sky2_hw *hw = sky2->hw; 3690 struct sky2_hw *hw = sky2->hw;
3624 unsigned port = sky2->port; 3691 unsigned port = sky2->port;
3625 struct dev_mc_list *list; 3692 struct netdev_hw_addr *ha;
3626 u16 reg; 3693 u16 reg;
3627 u8 filter[8]; 3694 u8 filter[8];
3628 int rx_pause; 3695 int rx_pause;
@@ -3646,8 +3713,8 @@ static void sky2_set_multicast(struct net_device *dev)
3646 if (rx_pause) 3713 if (rx_pause)
3647 sky2_add_filter(filter, pause_mc_addr); 3714 sky2_add_filter(filter, pause_mc_addr);
3648 3715
3649 netdev_for_each_mc_addr(list, dev) 3716 netdev_for_each_mc_addr(ha, dev)
3650 sky2_add_filter(filter, list->dmi_addr); 3717 sky2_add_filter(filter, ha->addr);
3651 } 3718 }
3652 3719
3653 gma_write16(hw, port, GM_MC_ADDR_H1, 3720 gma_write16(hw, port, GM_MC_ADDR_H1,
@@ -4109,6 +4176,25 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
4109 return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len); 4176 return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
4110} 4177}
4111 4178
4179static int sky2_set_flags(struct net_device *dev, u32 data)
4180{
4181 struct sky2_port *sky2 = netdev_priv(dev);
4182
4183 if (data & ~ETH_FLAG_RXHASH)
4184 return -EOPNOTSUPP;
4185
4186 if (data & ETH_FLAG_RXHASH) {
4187 if (sky2->hw->flags & SKY2_HW_RSS_BROKEN)
4188 return -EINVAL;
4189
4190 dev->features |= NETIF_F_RXHASH;
4191 } else
4192 dev->features &= ~NETIF_F_RXHASH;
4193
4194 rx_set_rss(dev);
4195
4196 return 0;
4197}
4112 4198
4113static const struct ethtool_ops sky2_ethtool_ops = { 4199static const struct ethtool_ops sky2_ethtool_ops = {
4114 .get_settings = sky2_get_settings, 4200 .get_settings = sky2_get_settings,
@@ -4140,6 +4226,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
4140 .phys_id = sky2_phys_id, 4226 .phys_id = sky2_phys_id,
4141 .get_sset_count = sky2_get_sset_count, 4227 .get_sset_count = sky2_get_sset_count,
4142 .get_ethtool_stats = sky2_get_ethtool_stats, 4228 .get_ethtool_stats = sky2_get_ethtool_stats,
4229 .set_flags = sky2_set_flags,
4143}; 4230};
4144 4231
4145#ifdef CONFIG_SKY2_DEBUG 4232#ifdef CONFIG_SKY2_DEBUG
@@ -4250,12 +4337,13 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
4250 napi_disable(&hw->napi); 4337 napi_disable(&hw->napi);
4251 last = sky2_read16(hw, STAT_PUT_IDX); 4338 last = sky2_read16(hw, STAT_PUT_IDX);
4252 4339
4340 seq_printf(seq, "Status ring %u\n", hw->st_size);
4253 if (hw->st_idx == last) 4341 if (hw->st_idx == last)
4254 seq_puts(seq, "Status ring (empty)\n"); 4342 seq_puts(seq, "Status ring (empty)\n");
4255 else { 4343 else {
4256 seq_puts(seq, "Status ring\n"); 4344 seq_puts(seq, "Status ring\n");
4257 for (idx = hw->st_idx; idx != last && idx < STATUS_RING_SIZE; 4345 for (idx = hw->st_idx; idx != last && idx < hw->st_size;
4258 idx = RING_NEXT(idx, STATUS_RING_SIZE)) { 4346 idx = RING_NEXT(idx, hw->st_size)) {
4259 const struct sky2_status_le *le = hw->st_le + idx; 4347 const struct sky2_status_le *le = hw->st_le + idx;
4260 seq_printf(seq, "[%d] %#x %d %#x\n", 4348 seq_printf(seq, "[%d] %#x %d %#x\n",
4261 idx, le->opcode, le->length, le->status); 4349 idx, le->opcode, le->length, le->status);
@@ -4492,6 +4580,10 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4492 if (highmem) 4580 if (highmem)
4493 dev->features |= NETIF_F_HIGHDMA; 4581 dev->features |= NETIF_F_HIGHDMA;
4494 4582
4583 /* Enable receive hashing unless hardware is known broken */
4584 if (!(hw->flags & SKY2_HW_RSS_BROKEN))
4585 dev->features |= NETIF_F_RXHASH;
4586
4495#ifdef SKY2_VLAN_TAG_USED 4587#ifdef SKY2_VLAN_TAG_USED
4496 /* The workaround for FE+ status conflicts with VLAN tag detection. */ 4588 /* The workaround for FE+ status conflicts with VLAN tag detection. */
4497 if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P && 4589 if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
@@ -4683,15 +4775,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
4683 goto err_out_free_hw; 4775 goto err_out_free_hw;
4684 } 4776 }
4685 4777
4686 /* ring for status responses */
4687 hw->st_le = pci_alloc_consistent(pdev, STATUS_LE_BYTES, &hw->st_dma);
4688 if (!hw->st_le)
4689 goto err_out_iounmap;
4690
4691 err = sky2_init(hw); 4778 err = sky2_init(hw);
4692 if (err) 4779 if (err)
4693 goto err_out_iounmap; 4780 goto err_out_iounmap;
4694 4781
4782 /* ring for status responses */
4783 hw->st_size = hw->ports * roundup_pow_of_two(3*RX_MAX_PENDING + TX_MAX_PENDING);
4784 hw->st_le = pci_alloc_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
4785 &hw->st_dma);
4786 if (!hw->st_le)
4787 goto err_out_reset;
4788
4695 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n", 4789 dev_info(&pdev->dev, "Yukon-2 %s chip revision %d\n",
4696 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev); 4790 sky2_name(hw->chip_id, buf1, sizeof(buf1)), hw->chip_rev);
4697 4791
@@ -4765,8 +4859,10 @@ err_out_unregister:
4765err_out_free_netdev: 4859err_out_free_netdev:
4766 free_netdev(dev); 4860 free_netdev(dev);
4767err_out_free_pci: 4861err_out_free_pci:
4862 pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
4863 hw->st_le, hw->st_dma);
4864err_out_reset:
4768 sky2_write8(hw, B0_CTST, CS_RST_SET); 4865 sky2_write8(hw, B0_CTST, CS_RST_SET);
4769 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma);
4770err_out_iounmap: 4866err_out_iounmap:
4771 iounmap(hw->regs); 4867 iounmap(hw->regs);
4772err_out_free_hw: 4868err_out_free_hw:
@@ -4804,7 +4900,8 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
4804 free_irq(pdev->irq, hw); 4900 free_irq(pdev->irq, hw);
4805 if (hw->flags & SKY2_HW_USE_MSI) 4901 if (hw->flags & SKY2_HW_USE_MSI)
4806 pci_disable_msi(pdev); 4902 pci_disable_msi(pdev);
4807 pci_free_consistent(pdev, STATUS_LE_BYTES, hw->st_le, hw->st_dma); 4903 pci_free_consistent(pdev, hw->st_size * sizeof(struct sky2_status_le),
4904 hw->st_le, hw->st_dma);
4808 pci_release_regions(pdev); 4905 pci_release_regions(pdev);
4809 pci_disable_device(pdev); 4906 pci_disable_device(pdev);
4810 4907
@@ -4829,12 +4926,12 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4829 cancel_work_sync(&hw->restart_work); 4926 cancel_work_sync(&hw->restart_work);
4830 4927
4831 rtnl_lock(); 4928 rtnl_lock();
4929
4930 sky2_all_down(hw);
4832 for (i = 0; i < hw->ports; i++) { 4931 for (i = 0; i < hw->ports; i++) {
4833 struct net_device *dev = hw->dev[i]; 4932 struct net_device *dev = hw->dev[i];
4834 struct sky2_port *sky2 = netdev_priv(dev); 4933 struct sky2_port *sky2 = netdev_priv(dev);
4835 4934
4836 sky2_detach(dev);
4837
4838 if (sky2->wol) 4935 if (sky2->wol)
4839 sky2_wol_init(sky2); 4936 sky2_wol_init(sky2);
4840 4937
@@ -4843,8 +4940,6 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4843 4940
4844 device_set_wakeup_enable(&pdev->dev, wol != 0); 4941 device_set_wakeup_enable(&pdev->dev, wol != 0);
4845 4942
4846 sky2_write32(hw, B0_IMSK, 0);
4847 napi_disable(&hw->napi);
4848 sky2_power_aux(hw); 4943 sky2_power_aux(hw);
4849 rtnl_unlock(); 4944 rtnl_unlock();
4850 4945
@@ -4859,12 +4954,11 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4859static int sky2_resume(struct pci_dev *pdev) 4954static int sky2_resume(struct pci_dev *pdev)
4860{ 4955{
4861 struct sky2_hw *hw = pci_get_drvdata(pdev); 4956 struct sky2_hw *hw = pci_get_drvdata(pdev);
4862 int i, err; 4957 int err;
4863 4958
4864 if (!hw) 4959 if (!hw)
4865 return 0; 4960 return 0;
4866 4961
4867 rtnl_lock();
4868 err = pci_set_power_state(pdev, PCI_D0); 4962 err = pci_set_power_state(pdev, PCI_D0);
4869 if (err) 4963 if (err)
4870 goto out; 4964 goto out;
@@ -4882,20 +4976,13 @@ static int sky2_resume(struct pci_dev *pdev)
4882 goto out; 4976 goto out;
4883 } 4977 }
4884 4978
4979 rtnl_lock();
4885 sky2_reset(hw); 4980 sky2_reset(hw);
4886 sky2_write32(hw, B0_IMSK, Y2_IS_BASE); 4981 sky2_all_up(hw);
4887 napi_enable(&hw->napi);
4888
4889 for (i = 0; i < hw->ports; i++) {
4890 err = sky2_reattach(hw->dev[i]);
4891 if (err)
4892 goto out;
4893 }
4894 rtnl_unlock(); 4982 rtnl_unlock();
4895 4983
4896 return 0; 4984 return 0;
4897out: 4985out:
4898 rtnl_unlock();
4899 4986
4900 dev_err(&pdev->dev, "resume failed (%d)\n", err); 4987 dev_err(&pdev->dev, "resume failed (%d)\n", err);
4901 pci_disable_device(pdev); 4988 pci_disable_device(pdev);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index a5e182dd9819..084eff21b67a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -548,6 +548,14 @@ enum {
548 CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */ 548 CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
549 CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */ 549 CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
550}; 550};
551
552enum yukon_xl_rev {
553 CHIP_REV_YU_XL_A0 = 0,
554 CHIP_REV_YU_XL_A1 = 1,
555 CHIP_REV_YU_XL_A2 = 2,
556 CHIP_REV_YU_XL_A3 = 3,
557};
558
551enum yukon_ec_rev { 559enum yukon_ec_rev {
552 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ 560 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
553 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ 561 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
@@ -557,6 +565,7 @@ enum yukon_ec_u_rev {
557 CHIP_REV_YU_EC_U_A0 = 1, 565 CHIP_REV_YU_EC_U_A0 = 1,
558 CHIP_REV_YU_EC_U_A1 = 2, 566 CHIP_REV_YU_EC_U_A1 = 2,
559 CHIP_REV_YU_EC_U_B0 = 3, 567 CHIP_REV_YU_EC_U_B0 = 3,
568 CHIP_REV_YU_EC_U_B1 = 5,
560}; 569};
561enum yukon_fe_rev { 570enum yukon_fe_rev {
562 CHIP_REV_YU_FE_A1 = 1, 571 CHIP_REV_YU_FE_A1 = 1,
@@ -685,8 +694,21 @@ enum {
685 TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */ 694 TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
686 TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */ 695 TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
687 TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */ 696 TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
697
698 RSS_KEY = 0x0220, /* RSS Key setup */
699 RSS_CFG = 0x0248, /* RSS Configuration */
688}; 700};
689 701
702enum {
703 HASH_TCP_IPV6_EX_CTRL = 1<<5,
704 HASH_IPV6_EX_CTRL = 1<<4,
705 HASH_TCP_IPV6_CTRL = 1<<3,
706 HASH_IPV6_CTRL = 1<<2,
707 HASH_TCP_IPV4_CTRL = 1<<1,
708 HASH_IPV4_CTRL = 1<<0,
709
710 HASH_ALL = 0x3f,
711};
690 712
691enum { 713enum {
692 B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */ 714 B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
@@ -1775,10 +1797,13 @@ enum {
1775/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ 1797/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
1776enum { 1798enum {
1777 GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */ 1799 GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
1778 GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */ 1800 GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */
1779 GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */ 1801 GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */
1780 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ 1802 GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */
1781 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ 1803
1804 GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */
1805
1806 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
1782}; 1807};
1783 1808
1784#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) 1809#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
@@ -2157,14 +2182,14 @@ struct tx_ring_info {
2157 unsigned long flags; 2182 unsigned long flags;
2158#define TX_MAP_SINGLE 0x0001 2183#define TX_MAP_SINGLE 0x0001
2159#define TX_MAP_PAGE 0x0002 2184#define TX_MAP_PAGE 0x0002
2160 DECLARE_PCI_UNMAP_ADDR(mapaddr); 2185 DEFINE_DMA_UNMAP_ADDR(mapaddr);
2161 DECLARE_PCI_UNMAP_LEN(maplen); 2186 DEFINE_DMA_UNMAP_LEN(maplen);
2162}; 2187};
2163 2188
2164struct rx_ring_info { 2189struct rx_ring_info {
2165 struct sk_buff *skb; 2190 struct sk_buff *skb;
2166 dma_addr_t data_addr; 2191 dma_addr_t data_addr;
2167 DECLARE_PCI_UNMAP_LEN(data_size); 2192 DEFINE_DMA_UNMAP_LEN(data_size);
2168 dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT]; 2193 dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
2169}; 2194};
2170 2195
@@ -2249,6 +2274,7 @@ struct sky2_hw {
2249#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */ 2274#define SKY2_HW_NEW_LE 0x00000020 /* new LSOv2 format */
2250#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2275#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2251#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2276#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
2277#define SKY2_HW_RSS_BROKEN 0x00000100
2252 2278
2253 u8 chip_id; 2279 u8 chip_id;
2254 u8 chip_rev; 2280 u8 chip_rev;
@@ -2256,6 +2282,7 @@ struct sky2_hw {
2256 u8 ports; 2282 u8 ports;
2257 2283
2258 struct sky2_status_le *st_le; 2284 struct sky2_status_le *st_le;
2285 u32 st_size;
2259 u32 st_idx; 2286 u32 st_idx;
2260 dma_addr_t st_dma; 2287 dma_addr_t st_dma;
2261 2288
diff --git a/drivers/net/slhc.c b/drivers/net/slhc.c
index 140d63f3cafa..ac279fad9d45 100644
--- a/drivers/net/slhc.c
+++ b/drivers/net/slhc.c
@@ -731,7 +731,6 @@ void
731slhc_free(struct slcompress *comp) 731slhc_free(struct slcompress *comp)
732{ 732{
733 printk(KERN_DEBUG "Called IP function on non IP-system: slhc_free"); 733 printk(KERN_DEBUG "Called IP function on non IP-system: slhc_free");
734 return;
735} 734}
736struct slcompress * 735struct slcompress *
737slhc_init(int rslots, int tslots) 736slhc_init(int rslots, int tslots)
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 89696156c059..fa434fb8fb7c 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -458,7 +458,7 @@ static void sl_tx_timeout(struct net_device *dev)
458 * 14 Oct 1994 Dmitry Gorodchanin. 458 * 14 Oct 1994 Dmitry Gorodchanin.
459 */ 459 */
460#ifdef SL_CHECK_TRANSMIT 460#ifdef SL_CHECK_TRANSMIT
461 if (time_before(jiffies, dev->trans_start + 20 * HZ)) { 461 if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
462 /* 20 sec timeout not reached */ 462 /* 20 sec timeout not reached */
463 goto out; 463 goto out;
464 } 464 }
@@ -1269,7 +1269,7 @@ static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1269 1269
1270 case SIOCGLEASE: 1270 case SIOCGLEASE:
1271 *p = sl->leased; 1271 *p = sl->leased;
1272 }; 1272 }
1273 spin_unlock_bh(&sl->lock); 1273 spin_unlock_bh(&sl->lock);
1274 return 0; 1274 return 0;
1275} 1275}
diff --git a/drivers/net/smc-mca.c b/drivers/net/smc-mca.c
index a93f122e9a96..d07c39cb4daf 100644
--- a/drivers/net/smc-mca.c
+++ b/drivers/net/smc-mca.c
@@ -460,7 +460,6 @@ static void ultramca_reset_8390(struct net_device *dev)
460 460
461 if (ei_debug > 1) 461 if (ei_debug > 1)
462 printk("reset done\n"); 462 printk("reset done\n");
463 return;
464} 463}
465 464
466/* Grab the 8390 specific header. Similar to the block_input routine, but 465/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index 0291ea098a06..d2dd8e6113ab 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -421,7 +421,6 @@ ultra_reset_8390(struct net_device *dev)
421 outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */ 421 outb(0x01, cmd_port + 6); /* Enable interrupts and memory. */
422 422
423 if (ei_debug > 1) printk("reset done\n"); 423 if (ei_debug > 1) printk("reset done\n");
424 return;
425} 424}
426 425
427/* Grab the 8390 specific header. Similar to the block_input routine, but 426/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/smc-ultra32.c b/drivers/net/smc-ultra32.c
index 7a554adc70fb..e459c3b2510a 100644
--- a/drivers/net/smc-ultra32.c
+++ b/drivers/net/smc-ultra32.c
@@ -352,7 +352,6 @@ static void ultra32_reset_8390(struct net_device *dev)
352 outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */ 352 outb(0x84, ioaddr + 5); /* Enable MEM16 & Disable Bus Master. */
353 outb(0x01, ioaddr + 6); /* Enable Interrupts. */ 353 outb(0x01, ioaddr + 6); /* Enable Interrupts. */
354 if (ei_debug > 1) printk("reset done\n"); 354 if (ei_debug > 1) printk("reset done\n");
355 return;
356} 355}
357 356
358/* Grab the 8390 specific header. Similar to the block_input routine, but 357/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 635820d42b19..66831f378396 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -382,7 +382,7 @@ static inline void smc911x_rcv(struct net_device *dev)
382 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", 382 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
383 dev->name, __func__); 383 dev->name, __func__);
384 status = SMC_GET_RX_STS_FIFO(lp); 384 status = SMC_GET_RX_STS_FIFO(lp);
385 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n", 385 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
386 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff); 386 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
387 pkt_len = (status & RX_STS_PKT_LEN_) >> 16; 387 pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
388 if (status & RX_STS_ES_) { 388 if (status & RX_STS_ES_) {
@@ -1135,7 +1135,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1135 } 1135 }
1136#else 1136#else
1137 if (status & INT_STS_TSFL_) { 1137 if (status & INT_STS_TSFL_) {
1138 DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq \n", dev->name, ); 1138 DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n", dev->name, );
1139 smc911x_tx(dev); 1139 smc911x_tx(dev);
1140 SMC_ACK_INT(lp, INT_STS_TSFL_); 1140 SMC_ACK_INT(lp, INT_STS_TSFL_);
1141 } 1141 }
@@ -1274,7 +1274,7 @@ static void smc911x_timeout(struct net_device *dev)
1274 status = SMC_GET_INT(lp); 1274 status = SMC_GET_INT(lp);
1275 mask = SMC_GET_INT_EN(lp); 1275 mask = SMC_GET_INT_EN(lp);
1276 spin_unlock_irqrestore(&lp->lock, flags); 1276 spin_unlock_irqrestore(&lp->lock, flags);
1277 DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n", 1277 DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
1278 dev->name, status, mask); 1278 dev->name, status, mask);
1279 1279
1280 /* Dump the current TX FIFO contents and restart */ 1280 /* Dump the current TX FIFO contents and restart */
@@ -1289,7 +1289,7 @@ static void smc911x_timeout(struct net_device *dev)
1289 schedule_work(&lp->phy_configure); 1289 schedule_work(&lp->phy_configure);
1290 1290
1291 /* We can accept TX packets again */ 1291 /* We can accept TX packets again */
1292 dev->trans_start = jiffies; 1292 dev->trans_start = jiffies; /* prevent tx timeout */
1293 netif_wake_queue(dev); 1293 netif_wake_queue(dev);
1294} 1294}
1295 1295
@@ -1340,7 +1340,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
1340 * within that register. 1340 * within that register.
1341 */ 1341 */
1342 else if (!netdev_mc_empty(dev)) { 1342 else if (!netdev_mc_empty(dev)) {
1343 struct dev_mc_list *cur_addr; 1343 struct netdev_hw_addr *ha;
1344 1344
1345 /* Set the Hash perfec mode */ 1345 /* Set the Hash perfec mode */
1346 mcr |= MAC_CR_HPFILT_; 1346 mcr |= MAC_CR_HPFILT_;
@@ -1348,19 +1348,16 @@ static void smc911x_set_multicast_list(struct net_device *dev)
1348 /* start with a table of all zeros: reject all */ 1348 /* start with a table of all zeros: reject all */
1349 memset(multicast_table, 0, sizeof(multicast_table)); 1349 memset(multicast_table, 0, sizeof(multicast_table));
1350 1350
1351 netdev_for_each_mc_addr(cur_addr, dev) { 1351 netdev_for_each_mc_addr(ha, dev) {
1352 u32 position; 1352 u32 position;
1353 1353
1354 /* do we have a pointer here? */
1355 if (!cur_addr)
1356 break;
1357 /* make sure this is a multicast address - 1354 /* make sure this is a multicast address -
1358 shouldn't this be a given if we have it here ? */ 1355 shouldn't this be a given if we have it here ? */
1359 if (!(*cur_addr->dmi_addr & 1)) 1356 if (!(*ha->addr & 1))
1360 continue; 1357 continue;
1361 1358
1362 /* upper 6 bits are used as hash index */ 1359 /* upper 6 bits are used as hash index */
1363 position = ether_crc(ETH_ALEN, cur_addr->dmi_addr)>>26; 1360 position = ether_crc(ETH_ALEN, ha->addr)>>26;
1364 1361
1365 multicast_table[position>>5] |= 1 << (position&0x1f); 1362 multicast_table[position>>5] |= 1 << (position&0x1f);
1366 } 1363 }
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 3f2f7843aa4e..7486d0908064 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -416,7 +416,7 @@ static void smc_shutdown( int ioaddr )
416 416
417 417
418/* 418/*
419 . Function: smc_setmulticast( int ioaddr, int count, dev_mc_list * adds ) 419 . Function: smc_setmulticast( int ioaddr, struct net_device *dev )
420 . Purpose: 420 . Purpose:
421 . This sets the internal hardware table to filter out unwanted multicast 421 . This sets the internal hardware table to filter out unwanted multicast
422 . packets before they take up memory. 422 . packets before they take up memory.
@@ -437,26 +437,23 @@ static void smc_setmulticast(int ioaddr, struct net_device *dev)
437{ 437{
438 int i; 438 int i;
439 unsigned char multicast_table[ 8 ]; 439 unsigned char multicast_table[ 8 ];
440 struct dev_mc_list *cur_addr; 440 struct netdev_hw_addr *ha;
441 /* table for flipping the order of 3 bits */ 441 /* table for flipping the order of 3 bits */
442 unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 }; 442 unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
443 443
444 /* start with a table of all zeros: reject all */ 444 /* start with a table of all zeros: reject all */
445 memset( multicast_table, 0, sizeof( multicast_table ) ); 445 memset( multicast_table, 0, sizeof( multicast_table ) );
446 446
447 netdev_for_each_mc_addr(cur_addr, dev) { 447 netdev_for_each_mc_addr(ha, dev) {
448 int position; 448 int position;
449 449
450 /* do we have a pointer here? */
451 if ( !cur_addr )
452 break;
453 /* make sure this is a multicast address - shouldn't this 450 /* make sure this is a multicast address - shouldn't this
454 be a given if we have it here ? */ 451 be a given if we have it here ? */
455 if ( !( *cur_addr->dmi_addr & 1 ) ) 452 if (!(*ha->addr & 1))
456 continue; 453 continue;
457 454
458 /* only use the low order bits */ 455 /* only use the low order bits */
459 position = ether_crc_le(6, cur_addr->dmi_addr) & 0x3f; 456 position = ether_crc_le(6, ha->addr) & 0x3f;
460 457
461 /* do some messy swapping to put the bit in the right spot */ 458 /* do some messy swapping to put the bit in the right spot */
462 multicast_table[invert3[position&7]] |= 459 multicast_table[invert3[position&7]] |=
@@ -528,7 +525,7 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
528 numPages = ((length & 0xfffe) + 6) / 256; 525 numPages = ((length & 0xfffe) + 6) / 256;
529 526
530 if (numPages > 7 ) { 527 if (numPages > 7 ) {
531 printk(CARDNAME": Far too big packet error. \n"); 528 printk(CARDNAME": Far too big packet error.\n");
532 /* freeing the packet is a good thing here... but should 529 /* freeing the packet is a good thing here... but should
533 . any packets of this size get down here? */ 530 . any packets of this size get down here? */
534 dev_kfree_skb (skb); 531 dev_kfree_skb (skb);
@@ -570,9 +567,9 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
570 if ( !time_out ) { 567 if ( !time_out ) {
571 /* oh well, wait until the chip finds memory later */ 568 /* oh well, wait until the chip finds memory later */
572 SMC_ENABLE_INT( IM_ALLOC_INT ); 569 SMC_ENABLE_INT( IM_ALLOC_INT );
573 PRINTK2((CARDNAME": memory allocation deferred. \n")); 570 PRINTK2((CARDNAME": memory allocation deferred.\n"));
574 /* it's deferred, but I'll handle it later */ 571 /* it's deferred, but I'll handle it later */
575 return NETDEV_TX_OK; 572 return NETDEV_TX_OK;
576 } 573 }
577 /* or YES! I can send the packet now.. */ 574 /* or YES! I can send the packet now.. */
578 smc_hardware_send_packet(dev); 575 smc_hardware_send_packet(dev);
@@ -610,7 +607,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
610 ioaddr = dev->base_addr; 607 ioaddr = dev->base_addr;
611 608
612 if ( !skb ) { 609 if ( !skb ) {
613 PRINTK((CARDNAME": In XMIT with no packet to send \n")); 610 PRINTK((CARDNAME": In XMIT with no packet to send\n"));
614 return; 611 return;
615 } 612 }
616 length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; 613 length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
@@ -620,7 +617,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
620 packet_no = inb( ioaddr + PNR_ARR + 1 ); 617 packet_no = inb( ioaddr + PNR_ARR + 1 );
621 if ( packet_no & 0x80 ) { 618 if ( packet_no & 0x80 ) {
622 /* or isn't there? BAD CHIP! */ 619 /* or isn't there? BAD CHIP! */
623 printk(KERN_DEBUG CARDNAME": Memory allocation failed. \n"); 620 printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n");
624 dev_kfree_skb_any(skb); 621 dev_kfree_skb_any(skb);
625 lp->saved_skb = NULL; 622 lp->saved_skb = NULL;
626 netif_wake_queue(dev); 623 netif_wake_queue(dev);
@@ -685,7 +682,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
685 /* and let the chipset deal with it */ 682 /* and let the chipset deal with it */
686 outw( MC_ENQUEUE , ioaddr + MMU_CMD ); 683 outw( MC_ENQUEUE , ioaddr + MMU_CMD );
687 684
688 PRINTK2((CARDNAME": Sent packet of length %d \n",length)); 685 PRINTK2((CARDNAME": Sent packet of length %d\n", length));
689 686
690 lp->saved_skb = NULL; 687 lp->saved_skb = NULL;
691 dev_kfree_skb_any (skb); 688 dev_kfree_skb_any (skb);
@@ -694,8 +691,6 @@ static void smc_hardware_send_packet( struct net_device * dev )
694 691
695 /* we can send another packet */ 692 /* we can send another packet */
696 netif_wake_queue(dev); 693 netif_wake_queue(dev);
697
698 return;
699} 694}
700 695
701/*------------------------------------------------------------------------- 696/*-------------------------------------------------------------------------
@@ -937,7 +932,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
937 if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) { 932 if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) {
938 /* I don't recognize this chip, so... */ 933 /* I don't recognize this chip, so... */
939 printk(CARDNAME ": IO %x: Unrecognized revision register:" 934 printk(CARDNAME ": IO %x: Unrecognized revision register:"
940 " %x, Contact author. \n", ioaddr, revision_register ); 935 " %x, Contact author.\n", ioaddr, revision_register);
941 936
942 retval = -ENODEV; 937 retval = -ENODEV;
943 goto err_out; 938 goto err_out;
@@ -1045,9 +1040,6 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
1045 */ 1040 */
1046 printk("ADDR: %pM\n", dev->dev_addr); 1041 printk("ADDR: %pM\n", dev->dev_addr);
1047 1042
1048 /* set the private data to zero by default */
1049 memset(netdev_priv(dev), 0, sizeof(struct smc_local));
1050
1051 /* Grab the IRQ */ 1043 /* Grab the IRQ */
1052 retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev); 1044 retval = request_irq(dev->irq, smc_interrupt, 0, DRV_NAME, dev);
1053 if (retval) { 1045 if (retval) {
@@ -1074,7 +1066,7 @@ static void print_packet( byte * buf, int length )
1074 int remainder; 1066 int remainder;
1075 int lines; 1067 int lines;
1076 1068
1077 printk("Packet of length %d \n", length ); 1069 printk("Packet of length %d\n", length);
1078 lines = length / 16; 1070 lines = length / 16;
1079 remainder = length % 16; 1071 remainder = length % 16;
1080 1072
@@ -1170,7 +1162,7 @@ static void smc_timeout(struct net_device *dev)
1170 /* "kick" the adaptor */ 1162 /* "kick" the adaptor */
1171 smc_reset( dev->base_addr ); 1163 smc_reset( dev->base_addr );
1172 smc_enable( dev->base_addr ); 1164 smc_enable( dev->base_addr );
1173 dev->trans_start = jiffies; 1165 dev->trans_start = jiffies; /* prevent tx timeout */
1174 /* clear anything saved */ 1166 /* clear anything saved */
1175 ((struct smc_local *)netdev_priv(dev))->saved_skb = NULL; 1167 ((struct smc_local *)netdev_priv(dev))->saved_skb = NULL;
1176 netif_wake_queue(dev); 1168 netif_wake_queue(dev);
@@ -1201,7 +1193,7 @@ static void smc_rcv(struct net_device *dev)
1201 1193
1202 if ( packet_number & FP_RXEMPTY ) { 1194 if ( packet_number & FP_RXEMPTY ) {
1203 /* we got called , but nothing was on the FIFO */ 1195 /* we got called , but nothing was on the FIFO */
1204 PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO. \n")); 1196 PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO.\n"));
1205 /* don't need to restore anything */ 1197 /* don't need to restore anything */
1206 return; 1198 return;
1207 } 1199 }
@@ -1257,14 +1249,14 @@ static void smc_rcv(struct net_device *dev)
1257 to send the DWORDs or the bytes first, or some 1249 to send the DWORDs or the bytes first, or some
1258 mixture. A mixture might improve already slow PIO 1250 mixture. A mixture might improve already slow PIO
1259 performance */ 1251 performance */
1260 PRINTK3((" Reading %d dwords (and %d bytes) \n", 1252 PRINTK3((" Reading %d dwords (and %d bytes)\n",
1261 packet_length >> 2, packet_length & 3 )); 1253 packet_length >> 2, packet_length & 3 ));
1262 insl(ioaddr + DATA_1 , data, packet_length >> 2 ); 1254 insl(ioaddr + DATA_1 , data, packet_length >> 2 );
1263 /* read the left over bytes */ 1255 /* read the left over bytes */
1264 insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC), 1256 insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC),
1265 packet_length & 0x3 ); 1257 packet_length & 0x3 );
1266#else 1258#else
1267 PRINTK3((" Reading %d words and %d byte(s) \n", 1259 PRINTK3((" Reading %d words and %d byte(s)\n",
1268 (packet_length >> 1 ), packet_length & 1 )); 1260 (packet_length >> 1 ), packet_length & 1 ));
1269 insw(ioaddr + DATA_1 , data, packet_length >> 1); 1261 insw(ioaddr + DATA_1 , data, packet_length >> 1);
1270 if ( packet_length & 1 ) { 1262 if ( packet_length & 1 ) {
@@ -1333,7 +1325,7 @@ static void smc_tx( struct net_device * dev )
1333 outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER ); 1325 outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER );
1334 1326
1335 tx_status = inw( ioaddr + DATA_1 ); 1327 tx_status = inw( ioaddr + DATA_1 );
1336 PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status )); 1328 PRINTK3((CARDNAME": TX DONE STATUS: %4x\n", tx_status));
1337 1329
1338 dev->stats.tx_errors++; 1330 dev->stats.tx_errors++;
1339 if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++; 1331 if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++;
@@ -1347,7 +1339,7 @@ static void smc_tx( struct net_device * dev )
1347#endif 1339#endif
1348 1340
1349 if ( tx_status & TS_SUCCESS ) { 1341 if ( tx_status & TS_SUCCESS ) {
1350 printk(CARDNAME": Successful packet caused interrupt \n"); 1342 printk(CARDNAME": Successful packet caused interrupt\n");
1351 } 1343 }
1352 /* re-enable transmit */ 1344 /* re-enable transmit */
1353 SMC_SELECT_BANK( 0 ); 1345 SMC_SELECT_BANK( 0 );
@@ -1361,7 +1353,6 @@ static void smc_tx( struct net_device * dev )
1361 lp->packets_waiting--; 1353 lp->packets_waiting--;
1362 1354
1363 outb( saved_packet, ioaddr + PNR_ARR ); 1355 outb( saved_packet, ioaddr + PNR_ARR );
1364 return;
1365} 1356}
1366 1357
1367/*-------------------------------------------------------------------- 1358/*--------------------------------------------------------------------
@@ -1393,7 +1384,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
1393 int handled = 0; 1384 int handled = 0;
1394 1385
1395 1386
1396 PRINTK3((CARDNAME": SMC interrupt started \n")); 1387 PRINTK3((CARDNAME": SMC interrupt started\n"));
1397 1388
1398 saved_bank = inw( ioaddr + BANK_SELECT ); 1389 saved_bank = inw( ioaddr + BANK_SELECT );
1399 1390
@@ -1408,7 +1399,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
1408 /* set a timeout value, so I don't stay here forever */ 1399 /* set a timeout value, so I don't stay here forever */
1409 timeout = 4; 1400 timeout = 4;
1410 1401
1411 PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x \n", mask )); 1402 PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x\n", mask));
1412 do { 1403 do {
1413 /* read the status flag, and mask it */ 1404 /* read the status flag, and mask it */
1414 status = inb( ioaddr + INTERRUPT ) & mask; 1405 status = inb( ioaddr + INTERRUPT ) & mask;
@@ -1418,7 +1409,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
1418 handled = 1; 1409 handled = 1;
1419 1410
1420 PRINTK3((KERN_WARNING CARDNAME 1411 PRINTK3((KERN_WARNING CARDNAME
1421 ": Handling interrupt status %x \n", status )); 1412 ": Handling interrupt status %x\n", status));
1422 1413
1423 if (status & IM_RCV_INT) { 1414 if (status & IM_RCV_INT) {
1424 /* Got a packet(s). */ 1415 /* Got a packet(s). */
@@ -1452,7 +1443,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
1452 1443
1453 } else if (status & IM_ALLOC_INT ) { 1444 } else if (status & IM_ALLOC_INT ) {
1454 PRINTK2((KERN_DEBUG CARDNAME 1445 PRINTK2((KERN_DEBUG CARDNAME
1455 ": Allocation interrupt \n")); 1446 ": Allocation interrupt\n"));
1456 /* clear this interrupt so it doesn't happen again */ 1447 /* clear this interrupt so it doesn't happen again */
1457 mask &= ~IM_ALLOC_INT; 1448 mask &= ~IM_ALLOC_INT;
1458 1449
@@ -1470,9 +1461,9 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
1470 dev->stats.rx_fifo_errors++; 1461 dev->stats.rx_fifo_errors++;
1471 outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT ); 1462 outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT );
1472 } else if (status & IM_EPH_INT ) { 1463 } else if (status & IM_EPH_INT ) {
1473 PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n")); 1464 PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT\n"));
1474 } else if (status & IM_ERCV_INT ) { 1465 } else if (status & IM_ERCV_INT ) {
1475 PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT \n")); 1466 PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT\n"));
1476 outb( IM_ERCV_INT, ioaddr + INTERRUPT ); 1467 outb( IM_ERCV_INT, ioaddr + INTERRUPT );
1477 } 1468 }
1478 } while ( timeout -- ); 1469 } while ( timeout -- );
@@ -1482,7 +1473,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
1482 SMC_SELECT_BANK( 2 ); 1473 SMC_SELECT_BANK( 2 );
1483 outb( mask, ioaddr + INT_MASK ); 1474 outb( mask, ioaddr + INT_MASK );
1484 1475
1485 PRINTK3(( KERN_WARNING CARDNAME ": MASK is now %x \n", mask )); 1476 PRINTK3((KERN_WARNING CARDNAME ": MASK is now %x\n", mask));
1486 outw( saved_pointer, ioaddr + POINTER ); 1477 outw( saved_pointer, ioaddr + POINTER );
1487 1478
1488 SMC_SELECT_BANK( saved_bank ); 1479 SMC_SELECT_BANK( saved_bank );
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 860339d51d58..10cf0cbc2185 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1285,7 +1285,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1285 smc_phy_interrupt(dev); 1285 smc_phy_interrupt(dev);
1286 } else if (status & IM_ERCV_INT) { 1286 } else if (status & IM_ERCV_INT) {
1287 SMC_ACK_INT(lp, IM_ERCV_INT); 1287 SMC_ACK_INT(lp, IM_ERCV_INT);
1288 PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT \n", dev->name); 1288 PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
1289 } 1289 }
1290 } while (--timeout); 1290 } while (--timeout);
1291 1291
@@ -1360,7 +1360,7 @@ static void smc_timeout(struct net_device *dev)
1360 schedule_work(&lp->phy_configure); 1360 schedule_work(&lp->phy_configure);
1361 1361
1362 /* We can accept TX packets again */ 1362 /* We can accept TX packets again */
1363 dev->trans_start = jiffies; 1363 dev->trans_start = jiffies; /* prevent tx timeout */
1364 netif_wake_queue(dev); 1364 netif_wake_queue(dev);
1365} 1365}
1366 1366
@@ -1412,7 +1412,7 @@ static void smc_set_multicast_list(struct net_device *dev)
1412 * within that register. 1412 * within that register.
1413 */ 1413 */
1414 else if (!netdev_mc_empty(dev)) { 1414 else if (!netdev_mc_empty(dev)) {
1415 struct dev_mc_list *cur_addr; 1415 struct netdev_hw_addr *ha;
1416 1416
1417 /* table for flipping the order of 3 bits */ 1417 /* table for flipping the order of 3 bits */
1418 static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7}; 1418 static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
@@ -1420,16 +1420,16 @@ static void smc_set_multicast_list(struct net_device *dev)
1420 /* start with a table of all zeros: reject all */ 1420 /* start with a table of all zeros: reject all */
1421 memset(multicast_table, 0, sizeof(multicast_table)); 1421 memset(multicast_table, 0, sizeof(multicast_table));
1422 1422
1423 netdev_for_each_mc_addr(cur_addr, dev) { 1423 netdev_for_each_mc_addr(ha, dev) {
1424 int position; 1424 int position;
1425 1425
1426 /* make sure this is a multicast address - 1426 /* make sure this is a multicast address -
1427 shouldn't this be a given if we have it here ? */ 1427 shouldn't this be a given if we have it here ? */
1428 if (!(*cur_addr->dmi_addr & 1)) 1428 if (!(*ha->addr & 1))
1429 continue; 1429 continue;
1430 1430
1431 /* only use the low order bits */ 1431 /* only use the low order bits */
1432 position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f; 1432 position = crc32_le(~0, ha->addr, 6) & 0x3f;
1433 1433
1434 /* do some messy swapping to put the bit in the right spot */ 1434 /* do some messy swapping to put the bit in the right spot */
1435 multicast_table[invert3[position&7]] |= 1435 multicast_table[invert3[position&7]] |=
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index cbf520d38eac..cc559741b0fa 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -736,7 +736,7 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
736 SMSC_TRACE(HW, "configuring for carrier OK"); 736 SMSC_TRACE(HW, "configuring for carrier OK");
737 if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) && 737 if ((pdata->gpio_orig_setting & GPIO_CFG_LED1_EN_) &&
738 (!pdata->using_extphy)) { 738 (!pdata->using_extphy)) {
739 /* Restore orginal GPIO configuration */ 739 /* Restore original GPIO configuration */
740 pdata->gpio_setting = pdata->gpio_orig_setting; 740 pdata->gpio_setting = pdata->gpio_orig_setting;
741 smsc911x_reg_write(pdata, GPIO_CFG, 741 smsc911x_reg_write(pdata, GPIO_CFG,
742 pdata->gpio_setting); 742 pdata->gpio_setting);
@@ -750,7 +750,7 @@ static void smsc911x_phy_adjust_link(struct net_device *dev)
750 if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) && 750 if ((pdata->gpio_setting & GPIO_CFG_LED1_EN_) &&
751 (!pdata->using_extphy)) { 751 (!pdata->using_extphy)) {
752 /* Force 10/100 LED off, after saving 752 /* Force 10/100 LED off, after saving
753 * orginal GPIO configuration */ 753 * original GPIO configuration */
754 pdata->gpio_orig_setting = pdata->gpio_setting; 754 pdata->gpio_orig_setting = pdata->gpio_setting;
755 755
756 pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_; 756 pdata->gpio_setting &= ~GPIO_CFG_LED1_EN_;
@@ -1335,7 +1335,6 @@ static int smsc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
1335 smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz); 1335 smsc911x_tx_writefifo(pdata, (unsigned int *)bufp, wrsz);
1336 freespace -= (skb->len + 32); 1336 freespace -= (skb->len + 32);
1337 dev_kfree_skb(skb); 1337 dev_kfree_skb(skb);
1338 dev->trans_start = jiffies;
1339 1338
1340 if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30)) 1339 if (unlikely(smsc911x_tx_get_txstatcount(pdata) >= 30))
1341 smsc911x_tx_update_txcounters(dev); 1340 smsc911x_tx_update_txcounters(dev);
@@ -1382,13 +1381,13 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
1382 /* Enabling specific multicast addresses */ 1381 /* Enabling specific multicast addresses */
1383 unsigned int hash_high = 0; 1382 unsigned int hash_high = 0;
1384 unsigned int hash_low = 0; 1383 unsigned int hash_low = 0;
1385 struct dev_mc_list *mc_list; 1384 struct netdev_hw_addr *ha;
1386 1385
1387 pdata->set_bits_mask = MAC_CR_HPFILT_; 1386 pdata->set_bits_mask = MAC_CR_HPFILT_;
1388 pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_); 1387 pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);
1389 1388
1390 netdev_for_each_mc_addr(mc_list, dev) { 1389 netdev_for_each_mc_addr(ha, dev) {
1391 unsigned int bitnum = smsc911x_hash(mc_list->dmi_addr); 1390 unsigned int bitnum = smsc911x_hash(ha->addr);
1392 unsigned int mask = 0x01 << (bitnum & 0x1F); 1391 unsigned int mask = 0x01 << (bitnum & 0x1F);
1393 1392
1394 if (bitnum & 0x20) 1393 if (bitnum & 0x20)
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index aafaebf45748..6cdee6a15f9f 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1034,8 +1034,6 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
1034 smsc9420_reg_write(pd, TX_POLL_DEMAND, 1); 1034 smsc9420_reg_write(pd, TX_POLL_DEMAND, 1);
1035 smsc9420_pci_flush_write(pd); 1035 smsc9420_pci_flush_write(pd);
1036 1036
1037 dev->trans_start = jiffies;
1038
1039 return NETDEV_TX_OK; 1037 return NETDEV_TX_OK;
1040} 1038}
1041 1039
@@ -1064,12 +1062,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
1064 mac_cr |= MAC_CR_MCPAS_; 1062 mac_cr |= MAC_CR_MCPAS_;
1065 mac_cr &= (~MAC_CR_HPFILT_); 1063 mac_cr &= (~MAC_CR_HPFILT_);
1066 } else if (!netdev_mc_empty(dev)) { 1064 } else if (!netdev_mc_empty(dev)) {
1067 struct dev_mc_list *mc_list; 1065 struct netdev_hw_addr *ha;
1068 u32 hash_lo = 0, hash_hi = 0; 1066 u32 hash_lo = 0, hash_hi = 0;
1069 1067
1070 smsc_dbg(HW, "Multicast filter enabled"); 1068 smsc_dbg(HW, "Multicast filter enabled");
1071 netdev_for_each_mc_addr(mc_list, dev) { 1069 netdev_for_each_mc_addr(ha, dev) {
1072 u32 bit_num = smsc9420_hash(mc_list->dmi_addr); 1070 u32 bit_num = smsc9420_hash(ha->addr);
1073 u32 mask = 1 << (bit_num & 0x1F); 1071 u32 mask = 1 << (bit_num & 0x1F);
1074 1072
1075 if (bit_num & 0x20) 1073 if (bit_num & 0x20)
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 287c251075e5..26e25d7f5829 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -174,7 +174,7 @@ static void sonic_tx_timeout(struct net_device *dev)
174 /* Try to restart the adaptor. */ 174 /* Try to restart the adaptor. */
175 sonic_init(dev); 175 sonic_init(dev);
176 lp->stats.tx_errors++; 176 lp->stats.tx_errors++;
177 dev->trans_start = jiffies; 177 dev->trans_start = jiffies; /* prevent tx timeout */
178 netif_wake_queue(dev); 178 netif_wake_queue(dev);
179} 179}
180 180
@@ -263,8 +263,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
263 263
264 SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); 264 SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
265 265
266 dev->trans_start = jiffies;
267
268 return NETDEV_TX_OK; 266 return NETDEV_TX_OK;
269} 267}
270 268
@@ -531,7 +529,7 @@ static void sonic_multicast_list(struct net_device *dev)
531{ 529{
532 struct sonic_local *lp = netdev_priv(dev); 530 struct sonic_local *lp = netdev_priv(dev);
533 unsigned int rcr; 531 unsigned int rcr;
534 struct dev_mc_list *dmi; 532 struct netdev_hw_addr *ha;
535 unsigned char *addr; 533 unsigned char *addr;
536 int i; 534 int i;
537 535
@@ -550,8 +548,8 @@ static void sonic_multicast_list(struct net_device *dev)
550 netdev_mc_count(dev)); 548 netdev_mc_count(dev));
551 sonic_set_cam_enable(dev, 1); /* always enable our own address */ 549 sonic_set_cam_enable(dev, 1); /* always enable our own address */
552 i = 1; 550 i = 1;
553 netdev_for_each_mc_addr(dmi, dev) { 551 netdev_for_each_mc_addr(ha, dev) {
554 addr = dmi->dmi_addr; 552 addr = ha->addr;
555 sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]); 553 sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
556 sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]); 554 sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
557 sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]); 555 sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index dd3cb0f2d21f..1636a34d95dd 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -625,7 +625,7 @@ spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
625static void 625static void
626spider_net_set_multi(struct net_device *netdev) 626spider_net_set_multi(struct net_device *netdev)
627{ 627{
628 struct dev_mc_list *mc; 628 struct netdev_hw_addr *ha;
629 u8 hash; 629 u8 hash;
630 int i; 630 int i;
631 u32 reg; 631 u32 reg;
@@ -646,8 +646,8 @@ spider_net_set_multi(struct net_device *netdev)
646 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */ 646 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
647 set_bit(0xfd, bitmask); 647 set_bit(0xfd, bitmask);
648 648
649 netdev_for_each_mc_addr(mc, netdev) { 649 netdev_for_each_mc_addr(ha, netdev) {
650 hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr); 650 hash = spider_net_get_multicast_hash(netdev, ha->addr);
651 set_bit(hash, bitmask); 651 set_bit(hash, bitmask);
652 } 652 }
653 653
@@ -2095,8 +2095,6 @@ static void spider_net_link_phy(unsigned long data)
2095 card->netdev->name, phy->speed, 2095 card->netdev->name, phy->speed,
2096 phy->duplex == 1 ? "Full" : "Half", 2096 phy->duplex == 1 ? "Full" : "Half",
2097 phy->autoneg == 1 ? "" : "no "); 2097 phy->autoneg == 1 ? "" : "no ");
2098
2099 return;
2100} 2098}
2101 2099
2102/** 2100/**
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 6dfa69899019..74b7ae76906e 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1173,7 +1173,7 @@ static void tx_timeout(struct net_device *dev)
1173 1173
1174 /* Trigger an immediate transmit demand. */ 1174 /* Trigger an immediate transmit demand. */
1175 1175
1176 dev->trans_start = jiffies; 1176 dev->trans_start = jiffies; /* prevent tx timeout */
1177 np->stats.tx_errors++; 1177 np->stats.tx_errors++;
1178 netif_wake_queue(dev); 1178 netif_wake_queue(dev);
1179} 1179}
@@ -1221,8 +1221,6 @@ static void init_ring(struct net_device *dev)
1221 1221
1222 for (i = 0; i < TX_RING_SIZE; i++) 1222 for (i = 0; i < TX_RING_SIZE; i++)
1223 memset(&np->tx_info[i], 0, sizeof(np->tx_info[i])); 1223 memset(&np->tx_info[i], 0, sizeof(np->tx_info[i]));
1224
1225 return;
1226} 1224}
1227 1225
1228 1226
@@ -1312,8 +1310,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1312 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE) 1310 if ((np->cur_tx - np->dirty_tx) + 4 > TX_RING_SIZE)
1313 netif_stop_queue(dev); 1311 netif_stop_queue(dev);
1314 1312
1315 dev->trans_start = jiffies;
1316
1317 return NETDEV_TX_OK; 1313 return NETDEV_TX_OK;
1318} 1314}
1319 1315
@@ -1766,7 +1762,7 @@ static void set_rx_mode(struct net_device *dev)
1766 struct netdev_private *np = netdev_priv(dev); 1762 struct netdev_private *np = netdev_priv(dev);
1767 void __iomem *ioaddr = np->base; 1763 void __iomem *ioaddr = np->base;
1768 u32 rx_mode = MinVLANPrio; 1764 u32 rx_mode = MinVLANPrio;
1769 struct dev_mc_list *mclist; 1765 struct netdev_hw_addr *ha;
1770 int i; 1766 int i;
1771#ifdef VLAN_SUPPORT 1767#ifdef VLAN_SUPPORT
1772 1768
@@ -1804,8 +1800,8 @@ static void set_rx_mode(struct net_device *dev)
1804 /* Use the 16 element perfect filter, skip first two entries. */ 1800 /* Use the 16 element perfect filter, skip first two entries. */
1805 void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16; 1801 void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1806 __be16 *eaddrs; 1802 __be16 *eaddrs;
1807 netdev_for_each_mc_addr(mclist, dev) { 1803 netdev_for_each_mc_addr(ha, dev) {
1808 eaddrs = (__be16 *)mclist->dmi_addr; 1804 eaddrs = (__be16 *) ha->addr;
1809 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4; 1805 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1810 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; 1806 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1811 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8; 1807 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
@@ -1825,10 +1821,10 @@ static void set_rx_mode(struct net_device *dev)
1825 __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ 1821 __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
1826 1822
1827 memset(mc_filter, 0, sizeof(mc_filter)); 1823 memset(mc_filter, 0, sizeof(mc_filter));
1828 netdev_for_each_mc_addr(mclist, dev) { 1824 netdev_for_each_mc_addr(ha, dev) {
1829 /* The chip uses the upper 9 CRC bits 1825 /* The chip uses the upper 9 CRC bits
1830 as index into the hash table */ 1826 as index into the hash table */
1831 int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23; 1827 int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
1832 __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1]; 1828 __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1833 1829
1834 *fptr |= cpu_to_le32(1 << (bit_nr & 31)); 1830 *fptr |= cpu_to_le32(1 << (bit_nr & 31));
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index c776af15fe1a..9691733ddb8e 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o 2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ 3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
4 dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ 4 dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
5 dwmac100.o $(stmmac-y) 5 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index 2a58172e986a..144f76fd3e39 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -22,8 +22,26 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include "descs.h"
26#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
27#define STMMAC_VLAN_TAG_USED
28#include <linux/if_vlan.h>
29#endif
30
31#include "descs.h"
32
33#undef CHIP_DEBUG_PRINT
34/* Turn-on extra printk debug for MAC core, dma and descriptors */
35/* #define CHIP_DEBUG_PRINT */
36
37#ifdef CHIP_DEBUG_PRINT
38#define CHIP_DBG(fmt, args...) printk(fmt, ## args)
39#else
40#define CHIP_DBG(fmt, args...) do { } while (0)
41#endif
42
43#undef FRAME_FILTER_DEBUG
44/* #define FRAME_FILTER_DEBUG */
27 45
28struct stmmac_extra_stats { 46struct stmmac_extra_stats {
29 /* Transmit errors */ 47 /* Transmit errors */
@@ -231,3 +249,4 @@ extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
231 unsigned int high, unsigned int low); 249 unsigned int high, unsigned int low);
232extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr, 250extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
233 unsigned int high, unsigned int low); 251 unsigned int high, unsigned int low);
252extern void dwmac_dma_flush_tx_fifo(unsigned long ioaddr);
diff --git a/drivers/net/stmmac/dwmac100.c b/drivers/net/stmmac/dwmac100.c
deleted file mode 100644
index 4cacca614fc1..000000000000
--- a/drivers/net/stmmac/dwmac100.c
+++ /dev/null
@@ -1,538 +0,0 @@
1/*******************************************************************************
2 This is the driver for the MAC 10/100 on-chip Ethernet controller
3 currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
4
5 DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
6 this code.
7
8 Copyright (C) 2007-2009 STMicroelectronics Ltd
9
10 This program is free software; you can redistribute it and/or modify it
11 under the terms and conditions of the GNU General Public License,
12 version 2, as published by the Free Software Foundation.
13
14 This program is distributed in the hope it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, write to the Free Software Foundation, Inc.,
21 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22
23 The full GNU General Public License is included in this distribution in
24 the file called "COPYING".
25
26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
27*******************************************************************************/
28
29#include <linux/crc32.h>
30#include <linux/mii.h>
31#include <linux/phy.h>
32#include <linux/slab.h>
33
34#include "common.h"
35#include "dwmac100.h"
36#include "dwmac_dma.h"
37
38#undef DWMAC100_DEBUG
39/*#define DWMAC100_DEBUG*/
40#ifdef DWMAC100_DEBUG
41#define DBG(fmt, args...) printk(fmt, ## args)
42#else
43#define DBG(fmt, args...) do { } while (0)
44#endif
45
46static void dwmac100_core_init(unsigned long ioaddr)
47{
48 u32 value = readl(ioaddr + MAC_CONTROL);
49
50 writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
51
52#ifdef STMMAC_VLAN_TAG_USED
53 writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
54#endif
55 return;
56}
57
58static void dwmac100_dump_mac_regs(unsigned long ioaddr)
59{
60 pr_info("\t----------------------------------------------\n"
61 "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
62 "\t----------------------------------------------\n",
63 (unsigned int)ioaddr);
64 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
65 readl(ioaddr + MAC_CONTROL));
66 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
67 readl(ioaddr + MAC_ADDR_HIGH));
68 pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
69 readl(ioaddr + MAC_ADDR_LOW));
70 pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
71 MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
72 pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
73 MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
74 pr_info("\tflow control (offset 0x%x): 0x%08x\n",
75 MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
76 pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
77 readl(ioaddr + MAC_VLAN1));
78 pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
79 readl(ioaddr + MAC_VLAN2));
80 pr_info("\n\tMAC management counter registers\n");
81 pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
82 MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
83 pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
84 MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
85 pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
86 MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
87 pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
88 MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
89 pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
90 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
91 return;
92}
93
94static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
95 u32 dma_rx)
96{
97 u32 value = readl(ioaddr + DMA_BUS_MODE);
98 /* DMA SW reset */
99 value |= DMA_BUS_MODE_SFT_RESET;
100 writel(value, ioaddr + DMA_BUS_MODE);
101 do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
102
103 /* Enable Application Access by writing to DMA CSR0 */
104 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
105 ioaddr + DMA_BUS_MODE);
106
107 /* Mask interrupts by writing to CSR7 */
108 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
109
110 /* The base address of the RX/TX descriptor lists must be written into
111 * DMA CSR3 and CSR4, respectively. */
112 writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
113 writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
114
115 return 0;
116}
117
118/* Store and Forward capability is not used at all..
119 * The transmit threshold can be programmed by
120 * setting the TTC bits in the DMA control register.*/
121static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
122 int rxmode)
123{
124 u32 csr6 = readl(ioaddr + DMA_CONTROL);
125
126 if (txmode <= 32)
127 csr6 |= DMA_CONTROL_TTC_32;
128 else if (txmode <= 64)
129 csr6 |= DMA_CONTROL_TTC_64;
130 else
131 csr6 |= DMA_CONTROL_TTC_128;
132
133 writel(csr6, ioaddr + DMA_CONTROL);
134
135 return;
136}
137
138static void dwmac100_dump_dma_regs(unsigned long ioaddr)
139{
140 int i;
141
142 DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
143 for (i = 0; i < 9; i++)
144 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
145 (DMA_BUS_MODE + i * 4),
146 readl(ioaddr + DMA_BUS_MODE + i * 4));
147 DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
148 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
149 DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
150 DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
151 return;
152}
153
154/* DMA controller has two counters to track the number of
155 * the receive missed frames. */
156static void dwmac100_dma_diagnostic_fr(void *data,
157 struct stmmac_extra_stats *x,
158 unsigned long ioaddr)
159{
160 struct net_device_stats *stats = (struct net_device_stats *)data;
161 u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
162
163 if (unlikely(csr8)) {
164 if (csr8 & DMA_MISSED_FRAME_OVE) {
165 stats->rx_over_errors += 0x800;
166 x->rx_overflow_cntr += 0x800;
167 } else {
168 unsigned int ove_cntr;
169 ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
170 stats->rx_over_errors += ove_cntr;
171 x->rx_overflow_cntr += ove_cntr;
172 }
173
174 if (csr8 & DMA_MISSED_FRAME_OVE_M) {
175 stats->rx_missed_errors += 0xffff;
176 x->rx_missed_cntr += 0xffff;
177 } else {
178 unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
179 stats->rx_missed_errors += miss_f;
180 x->rx_missed_cntr += miss_f;
181 }
182 }
183 return;
184}
185
186static int dwmac100_get_tx_frame_status(void *data,
187 struct stmmac_extra_stats *x,
188 struct dma_desc *p, unsigned long ioaddr)
189{
190 int ret = 0;
191 struct net_device_stats *stats = (struct net_device_stats *)data;
192
193 if (unlikely(p->des01.tx.error_summary)) {
194 if (unlikely(p->des01.tx.underflow_error)) {
195 x->tx_underflow++;
196 stats->tx_fifo_errors++;
197 }
198 if (unlikely(p->des01.tx.no_carrier)) {
199 x->tx_carrier++;
200 stats->tx_carrier_errors++;
201 }
202 if (unlikely(p->des01.tx.loss_carrier)) {
203 x->tx_losscarrier++;
204 stats->tx_carrier_errors++;
205 }
206 if (unlikely((p->des01.tx.excessive_deferral) ||
207 (p->des01.tx.excessive_collisions) ||
208 (p->des01.tx.late_collision)))
209 stats->collisions += p->des01.tx.collision_count;
210 ret = -1;
211 }
212 if (unlikely(p->des01.tx.heartbeat_fail)) {
213 x->tx_heartbeat++;
214 stats->tx_heartbeat_errors++;
215 ret = -1;
216 }
217 if (unlikely(p->des01.tx.deferred))
218 x->tx_deferred++;
219
220 return ret;
221}
222
223static int dwmac100_get_tx_len(struct dma_desc *p)
224{
225 return p->des01.tx.buffer1_size;
226}
227
228/* This function verifies if each incoming frame has some errors
229 * and, if required, updates the multicast statistics.
230 * In case of success, it returns csum_none becasue the device
231 * is not able to compute the csum in HW. */
232static int dwmac100_get_rx_frame_status(void *data,
233 struct stmmac_extra_stats *x,
234 struct dma_desc *p)
235{
236 int ret = csum_none;
237 struct net_device_stats *stats = (struct net_device_stats *)data;
238
239 if (unlikely(p->des01.rx.last_descriptor == 0)) {
240 pr_warning("dwmac100 Error: Oversized Ethernet "
241 "frame spanned multiple buffers\n");
242 stats->rx_length_errors++;
243 return discard_frame;
244 }
245
246 if (unlikely(p->des01.rx.error_summary)) {
247 if (unlikely(p->des01.rx.descriptor_error))
248 x->rx_desc++;
249 if (unlikely(p->des01.rx.partial_frame_error))
250 x->rx_partial++;
251 if (unlikely(p->des01.rx.run_frame))
252 x->rx_runt++;
253 if (unlikely(p->des01.rx.frame_too_long))
254 x->rx_toolong++;
255 if (unlikely(p->des01.rx.collision)) {
256 x->rx_collision++;
257 stats->collisions++;
258 }
259 if (unlikely(p->des01.rx.crc_error)) {
260 x->rx_crc++;
261 stats->rx_crc_errors++;
262 }
263 ret = discard_frame;
264 }
265 if (unlikely(p->des01.rx.dribbling))
266 ret = discard_frame;
267
268 if (unlikely(p->des01.rx.length_error)) {
269 x->rx_length++;
270 ret = discard_frame;
271 }
272 if (unlikely(p->des01.rx.mii_error)) {
273 x->rx_mii++;
274 ret = discard_frame;
275 }
276 if (p->des01.rx.multicast_frame) {
277 x->rx_multicast++;
278 stats->multicast++;
279 }
280 return ret;
281}
282
283static void dwmac100_irq_status(unsigned long ioaddr)
284{
285 return;
286}
287
288static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
289 unsigned int reg_n)
290{
291 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
292}
293
294static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
295 unsigned int reg_n)
296{
297 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
298}
299
300static void dwmac100_set_filter(struct net_device *dev)
301{
302 unsigned long ioaddr = dev->base_addr;
303 u32 value = readl(ioaddr + MAC_CONTROL);
304
305 if (dev->flags & IFF_PROMISC) {
306 value |= MAC_CONTROL_PR;
307 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
308 MAC_CONTROL_HP);
309 } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
310 || (dev->flags & IFF_ALLMULTI)) {
311 value |= MAC_CONTROL_PM;
312 value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
313 writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
314 writel(0xffffffff, ioaddr + MAC_HASH_LOW);
315 } else if (netdev_mc_empty(dev)) { /* no multicast */
316 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
317 MAC_CONTROL_HO | MAC_CONTROL_HP);
318 } else {
319 u32 mc_filter[2];
320 struct dev_mc_list *mclist;
321
322 /* Perfect filter mode for physical address and Hash
323 filter for multicast */
324 value |= MAC_CONTROL_HP;
325 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
326 MAC_CONTROL_IF | MAC_CONTROL_HO);
327
328 memset(mc_filter, 0, sizeof(mc_filter));
329 netdev_for_each_mc_addr(mclist, dev) {
330 /* The upper 6 bits of the calculated CRC are used to
331 * index the contens of the hash table */
332 int bit_nr =
333 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
334 /* The most significant bit determines the register to
335 * use (H/L) while the other 5 bits determine the bit
336 * within the register. */
337 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
338 }
339 writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
340 writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
341 }
342
343 writel(value, ioaddr + MAC_CONTROL);
344
345 DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
346 "HI 0x%08x, LO 0x%08x\n",
347 __func__, readl(ioaddr + MAC_CONTROL),
348 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
349 return;
350}
351
352static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
353 unsigned int fc, unsigned int pause_time)
354{
355 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
356
357 if (duplex)
358 flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
359 writel(flow, ioaddr + MAC_FLOW_CTRL);
360
361 return;
362}
363
/* Wake-on-LAN hook.  The MAC 10/100 core has no PMT module, so there
 * is nothing to configure; this stub exists only to satisfy the
 * stmmac_ops interface.  Tested on ST platforms only. */
static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
{
}
371
372static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
373 int disable_rx_ic)
374{
375 int i;
376 for (i = 0; i < ring_size; i++) {
377 p->des01.rx.own = 1;
378 p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
379 if (i == ring_size - 1)
380 p->des01.rx.end_ring = 1;
381 if (disable_rx_ic)
382 p->des01.rx.disable_ic = 1;
383 p++;
384 }
385 return;
386}
387
388static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
389{
390 int i;
391 for (i = 0; i < ring_size; i++) {
392 p->des01.tx.own = 0;
393 if (i == ring_size - 1)
394 p->des01.tx.end_ring = 1;
395 p++;
396 }
397 return;
398}
399
/* Return the OWN bit of a TX descriptor: non-zero while the descriptor
 * still belongs to the DMA engine. */
static int dwmac100_get_tx_owner(struct dma_desc *p)
{
	return p->des01.tx.own;
}
404
/* Return the OWN bit of an RX descriptor: non-zero while the descriptor
 * still belongs to the DMA engine. */
static int dwmac100_get_rx_owner(struct dma_desc *p)
{
	return p->des01.rx.own;
}
409
/* Hand a TX descriptor to the DMA engine by setting its OWN bit. */
static void dwmac100_set_tx_owner(struct dma_desc *p)
{
	p->des01.tx.own = 1;
}
414
/* Hand an RX descriptor to the DMA engine by setting its OWN bit. */
static void dwmac100_set_rx_owner(struct dma_desc *p)
{
	p->des01.rx.own = 1;
}
419
/* Non-zero if this TX descriptor carries the last segment of a frame. */
static int dwmac100_get_tx_ls(struct dma_desc *p)
{
	return p->des01.tx.last_segment;
}
424
425static void dwmac100_release_tx_desc(struct dma_desc *p)
426{
427 int ter = p->des01.tx.end_ring;
428
429 /* clean field used within the xmit */
430 p->des01.tx.first_segment = 0;
431 p->des01.tx.last_segment = 0;
432 p->des01.tx.buffer1_size = 0;
433
434 /* clean status reported */
435 p->des01.tx.error_summary = 0;
436 p->des01.tx.underflow_error = 0;
437 p->des01.tx.no_carrier = 0;
438 p->des01.tx.loss_carrier = 0;
439 p->des01.tx.excessive_deferral = 0;
440 p->des01.tx.excessive_collisions = 0;
441 p->des01.tx.late_collision = 0;
442 p->des01.tx.heartbeat_fail = 0;
443 p->des01.tx.deferred = 0;
444
445 /* set termination field */
446 p->des01.tx.end_ring = ter;
447
448 return;
449}
450
/* Set up a TX descriptor for transmission: record whether it starts a
 * frame and its buffer length.  @csum_flag is accepted for interface
 * compatibility but not used — this core has no checksum-insertion
 * engine (unlike the dwmac1000 variant). */
static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     int csum_flag)
{
	p->des01.tx.first_segment = is_fs;
	p->des01.tx.buffer1_size = len;
}
457
/* Suppress the transmit-complete interrupt for this descriptor. */
static void dwmac100_clear_tx_ic(struct dma_desc *p)
{
	p->des01.tx.interrupt = 0;
}
462
/* Finalise a TX descriptor: mark it as the frame's last segment and
 * request an interrupt when its transmission completes. */
static void dwmac100_close_tx_desc(struct dma_desc *p)
{
	p->des01.tx.last_segment = 1;
	p->des01.tx.interrupt = 1;
}
468
/* Return the length field the hardware wrote into an RX descriptor. */
static int dwmac100_get_rx_frame_len(struct dma_desc *p)
{
	return p->des01.rx.frame_length;
}
473
/* MAC-core callbacks for the DWMAC 10/100 (see struct stmmac_ops). */
struct stmmac_ops dwmac100_ops = {
	.core_init = dwmac100_core_init,
	.dump_regs = dwmac100_dump_mac_regs,
	.host_irq_status = dwmac100_irq_status,
	.set_filter = dwmac100_set_filter,
	.flow_ctrl = dwmac100_flow_ctrl,
	.pmt = dwmac100_pmt,	/* stub: no PMT block on this core */
	.set_umac_addr = dwmac100_set_umac_addr,
	.get_umac_addr = dwmac100_get_umac_addr,
};
484
/* DMA callbacks for the DWMAC 10/100; the dwmac_* entries are shared
 * helpers common to both the 10/100 and GMAC cores. */
struct stmmac_dma_ops dwmac100_dma_ops = {
	.init = dwmac100_dma_init,
	.dump_regs = dwmac100_dump_dma_regs,
	.dma_mode = dwmac100_dma_operation_mode,
	.dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
	.enable_dma_transmission = dwmac_enable_dma_transmission,
	.enable_dma_irq = dwmac_enable_dma_irq,
	.disable_dma_irq = dwmac_disable_dma_irq,
	.start_tx = dwmac_dma_start_tx,
	.stop_tx = dwmac_dma_stop_tx,
	.start_rx = dwmac_dma_start_rx,
	.stop_rx = dwmac_dma_stop_rx,
	.dma_interrupt = dwmac_dma_interrupt,
};
499
/* Descriptor-manipulation callbacks for the DWMAC 10/100 (normal,
 * non-enhanced descriptor layout: des01.tx / des01.rx). */
struct stmmac_desc_ops dwmac100_desc_ops = {
	.tx_status = dwmac100_get_tx_frame_status,
	.rx_status = dwmac100_get_rx_frame_status,
	.get_tx_len = dwmac100_get_tx_len,
	.init_rx_desc = dwmac100_init_rx_desc,
	.init_tx_desc = dwmac100_init_tx_desc,
	.get_tx_owner = dwmac100_get_tx_owner,
	.get_rx_owner = dwmac100_get_rx_owner,
	.release_tx_desc = dwmac100_release_tx_desc,
	.prepare_tx_desc = dwmac100_prepare_tx_desc,
	.clear_tx_ic = dwmac100_clear_tx_ic,
	.close_tx_desc = dwmac100_close_tx_desc,
	.get_tx_ls = dwmac100_get_tx_ls,
	.set_tx_owner = dwmac100_set_tx_owner,
	.set_rx_owner = dwmac100_set_rx_owner,
	.get_rx_frame_len = dwmac100_get_rx_frame_len,
};
517
518struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
519{
520 struct mac_device_info *mac;
521
522 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
523
524 pr_info("\tDWMAC100\n");
525
526 mac->mac = &dwmac100_ops;
527 mac->desc = &dwmac100_desc_ops;
528 mac->dma = &dwmac100_dma_ops;
529
530 mac->pmt = PMT_NOT_SUPPORTED;
531 mac->link.port = MAC_CONTROL_PS;
532 mac->link.duplex = MAC_CONTROL_F;
533 mac->link.speed = 0;
534 mac->mii.addr = MAC_MII_ADDR;
535 mac->mii.data = MAC_MII_DATA;
536
537 return mac;
538}
diff --git a/drivers/net/stmmac/dwmac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004a..97956cbf1cb4 100644
--- a/drivers/net/stmmac/dwmac100.h
+++ b/drivers/net/stmmac/dwmac100.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include <linux/phy.h>
26#include "common.h"
27
25/*---------------------------------------------------------------------------- 28/*----------------------------------------------------------------------------
26 * MAC BLOCK defines 29 * MAC BLOCK defines
27 *---------------------------------------------------------------------------*/ 30 *---------------------------------------------------------------------------*/
@@ -114,3 +117,5 @@ enum ttc_control {
114#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */ 117#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
115#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */ 118#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
116#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ 119#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */
120
121extern struct stmmac_dma_ops dwmac100_dma_ops;
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h
index 62dca0e384e7..d8d0f3553770 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -172,7 +172,6 @@ enum rfd {
172 deac_full_minus_4 = 0x00401800, 172 deac_full_minus_4 = 0x00401800,
173}; 173};
174#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */ 174#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
175#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
176 175
177enum ttc_control { 176enum ttc_control {
178 DMA_CONTROL_TTC_64 = 0x00000000, 177 DMA_CONTROL_TTC_64 = 0x00000000,
@@ -206,15 +205,4 @@ enum rtc_control {
206#define GMAC_MMC_TX_INTR 0x108 205#define GMAC_MMC_TX_INTR 0x108
207#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 206#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
208 207
209#undef DWMAC1000_DEBUG
210/* #define DWMAC1000__DEBUG */
211#undef FRAME_FILTER_DEBUG
212/* #define FRAME_FILTER_DEBUG */
213#ifdef DWMAC1000__DEBUG
214#define DBG(fmt, args...) printk(fmt, ## args)
215#else
216#define DBG(fmt, args...) do { } while (0)
217#endif
218
219extern struct stmmac_dma_ops dwmac1000_dma_ops; 208extern struct stmmac_dma_ops dwmac1000_dma_ops;
220extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 5bd95ebfe498..917b4e16923b 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -48,7 +48,6 @@ static void dwmac1000_core_init(unsigned long ioaddr)
48 /* Tag detection without filtering */ 48 /* Tag detection without filtering */
49 writel(0x0, ioaddr + GMAC_VLAN_TAG); 49 writel(0x0, ioaddr + GMAC_VLAN_TAG);
50#endif 50#endif
51 return;
52} 51}
53 52
54static void dwmac1000_dump_regs(unsigned long ioaddr) 53static void dwmac1000_dump_regs(unsigned long ioaddr)
@@ -61,7 +60,6 @@ static void dwmac1000_dump_regs(unsigned long ioaddr)
61 pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i, 60 pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
62 offset, readl(ioaddr + offset)); 61 offset, readl(ioaddr + offset));
63 } 62 }
64 return;
65} 63}
66 64
67static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr, 65static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
@@ -83,8 +81,8 @@ static void dwmac1000_set_filter(struct net_device *dev)
83 unsigned long ioaddr = dev->base_addr; 81 unsigned long ioaddr = dev->base_addr;
84 unsigned int value = 0; 82 unsigned int value = 0;
85 83
86 DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", 84 CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
87 __func__, netdev_mc_count(dev), netdev_uc_count(dev)); 85 __func__, netdev_mc_count(dev), netdev_uc_count(dev));
88 86
89 if (dev->flags & IFF_PROMISC) 87 if (dev->flags & IFF_PROMISC)
90 value = GMAC_FRAME_FILTER_PR; 88 value = GMAC_FRAME_FILTER_PR;
@@ -95,17 +93,17 @@ static void dwmac1000_set_filter(struct net_device *dev)
95 writel(0xffffffff, ioaddr + GMAC_HASH_LOW); 93 writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
96 } else if (!netdev_mc_empty(dev)) { 94 } else if (!netdev_mc_empty(dev)) {
97 u32 mc_filter[2]; 95 u32 mc_filter[2];
98 struct dev_mc_list *mclist; 96 struct netdev_hw_addr *ha;
99 97
100 /* Hash filter for multicast */ 98 /* Hash filter for multicast */
101 value = GMAC_FRAME_FILTER_HMC; 99 value = GMAC_FRAME_FILTER_HMC;
102 100
103 memset(mc_filter, 0, sizeof(mc_filter)); 101 memset(mc_filter, 0, sizeof(mc_filter));
104 netdev_for_each_mc_addr(mclist, dev) { 102 netdev_for_each_mc_addr(ha, dev) {
105 /* The upper 6 bits of the calculated CRC are used to 103 /* The upper 6 bits of the calculated CRC are used to
106 index the contens of the hash table */ 104 index the contens of the hash table */
107 int bit_nr = 105 int bit_nr =
108 bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26; 106 bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
109 /* The most significant bit determines the register to 107 /* The most significant bit determines the register to
110 * use (H/L) while the other 5 bits determine the bit 108 * use (H/L) while the other 5 bits determine the bit
111 * within the register. */ 109 * within the register. */
@@ -136,11 +134,9 @@ static void dwmac1000_set_filter(struct net_device *dev)
136#endif 134#endif
137 writel(value, ioaddr + GMAC_FRAME_FILTER); 135 writel(value, ioaddr + GMAC_FRAME_FILTER);
138 136
139 DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: " 137 CHIP_DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
140 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER), 138 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
141 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); 139 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
142
143 return;
144} 140}
145 141
146static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex, 142static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
@@ -148,23 +144,22 @@ static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
148{ 144{
149 unsigned int flow = 0; 145 unsigned int flow = 0;
150 146
151 DBG(KERN_DEBUG "GMAC Flow-Control:\n"); 147 CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n");
152 if (fc & FLOW_RX) { 148 if (fc & FLOW_RX) {
153 DBG(KERN_DEBUG "\tReceive Flow-Control ON\n"); 149 CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
154 flow |= GMAC_FLOW_CTRL_RFE; 150 flow |= GMAC_FLOW_CTRL_RFE;
155 } 151 }
156 if (fc & FLOW_TX) { 152 if (fc & FLOW_TX) {
157 DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n"); 153 CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
158 flow |= GMAC_FLOW_CTRL_TFE; 154 flow |= GMAC_FLOW_CTRL_TFE;
159 } 155 }
160 156
161 if (duplex) { 157 if (duplex) {
162 DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time); 158 CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time);
163 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); 159 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
164 } 160 }
165 161
166 writel(flow, ioaddr + GMAC_FLOW_CTRL); 162 writel(flow, ioaddr + GMAC_FLOW_CTRL);
167 return;
168} 163}
169 164
170static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode) 165static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
@@ -172,15 +167,14 @@ static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
172 unsigned int pmt = 0; 167 unsigned int pmt = 0;
173 168
174 if (mode == WAKE_MAGIC) { 169 if (mode == WAKE_MAGIC) {
175 DBG(KERN_DEBUG "GMAC: WOL Magic frame\n"); 170 CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
176 pmt |= power_down | magic_pkt_en; 171 pmt |= power_down | magic_pkt_en;
177 } else if (mode == WAKE_UCAST) { 172 } else if (mode == WAKE_UCAST) {
178 DBG(KERN_DEBUG "GMAC: WOL on global unicast\n"); 173 CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
179 pmt |= global_unicast; 174 pmt |= global_unicast;
180 } 175 }
181 176
182 writel(pmt, ioaddr + GMAC_PMT); 177 writel(pmt, ioaddr + GMAC_PMT);
183 return;
184} 178}
185 179
186 180
@@ -190,22 +184,20 @@ static void dwmac1000_irq_status(unsigned long ioaddr)
190 184
191 /* Not used events (e.g. MMC interrupts) are not handled. */ 185 /* Not used events (e.g. MMC interrupts) are not handled. */
192 if ((intr_status & mmc_tx_irq)) 186 if ((intr_status & mmc_tx_irq))
193 DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n", 187 CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
194 readl(ioaddr + GMAC_MMC_TX_INTR)); 188 readl(ioaddr + GMAC_MMC_TX_INTR));
195 if (unlikely(intr_status & mmc_rx_irq)) 189 if (unlikely(intr_status & mmc_rx_irq))
196 DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n", 190 CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
197 readl(ioaddr + GMAC_MMC_RX_INTR)); 191 readl(ioaddr + GMAC_MMC_RX_INTR));
198 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) 192 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
199 DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n", 193 CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
200 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); 194 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
201 if (unlikely(intr_status & pmt_irq)) { 195 if (unlikely(intr_status & pmt_irq)) {
202 DBG(KERN_DEBUG "GMAC: received Magic frame\n"); 196 CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n");
203 /* clear the PMT bits 5 and 6 by reading the PMT 197 /* clear the PMT bits 5 and 6 by reading the PMT
204 * status register. */ 198 * status register. */
205 readl(ioaddr + GMAC_PMT); 199 readl(ioaddr + GMAC_PMT);
206 } 200 }
207
208 return;
209} 201}
210 202
211struct stmmac_ops dwmac1000_ops = { 203struct stmmac_ops dwmac1000_ops = {
@@ -230,7 +222,6 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
230 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); 222 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
231 223
232 mac->mac = &dwmac1000_ops; 224 mac->mac = &dwmac1000_ops;
233 mac->desc = &dwmac1000_desc_ops;
234 mac->dma = &dwmac1000_dma_ops; 225 mac->dma = &dwmac1000_dma_ops;
235 226
236 mac->pmt = PMT_SUPPORTED; 227 mac->pmt = PMT_SUPPORTED;
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c
index 39d436a2da68..415805057cb0 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,7 +3,7 @@
3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for 3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
4 developing this code. 4 developing this code.
5 5
6 This contains the functions to handle the dma and descriptors. 6 This contains the functions to handle the dma.
7 7
8 Copyright (C) 2007-2009 STMicroelectronics Ltd 8 Copyright (C) 2007-2009 STMicroelectronics Ltd
9 9
@@ -58,29 +58,20 @@ static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
58 return 0; 58 return 0;
59} 59}
60 60
61/* Transmit FIFO flush operation */
62static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
63{
64 u32 csr6 = readl(ioaddr + DMA_CONTROL);
65 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
66
67 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
68}
69
70static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode, 61static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
71 int rxmode) 62 int rxmode)
72{ 63{
73 u32 csr6 = readl(ioaddr + DMA_CONTROL); 64 u32 csr6 = readl(ioaddr + DMA_CONTROL);
74 65
75 if (txmode == SF_DMA_MODE) { 66 if (txmode == SF_DMA_MODE) {
76 DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n"); 67 CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n");
77 /* Transmit COE type 2 cannot be done in cut-through mode. */ 68 /* Transmit COE type 2 cannot be done in cut-through mode. */
78 csr6 |= DMA_CONTROL_TSF; 69 csr6 |= DMA_CONTROL_TSF;
79 /* Operating on second frame increase the performance 70 /* Operating on second frame increase the performance
80 * especially when transmit store-and-forward is used.*/ 71 * especially when transmit store-and-forward is used.*/
81 csr6 |= DMA_CONTROL_OSF; 72 csr6 |= DMA_CONTROL_OSF;
82 } else { 73 } else {
83 DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode" 74 CHIP_DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
84 " (threshold = %d)\n", txmode); 75 " (threshold = %d)\n", txmode);
85 csr6 &= ~DMA_CONTROL_TSF; 76 csr6 &= ~DMA_CONTROL_TSF;
86 csr6 &= DMA_CONTROL_TC_TX_MASK; 77 csr6 &= DMA_CONTROL_TC_TX_MASK;
@@ -98,10 +89,10 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
98 } 89 }
99 90
100 if (rxmode == SF_DMA_MODE) { 91 if (rxmode == SF_DMA_MODE) {
101 DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n"); 92 CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n");
102 csr6 |= DMA_CONTROL_RSF; 93 csr6 |= DMA_CONTROL_RSF;
103 } else { 94 } else {
104 DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode" 95 CHIP_DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
105 " (threshold = %d)\n", rxmode); 96 " (threshold = %d)\n", rxmode);
106 csr6 &= ~DMA_CONTROL_RSF; 97 csr6 &= ~DMA_CONTROL_RSF;
107 csr6 &= DMA_CONTROL_TC_RX_MASK; 98 csr6 &= DMA_CONTROL_TC_RX_MASK;
@@ -116,7 +107,6 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
116 } 107 }
117 108
118 writel(csr6, ioaddr + DMA_CONTROL); 109 writel(csr6, ioaddr + DMA_CONTROL);
119 return;
120} 110}
121 111
122/* Not yet implemented --- no RMON module */ 112/* Not yet implemented --- no RMON module */
@@ -138,306 +128,6 @@ static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
138 readl(ioaddr + DMA_BUS_MODE + offset)); 128 readl(ioaddr + DMA_BUS_MODE + offset));
139 } 129 }
140 } 130 }
141 return;
142}
143
144static int dwmac1000_get_tx_frame_status(void *data,
145 struct stmmac_extra_stats *x,
146 struct dma_desc *p, unsigned long ioaddr)
147{
148 int ret = 0;
149 struct net_device_stats *stats = (struct net_device_stats *)data;
150
151 if (unlikely(p->des01.etx.error_summary)) {
152 DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
153 if (unlikely(p->des01.etx.jabber_timeout)) {
154 DBG(KERN_ERR "\tjabber_timeout error\n");
155 x->tx_jabber++;
156 }
157
158 if (unlikely(p->des01.etx.frame_flushed)) {
159 DBG(KERN_ERR "\tframe_flushed error\n");
160 x->tx_frame_flushed++;
161 dwmac1000_flush_tx_fifo(ioaddr);
162 }
163
164 if (unlikely(p->des01.etx.loss_carrier)) {
165 DBG(KERN_ERR "\tloss_carrier error\n");
166 x->tx_losscarrier++;
167 stats->tx_carrier_errors++;
168 }
169 if (unlikely(p->des01.etx.no_carrier)) {
170 DBG(KERN_ERR "\tno_carrier error\n");
171 x->tx_carrier++;
172 stats->tx_carrier_errors++;
173 }
174 if (unlikely(p->des01.etx.late_collision)) {
175 DBG(KERN_ERR "\tlate_collision error\n");
176 stats->collisions += p->des01.etx.collision_count;
177 }
178 if (unlikely(p->des01.etx.excessive_collisions)) {
179 DBG(KERN_ERR "\texcessive_collisions\n");
180 stats->collisions += p->des01.etx.collision_count;
181 }
182 if (unlikely(p->des01.etx.excessive_deferral)) {
183 DBG(KERN_INFO "\texcessive tx_deferral\n");
184 x->tx_deferred++;
185 }
186
187 if (unlikely(p->des01.etx.underflow_error)) {
188 DBG(KERN_ERR "\tunderflow error\n");
189 dwmac1000_flush_tx_fifo(ioaddr);
190 x->tx_underflow++;
191 }
192
193 if (unlikely(p->des01.etx.ip_header_error)) {
194 DBG(KERN_ERR "\tTX IP header csum error\n");
195 x->tx_ip_header_error++;
196 }
197
198 if (unlikely(p->des01.etx.payload_error)) {
199 DBG(KERN_ERR "\tAddr/Payload csum error\n");
200 x->tx_payload_error++;
201 dwmac1000_flush_tx_fifo(ioaddr);
202 }
203
204 ret = -1;
205 }
206
207 if (unlikely(p->des01.etx.deferred)) {
208 DBG(KERN_INFO "GMAC TX status: tx deferred\n");
209 x->tx_deferred++;
210 }
211#ifdef STMMAC_VLAN_TAG_USED
212 if (p->des01.etx.vlan_frame) {
213 DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
214 x->tx_vlan++;
215 }
216#endif
217
218 return ret;
219}
220
221static int dwmac1000_get_tx_len(struct dma_desc *p)
222{
223 return p->des01.etx.buffer1_size;
224}
225
226static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
227{
228 int ret = good_frame;
229 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
230
231 /* bits 5 7 0 | Frame status
232 * ----------------------------------------------------------
233 * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
234 * 1 0 0 | IPv4/6 No CSUM errorS.
235 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
236 * 1 1 0 | IPv4/6 CSUM IP HR error
237 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
238 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
239 * 0 1 1 | COE bypassed.. no IPv4/6 frame
240 * 0 1 0 | Reserved.
241 */
242 if (status == 0x0) {
243 DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
244 ret = good_frame;
245 } else if (status == 0x4) {
246 DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
247 ret = good_frame;
248 } else if (status == 0x5) {
249 DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
250 ret = csum_none;
251 } else if (status == 0x6) {
252 DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
253 ret = csum_none;
254 } else if (status == 0x7) {
255 DBG(KERN_ERR
256 "RX Des0 status: IPv4/6 Header and Payload Error.\n");
257 ret = csum_none;
258 } else if (status == 0x1) {
259 DBG(KERN_ERR
260 "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
261 ret = discard_frame;
262 } else if (status == 0x3) {
263 DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
264 ret = discard_frame;
265 }
266 return ret;
267}
268
269static int dwmac1000_get_rx_frame_status(void *data,
270 struct stmmac_extra_stats *x, struct dma_desc *p)
271{
272 int ret = good_frame;
273 struct net_device_stats *stats = (struct net_device_stats *)data;
274
275 if (unlikely(p->des01.erx.error_summary)) {
276 DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
277 if (unlikely(p->des01.erx.descriptor_error)) {
278 DBG(KERN_ERR "\tdescriptor error\n");
279 x->rx_desc++;
280 stats->rx_length_errors++;
281 }
282 if (unlikely(p->des01.erx.overflow_error)) {
283 DBG(KERN_ERR "\toverflow error\n");
284 x->rx_gmac_overflow++;
285 }
286
287 if (unlikely(p->des01.erx.ipc_csum_error))
288 DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
289
290 if (unlikely(p->des01.erx.late_collision)) {
291 DBG(KERN_ERR "\tlate_collision error\n");
292 stats->collisions++;
293 stats->collisions++;
294 }
295 if (unlikely(p->des01.erx.receive_watchdog)) {
296 DBG(KERN_ERR "\treceive_watchdog error\n");
297 x->rx_watchdog++;
298 }
299 if (unlikely(p->des01.erx.error_gmii)) {
300 DBG(KERN_ERR "\tReceive Error\n");
301 x->rx_mii++;
302 }
303 if (unlikely(p->des01.erx.crc_error)) {
304 DBG(KERN_ERR "\tCRC error\n");
305 x->rx_crc++;
306 stats->rx_crc_errors++;
307 }
308 ret = discard_frame;
309 }
310
311 /* After a payload csum error, the ES bit is set.
312 * It doesn't match with the information reported into the databook.
313 * At any rate, we need to understand if the CSUM hw computation is ok
314 * and report this info to the upper layers. */
315 ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
316 p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
317
318 if (unlikely(p->des01.erx.dribbling)) {
319 DBG(KERN_ERR "GMAC RX: dribbling error\n");
320 ret = discard_frame;
321 }
322 if (unlikely(p->des01.erx.sa_filter_fail)) {
323 DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
324 x->sa_rx_filter_fail++;
325 ret = discard_frame;
326 }
327 if (unlikely(p->des01.erx.da_filter_fail)) {
328 DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
329 x->da_rx_filter_fail++;
330 ret = discard_frame;
331 }
332 if (unlikely(p->des01.erx.length_error)) {
333 DBG(KERN_ERR "GMAC RX: length_error error\n");
334 x->rx_length++;
335 ret = discard_frame;
336 }
337#ifdef STMMAC_VLAN_TAG_USED
338 if (p->des01.erx.vlan_tag) {
339 DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
340 x->rx_vlan++;
341 }
342#endif
343 return ret;
344}
345
346static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
347 int disable_rx_ic)
348{
349 int i;
350 for (i = 0; i < ring_size; i++) {
351 p->des01.erx.own = 1;
352 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
353 /* To support jumbo frames */
354 p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
355 if (i == ring_size - 1)
356 p->des01.erx.end_ring = 1;
357 if (disable_rx_ic)
358 p->des01.erx.disable_ic = 1;
359 p++;
360 }
361 return;
362}
363
364static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
365{
366 int i;
367
368 for (i = 0; i < ring_size; i++) {
369 p->des01.etx.own = 0;
370 if (i == ring_size - 1)
371 p->des01.etx.end_ring = 1;
372 p++;
373 }
374
375 return;
376}
377
378static int dwmac1000_get_tx_owner(struct dma_desc *p)
379{
380 return p->des01.etx.own;
381}
382
383static int dwmac1000_get_rx_owner(struct dma_desc *p)
384{
385 return p->des01.erx.own;
386}
387
388static void dwmac1000_set_tx_owner(struct dma_desc *p)
389{
390 p->des01.etx.own = 1;
391}
392
393static void dwmac1000_set_rx_owner(struct dma_desc *p)
394{
395 p->des01.erx.own = 1;
396}
397
398static int dwmac1000_get_tx_ls(struct dma_desc *p)
399{
400 return p->des01.etx.last_segment;
401}
402
403static void dwmac1000_release_tx_desc(struct dma_desc *p)
404{
405 int ter = p->des01.etx.end_ring;
406
407 memset(p, 0, sizeof(struct dma_desc));
408 p->des01.etx.end_ring = ter;
409
410 return;
411}
412
413static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
414 int csum_flag)
415{
416 p->des01.etx.first_segment = is_fs;
417 if (unlikely(len > BUF_SIZE_4KiB)) {
418 p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
419 p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
420 } else {
421 p->des01.etx.buffer1_size = len;
422 }
423 if (likely(csum_flag))
424 p->des01.etx.checksum_insertion = cic_full;
425}
426
427static void dwmac1000_clear_tx_ic(struct dma_desc *p)
428{
429 p->des01.etx.interrupt = 0;
430}
431
432static void dwmac1000_close_tx_desc(struct dma_desc *p)
433{
434 p->des01.etx.last_segment = 1;
435 p->des01.etx.interrupt = 1;
436}
437
438static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
439{
440 return p->des01.erx.frame_length;
441} 131}
442 132
443struct stmmac_dma_ops dwmac1000_dma_ops = { 133struct stmmac_dma_ops dwmac1000_dma_ops = {
@@ -454,21 +144,3 @@ struct stmmac_dma_ops dwmac1000_dma_ops = {
454 .stop_rx = dwmac_dma_stop_rx, 144 .stop_rx = dwmac_dma_stop_rx,
455 .dma_interrupt = dwmac_dma_interrupt, 145 .dma_interrupt = dwmac_dma_interrupt,
456}; 146};
457
458struct stmmac_desc_ops dwmac1000_desc_ops = {
459 .tx_status = dwmac1000_get_tx_frame_status,
460 .rx_status = dwmac1000_get_rx_frame_status,
461 .get_tx_len = dwmac1000_get_tx_len,
462 .init_rx_desc = dwmac1000_init_rx_desc,
463 .init_tx_desc = dwmac1000_init_tx_desc,
464 .get_tx_owner = dwmac1000_get_tx_owner,
465 .get_rx_owner = dwmac1000_get_rx_owner,
466 .release_tx_desc = dwmac1000_release_tx_desc,
467 .prepare_tx_desc = dwmac1000_prepare_tx_desc,
468 .clear_tx_ic = dwmac1000_clear_tx_ic,
469 .close_tx_desc = dwmac1000_close_tx_desc,
470 .get_tx_ls = dwmac1000_get_tx_ls,
471 .set_tx_owner = dwmac1000_set_tx_owner,
472 .set_rx_owner = dwmac1000_set_rx_owner,
473 .get_rx_frame_len = dwmac1000_get_rx_frame_len,
474};
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
new file mode 100644
index 000000000000..6f270a0e151a
--- /dev/null
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -0,0 +1,196 @@
1/*******************************************************************************
2 This is the driver for the MAC 10/100 on-chip Ethernet controller
3 currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
4
5 DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
6 this code.
7
8 This only implements the mac core functions for this chip.
9
10 Copyright (C) 2007-2009 STMicroelectronics Ltd
11
12 This program is free software; you can redistribute it and/or modify it
13 under the terms and conditions of the GNU General Public License,
14 version 2, as published by the Free Software Foundation.
15
16 This program is distributed in the hope it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 more details.
20
21 You should have received a copy of the GNU General Public License along with
22 this program; if not, write to the Free Software Foundation, Inc.,
23 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24
25 The full GNU General Public License is included in this distribution in
26 the file called "COPYING".
27
28 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
29*******************************************************************************/
30
31#include <linux/crc32.h>
32#include "dwmac100.h"
33
34static void dwmac100_core_init(unsigned long ioaddr)
35{
36 u32 value = readl(ioaddr + MAC_CONTROL);
37
38 writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
39
40#ifdef STMMAC_VLAN_TAG_USED
41 writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
42#endif
43}
44
45static void dwmac100_dump_mac_regs(unsigned long ioaddr)
46{
47 pr_info("\t----------------------------------------------\n"
48 "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
49 "\t----------------------------------------------\n",
50 (unsigned int)ioaddr);
51 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
52 readl(ioaddr + MAC_CONTROL));
53 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
54 readl(ioaddr + MAC_ADDR_HIGH));
55 pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
56 readl(ioaddr + MAC_ADDR_LOW));
57 pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
58 MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
59 pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
60 MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
61 pr_info("\tflow control (offset 0x%x): 0x%08x\n",
62 MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
63 pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
64 readl(ioaddr + MAC_VLAN1));
65 pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
66 readl(ioaddr + MAC_VLAN2));
67 pr_info("\n\tMAC management counter registers\n");
68 pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
69 MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
70 pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
71 MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
72 pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
73 MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
74 pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
75 MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
76 pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
77 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
78}
79
80static void dwmac100_irq_status(unsigned long ioaddr)
81{
82 return;
83}
84
85static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
86 unsigned int reg_n)
87{
88 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
89}
90
91static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
92 unsigned int reg_n)
93{
94 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
95}
96
97static void dwmac100_set_filter(struct net_device *dev)
98{
99 unsigned long ioaddr = dev->base_addr;
100 u32 value = readl(ioaddr + MAC_CONTROL);
101
102 if (dev->flags & IFF_PROMISC) {
103 value |= MAC_CONTROL_PR;
104 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
105 MAC_CONTROL_HP);
106 } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
107 || (dev->flags & IFF_ALLMULTI)) {
108 value |= MAC_CONTROL_PM;
109 value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
110 writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
111 writel(0xffffffff, ioaddr + MAC_HASH_LOW);
112 } else if (netdev_mc_empty(dev)) { /* no multicast */
113 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
114 MAC_CONTROL_HO | MAC_CONTROL_HP);
115 } else {
116 u32 mc_filter[2];
117 struct netdev_hw_addr *ha;
118
119 /* Perfect filter mode for physical address and Hash
120 filter for multicast */
121 value |= MAC_CONTROL_HP;
122 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
123 MAC_CONTROL_IF | MAC_CONTROL_HO);
124
125 memset(mc_filter, 0, sizeof(mc_filter));
126 netdev_for_each_mc_addr(ha, dev) {
127 /* The upper 6 bits of the calculated CRC are used to
128 * index the contens of the hash table */
129 int bit_nr =
130 ether_crc(ETH_ALEN, ha->addr) >> 26;
131 /* The most significant bit determines the register to
132 * use (H/L) while the other 5 bits determine the bit
133 * within the register. */
134 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
135 }
136 writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
137 writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
138 }
139
140 writel(value, ioaddr + MAC_CONTROL);
141
142 CHIP_DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
143 "HI 0x%08x, LO 0x%08x\n",
144 __func__, readl(ioaddr + MAC_CONTROL),
145 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
146}
147
148static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
149 unsigned int fc, unsigned int pause_time)
150{
151 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
152
153 if (duplex)
154 flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
155 writel(flow, ioaddr + MAC_FLOW_CTRL);
156}
157
158/* No PMT module supported for this Ethernet Controller.
159 * Tested on ST platforms only.
160 */
161static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
162{
163 return;
164}
165
166struct stmmac_ops dwmac100_ops = {
167 .core_init = dwmac100_core_init,
168 .dump_regs = dwmac100_dump_mac_regs,
169 .host_irq_status = dwmac100_irq_status,
170 .set_filter = dwmac100_set_filter,
171 .flow_ctrl = dwmac100_flow_ctrl,
172 .pmt = dwmac100_pmt,
173 .set_umac_addr = dwmac100_set_umac_addr,
174 .get_umac_addr = dwmac100_get_umac_addr,
175};
176
177struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
178{
179 struct mac_device_info *mac;
180
181 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
182
183 pr_info("\tDWMAC100\n");
184
185 mac->mac = &dwmac100_ops;
186 mac->dma = &dwmac100_dma_ops;
187
188 mac->pmt = PMT_NOT_SUPPORTED;
189 mac->link.port = MAC_CONTROL_PS;
190 mac->link.duplex = MAC_CONTROL_F;
191 mac->link.speed = 0;
192 mac->mii.addr = MAC_MII_ADDR;
193 mac->mii.data = MAC_MII_DATA;
194
195 return mac;
196}
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
new file mode 100644
index 000000000000..2fece7b72727
--- /dev/null
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -0,0 +1,134 @@
1/*******************************************************************************
2 This is the driver for the MAC 10/100 on-chip Ethernet controller
3 currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
4
5 DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
6 this code.
7
8 This contains the functions to handle the dma.
9
10 Copyright (C) 2007-2009 STMicroelectronics Ltd
11
12 This program is free software; you can redistribute it and/or modify it
13 under the terms and conditions of the GNU General Public License,
14 version 2, as published by the Free Software Foundation.
15
16 This program is distributed in the hope it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 more details.
20
21 You should have received a copy of the GNU General Public License along with
22 this program; if not, write to the Free Software Foundation, Inc.,
23 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24
25 The full GNU General Public License is included in this distribution in
26 the file called "COPYING".
27
28 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
29*******************************************************************************/
30
31#include "dwmac100.h"
32#include "dwmac_dma.h"
33
34static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
35 u32 dma_rx)
36{
37 u32 value = readl(ioaddr + DMA_BUS_MODE);
38 /* DMA SW reset */
39 value |= DMA_BUS_MODE_SFT_RESET;
40 writel(value, ioaddr + DMA_BUS_MODE);
41 do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
42
43 /* Enable Application Access by writing to DMA CSR0 */
44 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
45 ioaddr + DMA_BUS_MODE);
46
47 /* Mask interrupts by writing to CSR7 */
48 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
49
50 /* The base address of the RX/TX descriptor lists must be written into
51 * DMA CSR3 and CSR4, respectively. */
52 writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
53 writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
54
55 return 0;
56}
57
58/* Store and Forward capability is not used at all..
59 * The transmit threshold can be programmed by
60 * setting the TTC bits in the DMA control register.*/
61static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
62 int rxmode)
63{
64 u32 csr6 = readl(ioaddr + DMA_CONTROL);
65
66 if (txmode <= 32)
67 csr6 |= DMA_CONTROL_TTC_32;
68 else if (txmode <= 64)
69 csr6 |= DMA_CONTROL_TTC_64;
70 else
71 csr6 |= DMA_CONTROL_TTC_128;
72
73 writel(csr6, ioaddr + DMA_CONTROL);
74}
75
76static void dwmac100_dump_dma_regs(unsigned long ioaddr)
77{
78 int i;
79
80 CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
81 for (i = 0; i < 9; i++)
82 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
83 (DMA_BUS_MODE + i * 4),
84 readl(ioaddr + DMA_BUS_MODE + i * 4));
85 CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
86 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
87 CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
88 DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
89}
90
91/* DMA controller has two counters to track the number of
92 * the receive missed frames. */
93static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
94 unsigned long ioaddr)
95{
96 struct net_device_stats *stats = (struct net_device_stats *)data;
97 u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
98
99 if (unlikely(csr8)) {
100 if (csr8 & DMA_MISSED_FRAME_OVE) {
101 stats->rx_over_errors += 0x800;
102 x->rx_overflow_cntr += 0x800;
103 } else {
104 unsigned int ove_cntr;
105 ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
106 stats->rx_over_errors += ove_cntr;
107 x->rx_overflow_cntr += ove_cntr;
108 }
109
110 if (csr8 & DMA_MISSED_FRAME_OVE_M) {
111 stats->rx_missed_errors += 0xffff;
112 x->rx_missed_cntr += 0xffff;
113 } else {
114 unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
115 stats->rx_missed_errors += miss_f;
116 x->rx_missed_cntr += miss_f;
117 }
118 }
119}
120
121struct stmmac_dma_ops dwmac100_dma_ops = {
122 .init = dwmac100_dma_init,
123 .dump_regs = dwmac100_dump_dma_regs,
124 .dma_mode = dwmac100_dma_operation_mode,
125 .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
126 .enable_dma_transmission = dwmac_enable_dma_transmission,
127 .enable_dma_irq = dwmac_enable_dma_irq,
128 .disable_dma_irq = dwmac_disable_dma_irq,
129 .start_tx = dwmac_dma_start_tx,
130 .stop_tx = dwmac_dma_stop_tx,
131 .start_rx = dwmac_dma_start_rx,
132 .stop_rx = dwmac_dma_stop_rx,
133 .dma_interrupt = dwmac_dma_interrupt,
134};
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
index de848d9f6060..7b815a1b7b8c 100644
--- a/drivers/net/stmmac/dwmac_dma.h
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -95,6 +95,7 @@
95#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */ 95#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
96#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */ 96#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
97#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */ 97#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
98#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
98 99
99extern void dwmac_enable_dma_transmission(unsigned long ioaddr); 100extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
100extern void dwmac_enable_dma_irq(unsigned long ioaddr); 101extern void dwmac_enable_dma_irq(unsigned long ioaddr);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index d4adb1eaa447..a85415216ef4 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -52,7 +52,6 @@ void dwmac_dma_start_tx(unsigned long ioaddr)
52 u32 value = readl(ioaddr + DMA_CONTROL); 52 u32 value = readl(ioaddr + DMA_CONTROL);
53 value |= DMA_CONTROL_ST; 53 value |= DMA_CONTROL_ST;
54 writel(value, ioaddr + DMA_CONTROL); 54 writel(value, ioaddr + DMA_CONTROL);
55 return;
56} 55}
57 56
58void dwmac_dma_stop_tx(unsigned long ioaddr) 57void dwmac_dma_stop_tx(unsigned long ioaddr)
@@ -60,7 +59,6 @@ void dwmac_dma_stop_tx(unsigned long ioaddr)
60 u32 value = readl(ioaddr + DMA_CONTROL); 59 u32 value = readl(ioaddr + DMA_CONTROL);
61 value &= ~DMA_CONTROL_ST; 60 value &= ~DMA_CONTROL_ST;
62 writel(value, ioaddr + DMA_CONTROL); 61 writel(value, ioaddr + DMA_CONTROL);
63 return;
64} 62}
65 63
66void dwmac_dma_start_rx(unsigned long ioaddr) 64void dwmac_dma_start_rx(unsigned long ioaddr)
@@ -68,8 +66,6 @@ void dwmac_dma_start_rx(unsigned long ioaddr)
68 u32 value = readl(ioaddr + DMA_CONTROL); 66 u32 value = readl(ioaddr + DMA_CONTROL);
69 value |= DMA_CONTROL_SR; 67 value |= DMA_CONTROL_SR;
70 writel(value, ioaddr + DMA_CONTROL); 68 writel(value, ioaddr + DMA_CONTROL);
71
72 return;
73} 69}
74 70
75void dwmac_dma_stop_rx(unsigned long ioaddr) 71void dwmac_dma_stop_rx(unsigned long ioaddr)
@@ -77,8 +73,6 @@ void dwmac_dma_stop_rx(unsigned long ioaddr)
77 u32 value = readl(ioaddr + DMA_CONTROL); 73 u32 value = readl(ioaddr + DMA_CONTROL);
78 value &= ~DMA_CONTROL_SR; 74 value &= ~DMA_CONTROL_SR;
79 writel(value, ioaddr + DMA_CONTROL); 75 writel(value, ioaddr + DMA_CONTROL);
80
81 return;
82} 76}
83 77
84#ifdef DWMAC_DMA_DEBUG 78#ifdef DWMAC_DMA_DEBUG
@@ -111,7 +105,6 @@ static void show_tx_process_state(unsigned int status)
111 default: 105 default:
112 break; 106 break;
113 } 107 }
114 return;
115} 108}
116 109
117static void show_rx_process_state(unsigned int status) 110static void show_rx_process_state(unsigned int status)
@@ -149,7 +142,6 @@ static void show_rx_process_state(unsigned int status)
149 default: 142 default:
150 break; 143 break;
151 } 144 }
152 return;
153} 145}
154#endif 146#endif
155 147
@@ -227,6 +219,13 @@ int dwmac_dma_interrupt(unsigned long ioaddr,
227 return ret; 219 return ret;
228} 220}
229 221
222void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
223{
224 u32 csr6 = readl(ioaddr + DMA_CONTROL);
225 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
226
227 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
228}
230 229
231void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6], 230void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
232 unsigned int high, unsigned int low) 231 unsigned int high, unsigned int low)
@@ -237,8 +236,6 @@ void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
237 writel(data, ioaddr + high); 236 writel(data, ioaddr + high);
238 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0]; 237 data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
239 writel(data, ioaddr + low); 238 writel(data, ioaddr + low);
240
241 return;
242} 239}
243 240
244void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr, 241void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
@@ -257,7 +254,5 @@ void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
257 addr[3] = (lo_addr >> 24) & 0xff; 254 addr[3] = (lo_addr >> 24) & 0xff;
258 addr[4] = hi_addr & 0xff; 255 addr[4] = hi_addr & 0xff;
259 addr[5] = (hi_addr >> 8) & 0xff; 256 addr[5] = (hi_addr >> 8) & 0xff;
260
261 return;
262} 257}
263 258
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
new file mode 100644
index 000000000000..3c18ebece043
--- /dev/null
+++ b/drivers/net/stmmac/enh_desc.c
@@ -0,0 +1,337 @@
1/*******************************************************************************
2 This contains the functions to handle the enhanced descriptors.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include "common.h"
26
27static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
28 struct dma_desc *p, unsigned long ioaddr)
29{
30 int ret = 0;
31 struct net_device_stats *stats = (struct net_device_stats *)data;
32
33 if (unlikely(p->des01.etx.error_summary)) {
34 CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
35 if (unlikely(p->des01.etx.jabber_timeout)) {
36 CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
37 x->tx_jabber++;
38 }
39
40 if (unlikely(p->des01.etx.frame_flushed)) {
41 CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
42 x->tx_frame_flushed++;
43 dwmac_dma_flush_tx_fifo(ioaddr);
44 }
45
46 if (unlikely(p->des01.etx.loss_carrier)) {
47 CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
48 x->tx_losscarrier++;
49 stats->tx_carrier_errors++;
50 }
51 if (unlikely(p->des01.etx.no_carrier)) {
52 CHIP_DBG(KERN_ERR "\tno_carrier error\n");
53 x->tx_carrier++;
54 stats->tx_carrier_errors++;
55 }
56 if (unlikely(p->des01.etx.late_collision)) {
57 CHIP_DBG(KERN_ERR "\tlate_collision error\n");
58 stats->collisions += p->des01.etx.collision_count;
59 }
60 if (unlikely(p->des01.etx.excessive_collisions)) {
61 CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
62 stats->collisions += p->des01.etx.collision_count;
63 }
64 if (unlikely(p->des01.etx.excessive_deferral)) {
65 CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
66 x->tx_deferred++;
67 }
68
69 if (unlikely(p->des01.etx.underflow_error)) {
70 CHIP_DBG(KERN_ERR "\tunderflow error\n");
71 dwmac_dma_flush_tx_fifo(ioaddr);
72 x->tx_underflow++;
73 }
74
75 if (unlikely(p->des01.etx.ip_header_error)) {
76 CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
77 x->tx_ip_header_error++;
78 }
79
80 if (unlikely(p->des01.etx.payload_error)) {
81 CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
82 x->tx_payload_error++;
83 dwmac_dma_flush_tx_fifo(ioaddr);
84 }
85
86 ret = -1;
87 }
88
89 if (unlikely(p->des01.etx.deferred)) {
90 CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
91 x->tx_deferred++;
92 }
93#ifdef STMMAC_VLAN_TAG_USED
94 if (p->des01.etx.vlan_frame) {
95 CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
96 x->tx_vlan++;
97 }
98#endif
99
100 return ret;
101}
102
103static int enh_desc_get_tx_len(struct dma_desc *p)
104{
105 return p->des01.etx.buffer1_size;
106}
107
108static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
109{
110 int ret = good_frame;
111 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
112
113 /* bits 5 7 0 | Frame status
114 * ----------------------------------------------------------
115 * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects)
116 * 1 0 0 | IPv4/6 No CSUM errorS.
117 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
118 * 1 1 0 | IPv4/6 CSUM IP HR error
119 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
120 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
121 * 0 1 1 | COE bypassed.. no IPv4/6 frame
122 * 0 1 0 | Reserved.
123 */
124 if (status == 0x0) {
125 CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
126 ret = good_frame;
127 } else if (status == 0x4) {
128 CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
129 ret = good_frame;
130 } else if (status == 0x5) {
131 CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
132 ret = csum_none;
133 } else if (status == 0x6) {
134 CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
135 ret = csum_none;
136 } else if (status == 0x7) {
137 CHIP_DBG(KERN_ERR
138 "RX Des0 status: IPv4/6 Header and Payload Error.\n");
139 ret = csum_none;
140 } else if (status == 0x1) {
141 CHIP_DBG(KERN_ERR
142 "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
143 ret = discard_frame;
144 } else if (status == 0x3) {
145 CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
146 ret = discard_frame;
147 }
148 return ret;
149}
150
151static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
152 struct dma_desc *p)
153{
154 int ret = good_frame;
155 struct net_device_stats *stats = (struct net_device_stats *)data;
156
157 if (unlikely(p->des01.erx.error_summary)) {
158 CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
159 p->des01.erx);
160 if (unlikely(p->des01.erx.descriptor_error)) {
161 CHIP_DBG(KERN_ERR "\tdescriptor error\n");
162 x->rx_desc++;
163 stats->rx_length_errors++;
164 }
165 if (unlikely(p->des01.erx.overflow_error)) {
166 CHIP_DBG(KERN_ERR "\toverflow error\n");
167 x->rx_gmac_overflow++;
168 }
169
170 if (unlikely(p->des01.erx.ipc_csum_error))
171 CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
172
173 if (unlikely(p->des01.erx.late_collision)) {
174 CHIP_DBG(KERN_ERR "\tlate_collision error\n");
175 stats->collisions++;
176 stats->collisions++;
177 }
178 if (unlikely(p->des01.erx.receive_watchdog)) {
179 CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
180 x->rx_watchdog++;
181 }
182 if (unlikely(p->des01.erx.error_gmii)) {
183 CHIP_DBG(KERN_ERR "\tReceive Error\n");
184 x->rx_mii++;
185 }
186 if (unlikely(p->des01.erx.crc_error)) {
187 CHIP_DBG(KERN_ERR "\tCRC error\n");
188 x->rx_crc++;
189 stats->rx_crc_errors++;
190 }
191 ret = discard_frame;
192 }
193
194 /* After a payload csum error, the ES bit is set.
195 * It doesn't match with the information reported into the databook.
196 * At any rate, we need to understand if the CSUM hw computation is ok
197 * and report this info to the upper layers. */
198 ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
199 p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
200
201 if (unlikely(p->des01.erx.dribbling)) {
202 CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
203 ret = discard_frame;
204 }
205 if (unlikely(p->des01.erx.sa_filter_fail)) {
206 CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
207 x->sa_rx_filter_fail++;
208 ret = discard_frame;
209 }
210 if (unlikely(p->des01.erx.da_filter_fail)) {
211 CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n");
212 x->da_rx_filter_fail++;
213 ret = discard_frame;
214 }
215 if (unlikely(p->des01.erx.length_error)) {
216 CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
217 x->rx_length++;
218 ret = discard_frame;
219 }
220#ifdef STMMAC_VLAN_TAG_USED
221 if (p->des01.erx.vlan_tag) {
222 CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
223 x->rx_vlan++;
224 }
225#endif
226 return ret;
227}
228
229static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
230 int disable_rx_ic)
231{
232 int i;
233 for (i = 0; i < ring_size; i++) {
234 p->des01.erx.own = 1;
235 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
236 /* To support jumbo frames */
237 p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
238 if (i == ring_size - 1)
239 p->des01.erx.end_ring = 1;
240 if (disable_rx_ic)
241 p->des01.erx.disable_ic = 1;
242 p++;
243 }
244}
245
246static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
247{
248 int i;
249
250 for (i = 0; i < ring_size; i++) {
251 p->des01.etx.own = 0;
252 if (i == ring_size - 1)
253 p->des01.etx.end_ring = 1;
254 p++;
255 }
256}
257
258static int enh_desc_get_tx_owner(struct dma_desc *p)
259{
260 return p->des01.etx.own;
261}
262
263static int enh_desc_get_rx_owner(struct dma_desc *p)
264{
265 return p->des01.erx.own;
266}
267
268static void enh_desc_set_tx_owner(struct dma_desc *p)
269{
270 p->des01.etx.own = 1;
271}
272
273static void enh_desc_set_rx_owner(struct dma_desc *p)
274{
275 p->des01.erx.own = 1;
276}
277
278static int enh_desc_get_tx_ls(struct dma_desc *p)
279{
280 return p->des01.etx.last_segment;
281}
282
283static void enh_desc_release_tx_desc(struct dma_desc *p)
284{
285 int ter = p->des01.etx.end_ring;
286
287 memset(p, 0, sizeof(struct dma_desc));
288 p->des01.etx.end_ring = ter;
289}
290
291static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
292 int csum_flag)
293{
294 p->des01.etx.first_segment = is_fs;
295 if (unlikely(len > BUF_SIZE_4KiB)) {
296 p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
297 p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
298 } else {
299 p->des01.etx.buffer1_size = len;
300 }
301 if (likely(csum_flag))
302 p->des01.etx.checksum_insertion = cic_full;
303}
304
305static void enh_desc_clear_tx_ic(struct dma_desc *p)
306{
307 p->des01.etx.interrupt = 0;
308}
309
310static void enh_desc_close_tx_desc(struct dma_desc *p)
311{
312 p->des01.etx.last_segment = 1;
313 p->des01.etx.interrupt = 1;
314}
315
316static int enh_desc_get_rx_frame_len(struct dma_desc *p)
317{
318 return p->des01.erx.frame_length;
319}
320
321struct stmmac_desc_ops enh_desc_ops = {
322 .tx_status = enh_desc_get_tx_status,
323 .rx_status = enh_desc_get_rx_status,
324 .get_tx_len = enh_desc_get_tx_len,
325 .init_rx_desc = enh_desc_init_rx_desc,
326 .init_tx_desc = enh_desc_init_tx_desc,
327 .get_tx_owner = enh_desc_get_tx_owner,
328 .get_rx_owner = enh_desc_get_rx_owner,
329 .release_tx_desc = enh_desc_release_tx_desc,
330 .prepare_tx_desc = enh_desc_prepare_tx_desc,
331 .clear_tx_ic = enh_desc_clear_tx_ic,
332 .close_tx_desc = enh_desc_close_tx_desc,
333 .get_tx_ls = enh_desc_get_tx_ls,
334 .set_tx_owner = enh_desc_set_tx_owner,
335 .set_rx_owner = enh_desc_set_rx_owner,
336 .get_rx_frame_len = enh_desc_get_rx_frame_len,
337};
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c
new file mode 100644
index 000000000000..31ad53643792
--- /dev/null
+++ b/drivers/net/stmmac/norm_desc.c
@@ -0,0 +1,236 @@
1/*******************************************************************************
2 This contains the functions to handle the normal descriptors.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include "common.h"
26
27static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
28 struct dma_desc *p, unsigned long ioaddr)
29{
30 int ret = 0;
31 struct net_device_stats *stats = (struct net_device_stats *)data;
32
33 if (unlikely(p->des01.tx.error_summary)) {
34 if (unlikely(p->des01.tx.underflow_error)) {
35 x->tx_underflow++;
36 stats->tx_fifo_errors++;
37 }
38 if (unlikely(p->des01.tx.no_carrier)) {
39 x->tx_carrier++;
40 stats->tx_carrier_errors++;
41 }
42 if (unlikely(p->des01.tx.loss_carrier)) {
43 x->tx_losscarrier++;
44 stats->tx_carrier_errors++;
45 }
46 if (unlikely((p->des01.tx.excessive_deferral) ||
47 (p->des01.tx.excessive_collisions) ||
48 (p->des01.tx.late_collision)))
49 stats->collisions += p->des01.tx.collision_count;
50 ret = -1;
51 }
52 if (unlikely(p->des01.tx.heartbeat_fail)) {
53 x->tx_heartbeat++;
54 stats->tx_heartbeat_errors++;
55 ret = -1;
56 }
57 if (unlikely(p->des01.tx.deferred))
58 x->tx_deferred++;
59
60 return ret;
61}
62
63static int ndesc_get_tx_len(struct dma_desc *p)
64{
65 return p->des01.tx.buffer1_size;
66}
67
68/* This function verifies if each incoming frame has some errors
69 * and, if required, updates the multicast statistics.
70 * In case of success, it returns csum_none becasue the device
71 * is not able to compute the csum in HW. */
72static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
73 struct dma_desc *p)
74{
75 int ret = csum_none;
76 struct net_device_stats *stats = (struct net_device_stats *)data;
77
78 if (unlikely(p->des01.rx.last_descriptor == 0)) {
79 pr_warning("ndesc Error: Oversized Ethernet "
80 "frame spanned multiple buffers\n");
81 stats->rx_length_errors++;
82 return discard_frame;
83 }
84
85 if (unlikely(p->des01.rx.error_summary)) {
86 if (unlikely(p->des01.rx.descriptor_error))
87 x->rx_desc++;
88 if (unlikely(p->des01.rx.partial_frame_error))
89 x->rx_partial++;
90 if (unlikely(p->des01.rx.run_frame))
91 x->rx_runt++;
92 if (unlikely(p->des01.rx.frame_too_long))
93 x->rx_toolong++;
94 if (unlikely(p->des01.rx.collision)) {
95 x->rx_collision++;
96 stats->collisions++;
97 }
98 if (unlikely(p->des01.rx.crc_error)) {
99 x->rx_crc++;
100 stats->rx_crc_errors++;
101 }
102 ret = discard_frame;
103 }
104 if (unlikely(p->des01.rx.dribbling))
105 ret = discard_frame;
106
107 if (unlikely(p->des01.rx.length_error)) {
108 x->rx_length++;
109 ret = discard_frame;
110 }
111 if (unlikely(p->des01.rx.mii_error)) {
112 x->rx_mii++;
113 ret = discard_frame;
114 }
115 if (p->des01.rx.multicast_frame) {
116 x->rx_multicast++;
117 stats->multicast++;
118 }
119 return ret;
120}
121
122static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
123 int disable_rx_ic)
124{
125 int i;
126 for (i = 0; i < ring_size; i++) {
127 p->des01.rx.own = 1;
128 p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
129 if (i == ring_size - 1)
130 p->des01.rx.end_ring = 1;
131 if (disable_rx_ic)
132 p->des01.rx.disable_ic = 1;
133 p++;
134 }
135}
136
137static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
138{
139 int i;
140 for (i = 0; i < ring_size; i++) {
141 p->des01.tx.own = 0;
142 if (i == ring_size - 1)
143 p->des01.tx.end_ring = 1;
144 p++;
145 }
146}
147
148static int ndesc_get_tx_owner(struct dma_desc *p)
149{
150 return p->des01.tx.own;
151}
152
153static int ndesc_get_rx_owner(struct dma_desc *p)
154{
155 return p->des01.rx.own;
156}
157
158static void ndesc_set_tx_owner(struct dma_desc *p)
159{
160 p->des01.tx.own = 1;
161}
162
163static void ndesc_set_rx_owner(struct dma_desc *p)
164{
165 p->des01.rx.own = 1;
166}
167
168static int ndesc_get_tx_ls(struct dma_desc *p)
169{
170 return p->des01.tx.last_segment;
171}
172
173static void ndesc_release_tx_desc(struct dma_desc *p)
174{
175 int ter = p->des01.tx.end_ring;
176
177 /* clean field used within the xmit */
178 p->des01.tx.first_segment = 0;
179 p->des01.tx.last_segment = 0;
180 p->des01.tx.buffer1_size = 0;
181
182 /* clean status reported */
183 p->des01.tx.error_summary = 0;
184 p->des01.tx.underflow_error = 0;
185 p->des01.tx.no_carrier = 0;
186 p->des01.tx.loss_carrier = 0;
187 p->des01.tx.excessive_deferral = 0;
188 p->des01.tx.excessive_collisions = 0;
189 p->des01.tx.late_collision = 0;
190 p->des01.tx.heartbeat_fail = 0;
191 p->des01.tx.deferred = 0;
192
193 /* set termination field */
194 p->des01.tx.end_ring = ter;
195}
196
197static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
198 int csum_flag)
199{
200 p->des01.tx.first_segment = is_fs;
201 p->des01.tx.buffer1_size = len;
202}
203
204static void ndesc_clear_tx_ic(struct dma_desc *p)
205{
206 p->des01.tx.interrupt = 0;
207}
208
209static void ndesc_close_tx_desc(struct dma_desc *p)
210{
211 p->des01.tx.last_segment = 1;
212 p->des01.tx.interrupt = 1;
213}
214
215static int ndesc_get_rx_frame_len(struct dma_desc *p)
216{
217 return p->des01.rx.frame_length;
218}
219
220struct stmmac_desc_ops ndesc_ops = {
221 .tx_status = ndesc_get_tx_status,
222 .rx_status = ndesc_get_rx_status,
223 .get_tx_len = ndesc_get_tx_len,
224 .init_rx_desc = ndesc_init_rx_desc,
225 .init_tx_desc = ndesc_init_tx_desc,
226 .get_tx_owner = ndesc_get_tx_owner,
227 .get_rx_owner = ndesc_get_rx_owner,
228 .release_tx_desc = ndesc_release_tx_desc,
229 .prepare_tx_desc = ndesc_prepare_tx_desc,
230 .clear_tx_ic = ndesc_clear_tx_ic,
231 .close_tx_desc = ndesc_close_tx_desc,
232 .get_tx_ls = ndesc_get_tx_ls,
233 .set_tx_owner = ndesc_set_tx_owner,
234 .set_rx_owner = ndesc_set_rx_owner,
235 .get_rx_frame_len = ndesc_get_rx_frame_len,
236};
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index ba35e6943cf4..ebebc644b1b8 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,14 +20,9 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define DRV_MODULE_VERSION "Jan_2010" 23#define DRV_MODULE_VERSION "Apr_2010"
24#include <linux/stmmac.h> 24#include <linux/stmmac.h>
25 25
26#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
27#define STMMAC_VLAN_TAG_USED
28#include <linux/if_vlan.h>
29#endif
30
31#include "common.h" 26#include "common.h"
32#ifdef CONFIG_STMMAC_TIMER 27#ifdef CONFIG_STMMAC_TIMER
33#include "stmmac_timer.h" 28#include "stmmac_timer.h"
@@ -93,6 +88,7 @@ struct stmmac_priv {
93#ifdef STMMAC_VLAN_TAG_USED 88#ifdef STMMAC_VLAN_TAG_USED
94 struct vlan_group *vlgrp; 89 struct vlan_group *vlgrp;
95#endif 90#endif
91 int enh_desc;
96}; 92};
97 93
98#ifdef CONFIG_STM_DRIVERS 94#ifdef CONFIG_STM_DRIVERS
@@ -120,3 +116,5 @@ static inline int stmmac_claim_resource(struct platform_device *pdev)
120extern int stmmac_mdio_unregister(struct net_device *ndev); 116extern int stmmac_mdio_unregister(struct net_device *ndev);
121extern int stmmac_mdio_register(struct net_device *ndev); 117extern int stmmac_mdio_register(struct net_device *ndev);
122extern void stmmac_set_ethtool_ops(struct net_device *netdev); 118extern void stmmac_set_ethtool_ops(struct net_device *netdev);
119extern struct stmmac_desc_ops enh_desc_ops;
120extern struct stmmac_desc_ops ndesc_ops;
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index c021eaa3ca69..f080509923f0 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -102,7 +102,6 @@ void stmmac_ethtool_getdrvinfo(struct net_device *dev,
102 strcpy(info->version, DRV_MODULE_VERSION); 102 strcpy(info->version, DRV_MODULE_VERSION);
103 info->fw_version[0] = '\0'; 103 info->fw_version[0] = '\0';
104 info->n_stats = STMMAC_STATS_LEN; 104 info->n_stats = STMMAC_STATS_LEN;
105 return;
106} 105}
107 106
108int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) 107int stmmac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -194,8 +193,6 @@ void stmmac_ethtool_gregs(struct net_device *dev,
194 reg_space[i + 55] = 193 reg_space[i + 55] =
195 readl(dev->base_addr + (DMA_BUS_MODE + (i * 4))); 194 readl(dev->base_addr + (DMA_BUS_MODE + (i * 4)));
196 } 195 }
197
198 return;
199} 196}
200 197
201int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data) 198int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
@@ -233,7 +230,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
233 pause->tx_pause = 1; 230 pause->tx_pause = 1;
234 231
235 spin_unlock(&priv->lock); 232 spin_unlock(&priv->lock);
236 return;
237} 233}
238 234
239static int 235static int
@@ -292,8 +288,6 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
292 data[i] = (stmmac_gstrings_stats[i].sizeof_stat == 288 data[i] = (stmmac_gstrings_stats[i].sizeof_stat ==
293 sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p); 289 sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
294 } 290 }
295
296 return;
297} 291}
298 292
299static int stmmac_get_sset_count(struct net_device *netdev, int sset) 293static int stmmac_get_sset_count(struct net_device *netdev, int sset)
@@ -323,7 +317,6 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
323 WARN_ON(1); 317 WARN_ON(1);
324 break; 318 break;
325 } 319 }
326 return;
327} 320}
328 321
329/* Currently only support WOL through Magic packet. */ 322/* Currently only support WOL through Magic packet. */
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 4111a85ec80e..a31d580f306d 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -169,8 +169,6 @@ static void stmmac_verify_args(void)
169 flow_ctrl = FLOW_OFF; 169 flow_ctrl = FLOW_OFF;
170 if (unlikely((pause < 0) || (pause > 0xffff))) 170 if (unlikely((pause < 0) || (pause > 0xffff)))
171 pause = PAUSE_TIME; 171 pause = PAUSE_TIME;
172
173 return;
174} 172}
175 173
176#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG) 174#if defined(STMMAC_XMIT_DEBUG) || defined(STMMAC_RX_DEBUG)
@@ -184,7 +182,6 @@ static void print_pkt(unsigned char *buf, int len)
184 pr_info(" %02x", buf[j]); 182 pr_info(" %02x", buf[j]);
185 } 183 }
186 pr_info("\n"); 184 pr_info("\n");
187 return;
188} 185}
189#endif 186#endif
190 187
@@ -514,7 +511,6 @@ static void init_dma_desc_rings(struct net_device *dev)
514 pr_info("TX descriptor ring:\n"); 511 pr_info("TX descriptor ring:\n");
515 display_ring(priv->dma_tx, txsize); 512 display_ring(priv->dma_tx, txsize);
516 } 513 }
517 return;
518} 514}
519 515
520static void dma_free_rx_skbufs(struct stmmac_priv *priv) 516static void dma_free_rx_skbufs(struct stmmac_priv *priv)
@@ -529,7 +525,6 @@ static void dma_free_rx_skbufs(struct stmmac_priv *priv)
529 } 525 }
530 priv->rx_skbuff[i] = NULL; 526 priv->rx_skbuff[i] = NULL;
531 } 527 }
532 return;
533} 528}
534 529
535static void dma_free_tx_skbufs(struct stmmac_priv *priv) 530static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -547,7 +542,6 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
547 priv->tx_skbuff[i] = NULL; 542 priv->tx_skbuff[i] = NULL;
548 } 543 }
549 } 544 }
550 return;
551} 545}
552 546
553static void free_dma_desc_resources(struct stmmac_priv *priv) 547static void free_dma_desc_resources(struct stmmac_priv *priv)
@@ -567,8 +561,6 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
567 kfree(priv->rx_skbuff_dma); 561 kfree(priv->rx_skbuff_dma);
568 kfree(priv->rx_skbuff); 562 kfree(priv->rx_skbuff);
569 kfree(priv->tx_skbuff); 563 kfree(priv->tx_skbuff);
570
571 return;
572} 564}
573 565
574/** 566/**
@@ -598,8 +590,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
598 } 590 }
599 } 591 }
600 tx_coe = priv->tx_coe; 592 tx_coe = priv->tx_coe;
601
602 return;
603} 593}
604 594
605/** 595/**
@@ -675,7 +665,6 @@ static void stmmac_tx(struct stmmac_priv *priv)
675 } 665 }
676 netif_tx_unlock(priv->dev); 666 netif_tx_unlock(priv->dev);
677 } 667 }
678 return;
679} 668}
680 669
681static inline void stmmac_enable_irq(struct stmmac_priv *priv) 670static inline void stmmac_enable_irq(struct stmmac_priv *priv)
@@ -731,8 +720,6 @@ void stmmac_schedule(struct net_device *dev)
731 priv->xstats.sched_timer_n++; 720 priv->xstats.sched_timer_n++;
732 721
733 _stmmac_schedule(priv); 722 _stmmac_schedule(priv);
734
735 return;
736} 723}
737 724
738static void stmmac_no_timer_started(unsigned int x) 725static void stmmac_no_timer_started(unsigned int x)
@@ -763,8 +750,6 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
763 750
764 priv->dev->stats.tx_errors++; 751 priv->dev->stats.tx_errors++;
765 netif_wake_queue(priv->dev); 752 netif_wake_queue(priv->dev);
766
767 return;
768} 753}
769 754
770 755
@@ -788,8 +773,6 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
788 stmmac_tx_err(priv); 773 stmmac_tx_err(priv);
789 } else if (unlikely(status == tx_hard_error)) 774 } else if (unlikely(status == tx_hard_error))
790 stmmac_tx_err(priv); 775 stmmac_tx_err(priv);
791
792 return;
793} 776}
794 777
795/** 778/**
@@ -837,7 +820,7 @@ static int stmmac_open(struct net_device *dev)
837#ifdef CONFIG_STMMAC_TIMER 820#ifdef CONFIG_STMMAC_TIMER
838 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL); 821 priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
839 if (unlikely(priv->tm == NULL)) { 822 if (unlikely(priv->tm == NULL)) {
840 pr_err("%s: ERROR: timer memory alloc failed \n", __func__); 823 pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
841 return -ENOMEM; 824 return -ENOMEM;
842 } 825 }
843 priv->tm->freq = tmrate; 826 priv->tm->freq = tmrate;
@@ -1197,7 +1180,6 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1197 } 1180 }
1198 priv->hw->desc->set_rx_owner(p + entry); 1181 priv->hw->desc->set_rx_owner(p + entry);
1199 } 1182 }
1200 return;
1201} 1183}
1202 1184
1203static int stmmac_rx(struct stmmac_priv *priv, int limit) 1185static int stmmac_rx(struct stmmac_priv *priv, int limit)
@@ -1280,7 +1262,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
1280 1262
1281 priv->dev->stats.rx_packets++; 1263 priv->dev->stats.rx_packets++;
1282 priv->dev->stats.rx_bytes += frame_len; 1264 priv->dev->stats.rx_bytes += frame_len;
1283 priv->dev->last_rx = jiffies;
1284 } 1265 }
1285 entry = next_entry; 1266 entry = next_entry;
1286 p = p_next; /* use prefetched values */ 1267 p = p_next; /* use prefetched values */
@@ -1332,7 +1313,6 @@ static void stmmac_tx_timeout(struct net_device *dev)
1332 1313
1333 /* Clear Tx resources and restart transmitting again */ 1314 /* Clear Tx resources and restart transmitting again */
1334 stmmac_tx_err(priv); 1315 stmmac_tx_err(priv);
1335 return;
1336} 1316}
1337 1317
1338/* Configuration changes (passed on by ifconfig) */ 1318/* Configuration changes (passed on by ifconfig) */
@@ -1374,7 +1354,6 @@ static void stmmac_multicast_list(struct net_device *dev)
1374 spin_lock(&priv->lock); 1354 spin_lock(&priv->lock);
1375 priv->hw->mac->set_filter(dev); 1355 priv->hw->mac->set_filter(dev);
1376 spin_unlock(&priv->lock); 1356 spin_unlock(&priv->lock);
1377 return;
1378} 1357}
1379 1358
1380/** 1359/**
@@ -1490,8 +1469,6 @@ static void stmmac_vlan_rx_register(struct net_device *dev,
1490 spin_lock(&priv->lock); 1469 spin_lock(&priv->lock);
1491 priv->vlgrp = grp; 1470 priv->vlgrp = grp;
1492 spin_unlock(&priv->lock); 1471 spin_unlock(&priv->lock);
1493
1494 return;
1495} 1472}
1496#endif 1473#endif
1497 1474
@@ -1587,6 +1564,12 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1587 else 1564 else
1588 device = dwmac100_setup(ioaddr); 1565 device = dwmac100_setup(ioaddr);
1589 1566
1567 if (priv->enh_desc) {
1568 device->desc = &enh_desc_ops;
1569 pr_info("\tEnhanced descriptor structure\n");
1570 } else
1571 device->desc = &ndesc_ops;
1572
1590 if (!device) 1573 if (!device)
1591 return -ENOMEM; 1574 return -ENOMEM;
1592 1575
@@ -1727,6 +1710,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1727 priv->bus_id = plat_dat->bus_id; 1710 priv->bus_id = plat_dat->bus_id;
1728 priv->pbl = plat_dat->pbl; /* TLI */ 1711 priv->pbl = plat_dat->pbl; /* TLI */
1729 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */ 1712 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
1713 priv->enh_desc = plat_dat->enh_desc;
1730 1714
1731 platform_set_drvdata(pdev, ndev); 1715 platform_set_drvdata(pdev, ndev);
1732 1716
diff --git a/drivers/net/stmmac/stmmac_timer.c b/drivers/net/stmmac/stmmac_timer.c
index 679f61ffb1f8..2a0e1abde7e7 100644
--- a/drivers/net/stmmac/stmmac_timer.c
+++ b/drivers/net/stmmac/stmmac_timer.c
@@ -31,8 +31,6 @@ static void stmmac_timer_handler(void *data)
31 struct net_device *dev = (struct net_device *)data; 31 struct net_device *dev = (struct net_device *)data;
32 32
33 stmmac_schedule(dev); 33 stmmac_schedule(dev);
34
35 return;
36} 34}
37 35
38#define STMMAC_TIMER_MSG(timer, freq) \ 36#define STMMAC_TIMER_MSG(timer, freq) \
@@ -47,13 +45,11 @@ static void stmmac_rtc_start(unsigned int new_freq)
47{ 45{
48 rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq); 46 rtc_irq_set_freq(stmmac_rtc, &stmmac_task, new_freq);
49 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1); 47 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 1);
50 return;
51} 48}
52 49
53static void stmmac_rtc_stop(void) 50static void stmmac_rtc_stop(void)
54{ 51{
55 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0); 52 rtc_irq_set_state(stmmac_rtc, &stmmac_task, 0);
56 return;
57} 53}
58 54
59int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm) 55int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
@@ -102,13 +98,11 @@ static void stmmac_tmu_start(unsigned int new_freq)
102{ 98{
103 clk_set_rate(timer_clock, new_freq); 99 clk_set_rate(timer_clock, new_freq);
104 clk_enable(timer_clock); 100 clk_enable(timer_clock);
105 return;
106} 101}
107 102
108static void stmmac_tmu_stop(void) 103static void stmmac_tmu_stop(void)
109{ 104{
110 clk_disable(timer_clock); 105 clk_disable(timer_clock);
111 return;
112} 106}
113 107
114int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm) 108int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
diff --git a/drivers/net/stnic.c b/drivers/net/stnic.c
index 87a6b8eabc67..d85f0a84bc7b 100644
--- a/drivers/net/stnic.c
+++ b/drivers/net/stnic.c
@@ -280,7 +280,6 @@ stnic_init (struct net_device *dev)
280{ 280{
281 stnic_reset (dev); 281 stnic_reset (dev);
282 NS8390_init (dev, 0); 282 NS8390_init (dev, 0);
283 return;
284} 283}
285 284
286static void __exit stnic_cleanup(void) 285static void __exit stnic_cleanup(void)
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 8b28c89a9a77..151312342243 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -412,7 +412,7 @@ static int init586(struct net_device *dev)
412 volatile struct iasetup_cmd_struct *ias_cmd; 412 volatile struct iasetup_cmd_struct *ias_cmd;
413 volatile struct tdr_cmd_struct *tdr_cmd; 413 volatile struct tdr_cmd_struct *tdr_cmd;
414 volatile struct mcsetup_cmd_struct *mc_cmd; 414 volatile struct mcsetup_cmd_struct *mc_cmd;
415 struct dev_mc_list *dmi; 415 struct netdev_hw_addr *ha;
416 int num_addrs=netdev_mc_count(dev); 416 int num_addrs=netdev_mc_count(dev);
417 417
418 ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct)); 418 ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
@@ -536,9 +536,9 @@ static int init586(struct net_device *dev)
536 mc_cmd->mc_cnt = swab16(num_addrs * 6); 536 mc_cmd->mc_cnt = swab16(num_addrs * 6);
537 537
538 i = 0; 538 i = 0;
539 netdev_for_each_mc_addr(dmi, dev) 539 netdev_for_each_mc_addr(ha, dev)
540 memcpy((char *) mc_cmd->mc_list[i++], 540 memcpy((char *) mc_cmd->mc_list[i++],
541 dmi->dmi_addr, ETH_ALEN); 541 ha->addr, ETH_ALEN);
542 542
543 p->scb->cbl_offset = make16(mc_cmd); 543 p->scb->cbl_offset = make16(mc_cmd);
544 p->scb->cmd_cuc = CUC_START; 544 p->scb->cmd_cuc = CUC_START;
@@ -985,7 +985,7 @@ static void sun3_82586_timeout(struct net_device *dev)
985 p->scb->cmd_cuc = CUC_START; 985 p->scb->cmd_cuc = CUC_START;
986 sun3_attn586(); 986 sun3_attn586();
987 WAIT_4_SCB_CMD(); 987 WAIT_4_SCB_CMD();
988 dev->trans_start = jiffies; 988 dev->trans_start = jiffies; /* prevent tx timeout */
989 return 0; 989 return 0;
990 } 990 }
991#endif 991#endif
@@ -998,7 +998,7 @@ static void sun3_82586_timeout(struct net_device *dev)
998 sun3_82586_close(dev); 998 sun3_82586_close(dev);
999 sun3_82586_open(dev); 999 sun3_82586_open(dev);
1000 } 1000 }
1001 dev->trans_start = jiffies; 1001 dev->trans_start = jiffies; /* prevent tx timeout */
1002} 1002}
1003 1003
1004/****************************************************** 1004/******************************************************
@@ -1062,7 +1062,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
1062 } 1062 }
1063 1063
1064 sun3_attn586(); 1064 sun3_attn586();
1065 dev->trans_start = jiffies;
1066 if(!i) 1065 if(!i)
1067 dev_kfree_skb(skb); 1066 dev_kfree_skb(skb);
1068 WAIT_4_SCB_CMD(); 1067 WAIT_4_SCB_CMD();
@@ -1082,7 +1081,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
1082 p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0; 1081 p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;
1083 1082
1084 p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0])); 1083 p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
1085 dev->trans_start = jiffies;
1086 p->nop_point = next_nop; 1084 p->nop_point = next_nop;
1087 dev_kfree_skb(skb); 1085 dev_kfree_skb(skb);
1088# endif 1086# endif
@@ -1097,7 +1095,6 @@ static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
1097 p->nop_cmds[next_nop]->cmd_status = 0; 1095 p->nop_cmds[next_nop]->cmd_status = 0;
1098 1096
1099 p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count])); 1097 p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
1100 dev->trans_start = jiffies;
1101 p->xmit_count = next_nop; 1098 p->xmit_count = next_nop;
1102 1099
1103 { 1100 {
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 1694ca5bfb41..358c22f9acbe 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -523,8 +523,8 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
523 523
524 /* Transmitter timeout, serious problems. */ 524 /* Transmitter timeout, serious problems. */
525 if (netif_queue_stopped(dev)) { 525 if (netif_queue_stopped(dev)) {
526 int tickssofar = jiffies - dev->trans_start; 526 int tickssofar = jiffies - dev_trans_start(dev);
527 if (tickssofar < 20) 527 if (tickssofar < HZ/5)
528 return NETDEV_TX_BUSY; 528 return NETDEV_TX_BUSY;
529 529
530 DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n", 530 DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
@@ -559,7 +559,6 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
559 REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT; 559 REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
560 560
561 netif_start_queue(dev); 561 netif_start_queue(dev);
562 dev->trans_start = jiffies;
563 562
564 return NETDEV_TX_OK; 563 return NETDEV_TX_OK;
565 } 564 }
@@ -637,8 +636,7 @@ static int lance_start_xmit( struct sk_buff *skb, struct net_device *dev )
637 AREG = CSR0; 636 AREG = CSR0;
638 DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n", 637 DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
639 dev->name, DREG )); 638 dev->name, DREG ));
640 dev->trans_start = jiffies; 639 dev_kfree_skb(skb);
641 dev_kfree_skb( skb );
642 640
643 lp->lock = 0; 641 lp->lock = 0;
644 if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) == 642 if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 5f0ec390d6fc..367e96f317d4 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -362,7 +362,7 @@ static void bigmac_tcvr_write(struct bigmac *bp, void __iomem *tregs,
362 default: 362 default:
363 printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n"); 363 printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
364 return; 364 return;
365 }; 365 }
366 366
367 idle_transceiver(tregs); 367 idle_transceiver(tregs);
368 write_tcvr_bit(bp, tregs, 0); 368 write_tcvr_bit(bp, tregs, 0);
@@ -401,7 +401,7 @@ static unsigned short bigmac_tcvr_read(struct bigmac *bp,
401 default: 401 default:
402 printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n"); 402 printk(KERN_ERR "bigmac_tcvr_read: Whoops, no known transceiver type.\n");
403 return 0xffff; 403 return 0xffff;
404 }; 404 }
405 405
406 idle_transceiver(tregs); 406 idle_transceiver(tregs);
407 write_tcvr_bit(bp, tregs, 0); 407 write_tcvr_bit(bp, tregs, 0);
@@ -982,8 +982,6 @@ static int bigmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
982 sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL); 982 sbus_writel(CREG_CTRL_TWAKEUP, bp->creg + CREG_CTRL);
983 983
984 984
985 dev->trans_start = jiffies;
986
987 return NETDEV_TX_OK; 985 return NETDEV_TX_OK;
988} 986}
989 987
@@ -999,7 +997,7 @@ static void bigmac_set_multicast(struct net_device *dev)
999{ 997{
1000 struct bigmac *bp = netdev_priv(dev); 998 struct bigmac *bp = netdev_priv(dev);
1001 void __iomem *bregs = bp->bregs; 999 void __iomem *bregs = bp->bregs;
1002 struct dev_mc_list *dmi; 1000 struct netdev_hw_addr *ha;
1003 char *addrs; 1001 char *addrs;
1004 int i; 1002 int i;
1005 u32 tmp, crc; 1003 u32 tmp, crc;
@@ -1028,8 +1026,8 @@ static void bigmac_set_multicast(struct net_device *dev)
1028 for (i = 0; i < 4; i++) 1026 for (i = 0; i < 4; i++)
1029 hash_table[i] = 0; 1027 hash_table[i] = 0;
1030 1028
1031 netdev_for_each_mc_addr(dmi, dev) { 1029 netdev_for_each_mc_addr(ha, dev) {
1032 addrs = dmi->dmi_addr; 1030 addrs = ha->addr;
1033 1031
1034 if (!(*addrs & 1)) 1032 if (!(*addrs & 1))
1035 continue; 1033 continue;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 8249a394a4e1..2678588ea4b2 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -788,7 +788,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
788 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); 788 iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
789 mdio_delay(); 789 mdio_delay();
790 } 790 }
791 return;
792} 791}
793 792
794static int mdio_wait_link(struct net_device *dev, int wait) 793static int mdio_wait_link(struct net_device *dev, int wait)
@@ -972,7 +971,7 @@ static void tx_timeout(struct net_device *dev)
972 971
973 dev->if_port = 0; 972 dev->if_port = 0;
974 973
975 dev->trans_start = jiffies; 974 dev->trans_start = jiffies; /* prevent tx timeout */
976 dev->stats.tx_errors++; 975 dev->stats.tx_errors++;
977 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { 976 if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
978 netif_wake_queue(dev); 977 netif_wake_queue(dev);
@@ -1022,7 +1021,6 @@ static void init_ring(struct net_device *dev)
1022 np->tx_skbuff[i] = NULL; 1021 np->tx_skbuff[i] = NULL;
1023 np->tx_ring[i].status = 0; 1022 np->tx_ring[i].status = 0;
1024 } 1023 }
1025 return;
1026} 1024}
1027 1025
1028static void tx_poll (unsigned long data) 1026static void tx_poll (unsigned long data)
@@ -1049,7 +1047,6 @@ static void tx_poll (unsigned long data)
1049 if (ioread32 (np->base + TxListPtr) == 0) 1047 if (ioread32 (np->base + TxListPtr) == 0)
1050 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc), 1048 iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc),
1051 np->base + TxListPtr); 1049 np->base + TxListPtr);
1052 return;
1053} 1050}
1054 1051
1055static netdev_tx_t 1052static netdev_tx_t
@@ -1084,7 +1081,6 @@ start_tx (struct sk_buff *skb, struct net_device *dev)
1084 } else { 1081 } else {
1085 netif_stop_queue (dev); 1082 netif_stop_queue (dev);
1086 } 1083 }
1087 dev->trans_start = jiffies;
1088 if (netif_msg_tx_queued(np)) { 1084 if (netif_msg_tx_queued(np)) {
1089 printk (KERN_DEBUG 1085 printk (KERN_DEBUG
1090 "%s: Transmit frame #%d queued in slot %d.\n", 1086 "%s: Transmit frame #%d queued in slot %d.\n",
@@ -1379,7 +1375,6 @@ not_done:
1379 if (np->budget <= 0) 1375 if (np->budget <= 0)
1380 np->budget = RX_BUDGET; 1376 np->budget = RX_BUDGET;
1381 tasklet_schedule(&np->rx_tasklet); 1377 tasklet_schedule(&np->rx_tasklet);
1382 return;
1383} 1378}
1384 1379
1385static void refill_rx (struct net_device *dev) 1380static void refill_rx (struct net_device *dev)
@@ -1410,7 +1405,6 @@ static void refill_rx (struct net_device *dev)
1410 np->rx_ring[entry].status = 0; 1405 np->rx_ring[entry].status = 0;
1411 cnt++; 1406 cnt++;
1412 } 1407 }
1413 return;
1414} 1408}
1415static void netdev_error(struct net_device *dev, int intr_status) 1409static void netdev_error(struct net_device *dev, int intr_status)
1416{ 1410{
@@ -1522,13 +1516,13 @@ static void set_rx_mode(struct net_device *dev)
1522 memset(mc_filter, 0xff, sizeof(mc_filter)); 1516 memset(mc_filter, 0xff, sizeof(mc_filter));
1523 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1517 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1524 } else if (!netdev_mc_empty(dev)) { 1518 } else if (!netdev_mc_empty(dev)) {
1525 struct dev_mc_list *mclist; 1519 struct netdev_hw_addr *ha;
1526 int bit; 1520 int bit;
1527 int index; 1521 int index;
1528 int crc; 1522 int crc;
1529 memset (mc_filter, 0, sizeof (mc_filter)); 1523 memset (mc_filter, 0, sizeof (mc_filter));
1530 netdev_for_each_mc_addr(mclist, dev) { 1524 netdev_for_each_mc_addr(ha, dev) {
1531 crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr); 1525 crc = ether_crc_le(ETH_ALEN, ha->addr);
1532 for (index=0, bit=0; bit < 6; bit++, crc <<= 1) 1526 for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
1533 if (crc & 0x80000000) index |= 1 << bit; 1527 if (crc & 0x80000000) index |= 1 << bit;
1534 mc_filter[index/16] |= (1 << (index % 16)); 1528 mc_filter[index/16] |= (1 << (index % 16));
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index e6880f1c4e8c..434f9d735333 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1136,7 +1136,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
1136 writel(gp->tx_new, gp->regs + TXDMA_KICK); 1136 writel(gp->tx_new, gp->regs + TXDMA_KICK);
1137 spin_unlock_irqrestore(&gp->tx_lock, flags); 1137 spin_unlock_irqrestore(&gp->tx_lock, flags);
1138 1138
1139 dev->trans_start = jiffies; 1139 dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
1140 1140
1141 return NETDEV_TX_OK; 1141 return NETDEV_TX_OK;
1142} 1142}
@@ -1846,12 +1846,12 @@ static u32 gem_setup_multicast(struct gem *gp)
1846 } else { 1846 } else {
1847 u16 hash_table[16]; 1847 u16 hash_table[16];
1848 u32 crc; 1848 u32 crc;
1849 struct dev_mc_list *dmi; 1849 struct netdev_hw_addr *ha;
1850 int i; 1850 int i;
1851 1851
1852 memset(hash_table, 0, sizeof(hash_table)); 1852 memset(hash_table, 0, sizeof(hash_table));
1853 netdev_for_each_mc_addr(dmi, gp->dev) { 1853 netdev_for_each_mc_addr(ha, gp->dev) {
1854 char *addrs = dmi->dmi_addr; 1854 char *addrs = ha->addr;
1855 1855
1856 if (!(*addrs & 1)) 1856 if (!(*addrs & 1))
1857 continue; 1857 continue;
@@ -2923,7 +2923,6 @@ static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr)
2923 dev_addr[1] = 0x00; 2923 dev_addr[1] = 0x00;
2924 dev_addr[2] = 0x20; 2924 dev_addr[2] = 0x20;
2925 get_random_bytes(dev_addr + 3, 3); 2925 get_random_bytes(dev_addr + 3, 3);
2926 return;
2927} 2926}
2928#endif /* not Sparc and not PPC */ 2927#endif /* not Sparc and not PPC */
2929 2928
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index ad2cfc5bb9e1..3d9650b8d38f 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -855,7 +855,7 @@ static void happy_meal_timer(unsigned long data)
855 hp->timer_ticks = 0; 855 hp->timer_ticks = 0;
856 hp->timer_state = asleep; /* foo on you */ 856 hp->timer_state = asleep; /* foo on you */
857 break; 857 break;
858 }; 858 }
859 859
860 if (restart_timer) { 860 if (restart_timer) {
861 hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */ 861 hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
@@ -1488,7 +1488,7 @@ static int happy_meal_init(struct happy_meal *hp)
1488 HMD(("external, disable MII, ")); 1488 HMD(("external, disable MII, "));
1489 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB); 1489 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1490 break; 1490 break;
1491 }; 1491 }
1492 1492
1493 if (happy_meal_tcvr_reset(hp, tregs)) 1493 if (happy_meal_tcvr_reset(hp, tregs))
1494 return -EAGAIN; 1494 return -EAGAIN;
@@ -1523,13 +1523,13 @@ static int happy_meal_init(struct happy_meal *hp)
1523 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); 1523 hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
1524 } else if ((hp->dev->flags & IFF_PROMISC) == 0) { 1524 } else if ((hp->dev->flags & IFF_PROMISC) == 0) {
1525 u16 hash_table[4]; 1525 u16 hash_table[4];
1526 struct dev_mc_list *dmi; 1526 struct netdev_hw_addr *ha;
1527 char *addrs; 1527 char *addrs;
1528 u32 crc; 1528 u32 crc;
1529 1529
1530 memset(hash_table, 0, sizeof(hash_table)); 1530 memset(hash_table, 0, sizeof(hash_table));
1531 netdev_for_each_mc_addr(dmi, hp->dev) { 1531 netdev_for_each_mc_addr(ha, hp->dev) {
1532 addrs = dmi->dmi_addr; 1532 addrs = ha->addr;
1533 1533
1534 if (!(*addrs & 1)) 1534 if (!(*addrs & 1))
1535 continue; 1535 continue;
@@ -1734,7 +1734,7 @@ static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
1734 case external: 1734 case external:
1735 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB); 1735 hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
1736 break; 1736 break;
1737 }; 1737 }
1738 if (happy_meal_tcvr_reset(hp, tregs)) 1738 if (happy_meal_tcvr_reset(hp, tregs))
1739 return; 1739 return;
1740 1740
@@ -2341,8 +2341,6 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2341 2341
2342 spin_unlock_irq(&hp->happy_lock); 2342 spin_unlock_irq(&hp->happy_lock);
2343 2343
2344 dev->trans_start = jiffies;
2345
2346 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); 2344 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2347 return NETDEV_TX_OK; 2345 return NETDEV_TX_OK;
2348} 2346}
@@ -2362,7 +2360,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
2362{ 2360{
2363 struct happy_meal *hp = netdev_priv(dev); 2361 struct happy_meal *hp = netdev_priv(dev);
2364 void __iomem *bregs = hp->bigmacregs; 2362 void __iomem *bregs = hp->bigmacregs;
2365 struct dev_mc_list *dmi; 2363 struct netdev_hw_addr *ha;
2366 char *addrs; 2364 char *addrs;
2367 u32 crc; 2365 u32 crc;
2368 2366
@@ -2380,8 +2378,8 @@ static void happy_meal_set_multicast(struct net_device *dev)
2380 u16 hash_table[4]; 2378 u16 hash_table[4];
2381 2379
2382 memset(hash_table, 0, sizeof(hash_table)); 2380 memset(hash_table, 0, sizeof(hash_table));
2383 netdev_for_each_mc_addr(dmi, dev) { 2381 netdev_for_each_mc_addr(ha, dev) {
2384 addrs = dmi->dmi_addr; 2382 addrs = ha->addr;
2385 2383
2386 if (!(*addrs & 1)) 2384 if (!(*addrs & 1))
2387 continue; 2385 continue;
@@ -2945,7 +2943,6 @@ static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
2945 dev_addr[1] = 0x00; 2943 dev_addr[1] = 0x00;
2946 dev_addr[2] = 0x20; 2944 dev_addr[2] = 0x20;
2947 get_random_bytes(&dev_addr[3], 3); 2945 get_random_bytes(&dev_addr[3], 3);
2948 return;
2949} 2946}
2950#endif /* !(CONFIG_SPARC) */ 2947#endif /* !(CONFIG_SPARC) */
2951 2948
@@ -3004,7 +3001,6 @@ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
3004 dev->base_addr = (long) pdev; 3001 dev->base_addr = (long) pdev;
3005 3002
3006 hp = netdev_priv(dev); 3003 hp = netdev_priv(dev);
3007 memset(hp, 0, sizeof(*hp));
3008 3004
3009 hp->happy_dev = pdev; 3005 hp->happy_dev = pdev;
3010 hp->dma_dev = &pdev->dev; 3006 hp->dma_dev = &pdev->dev;
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 0fc014ef9e98..7d9c33dd9d1a 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1003,7 +1003,7 @@ static int lance_reset(struct net_device *dev)
1003 } 1003 }
1004 lp->init_ring(dev); 1004 lp->init_ring(dev);
1005 load_csrs(lp); 1005 load_csrs(lp);
1006 dev->trans_start = jiffies; 1006 dev->trans_start = jiffies; /* prevent tx timeout */
1007 status = init_restart_lance(lp); 1007 status = init_restart_lance(lp);
1008 return status; 1008 return status;
1009} 1009}
@@ -1054,7 +1054,7 @@ static void lance_piocopy_from_skb(void __iomem *dest, unsigned char *src, int l
1054 } 1054 }
1055 src = (char *) p16; 1055 src = (char *) p16;
1056 break; 1056 break;
1057 }; 1057 }
1058 if (len >= 2) { 1058 if (len >= 2) {
1059 u16 val = src[0] << 8 | src[1]; 1059 u16 val = src[0] << 8 | src[1];
1060 sbus_writew(val, piobuf); 1060 sbus_writew(val, piobuf);
@@ -1160,7 +1160,6 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
1160 1160
1161 spin_unlock_irq(&lp->lock); 1161 spin_unlock_irq(&lp->lock);
1162 1162
1163 dev->trans_start = jiffies;
1164 dev_kfree_skb(skb); 1163 dev_kfree_skb(skb);
1165 1164
1166 return NETDEV_TX_OK; 1165 return NETDEV_TX_OK;
@@ -1170,7 +1169,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
1170static void lance_load_multicast(struct net_device *dev) 1169static void lance_load_multicast(struct net_device *dev)
1171{ 1170{
1172 struct lance_private *lp = netdev_priv(dev); 1171 struct lance_private *lp = netdev_priv(dev);
1173 struct dev_mc_list *dmi; 1172 struct netdev_hw_addr *ha;
1174 char *addrs; 1173 char *addrs;
1175 u32 crc; 1174 u32 crc;
1176 u32 val; 1175 u32 val;
@@ -1195,8 +1194,8 @@ static void lance_load_multicast(struct net_device *dev)
1195 return; 1194 return;
1196 1195
1197 /* Add addresses */ 1196 /* Add addresses */
1198 netdev_for_each_mc_addr(dmi, dev) { 1197 netdev_for_each_mc_addr(ha, dev) {
1199 addrs = dmi->dmi_addr; 1198 addrs = ha->addr;
1200 1199
1201 /* multicast address? */ 1200 /* multicast address? */
1202 if (!(*addrs & 1)) 1201 if (!(*addrs & 1))
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 8fe86b287e51..72b579c8d812 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -602,7 +602,6 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
602 qep->tx_new = NEXT_TX(entry); 602 qep->tx_new = NEXT_TX(entry);
603 603
604 /* Get it going. */ 604 /* Get it going. */
605 dev->trans_start = jiffies;
606 sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL); 605 sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
607 606
608 dev->stats.tx_packets++; 607 dev->stats.tx_packets++;
@@ -627,7 +626,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
627static void qe_set_multicast(struct net_device *dev) 626static void qe_set_multicast(struct net_device *dev)
628{ 627{
629 struct sunqe *qep = netdev_priv(dev); 628 struct sunqe *qep = netdev_priv(dev);
630 struct dev_mc_list *dmi; 629 struct netdev_hw_addr *ha;
631 u8 new_mconfig = qep->mconfig; 630 u8 new_mconfig = qep->mconfig;
632 char *addrs; 631 char *addrs;
633 int i; 632 int i;
@@ -651,8 +650,8 @@ static void qe_set_multicast(struct net_device *dev)
651 u8 *hbytes = (unsigned char *) &hash_table[0]; 650 u8 *hbytes = (unsigned char *) &hash_table[0];
652 651
653 memset(hash_table, 0, sizeof(hash_table)); 652 memset(hash_table, 0, sizeof(hash_table));
654 netdev_for_each_mc_addr(dmi, dev) { 653 netdev_for_each_mc_addr(ha, dev) {
655 addrs = dmi->dmi_addr; 654 addrs = ha->addr;
656 655
657 if (!(*addrs & 1)) 656 if (!(*addrs & 1))
658 continue; 657 continue;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index 6b1b7cea7f6b..d281a7b34701 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -717,7 +717,6 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
717 717
718 dev_kfree_skb(skb); 718 dev_kfree_skb(skb);
719 719
720 dev->trans_start = jiffies;
721 return NETDEV_TX_OK; 720 return NETDEV_TX_OK;
722 721
723out_dropped_unlock: 722out_dropped_unlock:
@@ -763,12 +762,12 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
763 762
764static void __update_mc_list(struct vnet *vp, struct net_device *dev) 763static void __update_mc_list(struct vnet *vp, struct net_device *dev)
765{ 764{
766 struct dev_addr_list *p; 765 struct netdev_hw_addr *ha;
767 766
768 netdev_for_each_mc_addr(p, dev) { 767 netdev_for_each_mc_addr(ha, dev) {
769 struct vnet_mcast_entry *m; 768 struct vnet_mcast_entry *m;
770 769
771 m = __vnet_mc_find(vp, p->dmi_addr); 770 m = __vnet_mc_find(vp, ha->addr);
772 if (m) { 771 if (m) {
773 m->hit = 1; 772 m->hit = 1;
774 continue; 773 continue;
@@ -778,7 +777,7 @@ static void __update_mc_list(struct vnet *vp, struct net_device *dev)
778 m = kzalloc(sizeof(*m), GFP_ATOMIC); 777 m = kzalloc(sizeof(*m), GFP_ATOMIC);
779 if (!m) 778 if (!m)
780 continue; 779 continue;
781 memcpy(m->addr, p->dmi_addr, ETH_ALEN); 780 memcpy(m->addr, ha->addr, ETH_ALEN);
782 m->hit = 1; 781 m->hit = 1;
783 782
784 m->next = vp->mcast_list; 783 m->next = vp->mcast_list;
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 49bd84c0d583..be08b75dbc15 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1357,8 +1357,6 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
1357 } 1357 }
1358 lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM; 1358 lp->tfd_start = (lp->tfd_start + 1) % TX_FD_NUM;
1359 1359
1360 dev->trans_start = jiffies;
1361
1362 /* If we just used up the very last entry in the 1360 /* If we just used up the very last entry in the
1363 * TX ring on this device, tell the queueing 1361 * TX ring on this device, tell the queueing
1364 * layer to send no more. 1362 * layer to send no more.
@@ -1954,16 +1952,16 @@ tc35815_set_multicast_list(struct net_device *dev)
1954 /* Disable promiscuous mode, use normal mode. */ 1952 /* Disable promiscuous mode, use normal mode. */
1955 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl); 1953 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
1956 } else if (!netdev_mc_empty(dev)) { 1954 } else if (!netdev_mc_empty(dev)) {
1957 struct dev_mc_list *cur_addr; 1955 struct netdev_hw_addr *ha;
1958 int i; 1956 int i;
1959 int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE); 1957 int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
1960 1958
1961 tc_writel(0, &tr->CAM_Ctl); 1959 tc_writel(0, &tr->CAM_Ctl);
1962 /* Walk the address list, and load the filter */ 1960 /* Walk the address list, and load the filter */
1963 i = 0; 1961 i = 0;
1964 netdev_for_each_mc_addr(cur_addr, dev) { 1962 netdev_for_each_mc_addr(ha, dev) {
1965 /* entry 0,1 is reserved. */ 1963 /* entry 0,1 is reserved. */
1966 tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr); 1964 tc35815_set_cam_entry(dev, i + 2, ha->addr);
1967 ena_bits |= CAM_Ena_Bit(i + 2); 1965 ena_bits |= CAM_Ena_Bit(i + 2);
1968 i++; 1966 i++;
1969 } 1967 }
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index f5493092521a..20ab16192325 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -808,7 +808,7 @@ static void bdx_setmulti(struct net_device *ndev)
808 WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0); 808 WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
809 } else if (!netdev_mc_empty(ndev)) { 809 } else if (!netdev_mc_empty(ndev)) {
810 u8 hash; 810 u8 hash;
811 struct dev_mc_list *mclist; 811 struct netdev_hw_addr *ha;
812 u32 reg, val; 812 u32 reg, val;
813 813
814 /* set IMF to deny all multicast frames */ 814 /* set IMF to deny all multicast frames */
@@ -825,10 +825,10 @@ static void bdx_setmulti(struct net_device *ndev)
825 * into RX_MAC_MCST regs. we skip this phase now and accept ALL 825 * into RX_MAC_MCST regs. we skip this phase now and accept ALL
826 * multicast frames throu IMF */ 826 * multicast frames throu IMF */
827 /* accept the rest of addresses throu IMF */ 827 /* accept the rest of addresses throu IMF */
828 netdev_for_each_mc_addr(mclist, ndev) { 828 netdev_for_each_mc_addr(ha, ndev) {
829 hash = 0; 829 hash = 0;
830 for (i = 0; i < ETH_ALEN; i++) 830 for (i = 0; i < ETH_ALEN; i++)
831 hash ^= mclist->dmi_addr[i]; 831 hash ^= ha->addr[i];
832 reg = regRX_MCST_HASH0 + ((hash >> 5) << 2); 832 reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
833 val = READ_REG(priv, reg); 833 val = READ_REG(priv, reg);
834 val |= (1 << (hash % 32)); 834 val |= (1 << (hash % 32));
@@ -1303,7 +1303,6 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
1303 priv->net_stats.rx_bytes += len; 1303 priv->net_stats.rx_bytes += len;
1304 1304
1305 skb_put(skb, len); 1305 skb_put(skb, len);
1306 skb->dev = priv->ndev;
1307 skb->ip_summed = CHECKSUM_UNNECESSARY; 1306 skb->ip_summed = CHECKSUM_UNNECESSARY;
1308 skb->protocol = eth_type_trans(skb, priv->ndev); 1307 skb->protocol = eth_type_trans(skb, priv->ndev);
1309 1308
@@ -1509,7 +1508,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
1509 int nr_frags = skb_shinfo(skb)->nr_frags; 1508 int nr_frags = skb_shinfo(skb)->nr_frags;
1510 int i; 1509 int i;
1511 1510
1512 db->wptr->len = skb->len - skb->data_len; 1511 db->wptr->len = skb_headlen(skb);
1513 db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data, 1512 db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
1514 db->wptr->len, PCI_DMA_TODEVICE); 1513 db->wptr->len, PCI_DMA_TODEVICE);
1515 pbl->len = CPU_CHIP_SWAP32(db->wptr->len); 1514 pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
@@ -2034,7 +2033,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2034 /************** priv ****************/ 2033 /************** priv ****************/
2035 priv = nic->priv[port] = netdev_priv(ndev); 2034 priv = nic->priv[port] = netdev_priv(ndev);
2036 2035
2037 memset(priv, 0, sizeof(struct bdx_priv));
2038 priv->pBdxRegs = nic->regs + port * 0x8000; 2036 priv->pBdxRegs = nic->regs + port * 0x8000;
2039 priv->port = port; 2037 priv->port = port;
2040 priv->pdev = pdev; 2038 priv->pdev = pdev;
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 22cf1c446de3..573054ae7b58 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
67#include "tg3.h" 67#include "tg3.h"
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define DRV_MODULE_VERSION "3.108" 70#define DRV_MODULE_VERSION "3.110"
71#define DRV_MODULE_RELDATE "February 17, 2010" 71#define DRV_MODULE_RELDATE "April 9, 2010"
72 72
73#define TG3_DEF_MAC_MODE 0 73#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0 74#define TG3_DEF_RX_MODE 0
@@ -101,7 +101,7 @@
101#define TG3_DEF_RX_RING_PENDING 200 101#define TG3_DEF_RX_RING_PENDING 200
102#define TG3_RX_JUMBO_RING_SIZE 256 102#define TG3_RX_JUMBO_RING_SIZE 256
103#define TG3_DEF_RX_JUMBO_RING_PENDING 100 103#define TG3_DEF_RX_JUMBO_RING_PENDING 100
104#define TG3_RSS_INDIR_TBL_SIZE 128 104#define TG3_RSS_INDIR_TBL_SIZE 128
105 105
106/* Do not place this n-ring entries value into the tp struct itself, 106/* Do not place this n-ring entries value into the tp struct itself,
107 * we really want to expose these constants to GCC so that modulo et 107 * we really want to expose these constants to GCC so that modulo et
@@ -126,6 +126,9 @@
126 TG3_TX_RING_SIZE) 126 TG3_TX_RING_SIZE)
127#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 127#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128 128
129#define TG3_RX_DMA_ALIGN 16
130#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
131
129#define TG3_DMA_BYTE_ENAB 64 132#define TG3_DMA_BYTE_ENAB 64
130 133
131#define TG3_RX_STD_DMA_SZ 1536 134#define TG3_RX_STD_DMA_SZ 1536
@@ -142,6 +145,26 @@
142#define TG3_RX_JMB_BUFF_RING_SIZE \ 145#define TG3_RX_JMB_BUFF_RING_SIZE \
143 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE) 146 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
144 147
148#define TG3_RSS_MIN_NUM_MSIX_VECS 2
149
150/* Due to a hardware bug, the 5701 can only DMA to memory addresses
151 * that are at least dword aligned when used in PCIX mode. The driver
152 * works around this bug by double copying the packet. This workaround
153 * is built into the normal double copy length check for efficiency.
154 *
155 * However, the double copy is only necessary on those architectures
156 * where unaligned memory accesses are inefficient. For those architectures
157 * where unaligned memory accesses incur little penalty, we can reintegrate
158 * the 5701 in the normal rx path. Doing so saves a device structure
159 * dereference by hardcoding the double copy threshold in place.
160 */
161#define TG3_RX_COPY_THRESHOLD 256
162#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
163 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
164#else
165 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
166#endif
167
145/* minimum number of free TX descriptors required to wake up TX process */ 168/* minimum number of free TX descriptors required to wake up TX process */
146#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) 169#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
147 170
@@ -152,6 +175,8 @@
152 175
153#define TG3_NUM_TEST 6 176#define TG3_NUM_TEST 6
154 177
178#define TG3_FW_UPDATE_TIMEOUT_SEC 5
179
155#define FIRMWARE_TG3 "tigon/tg3.bin" 180#define FIRMWARE_TG3 "tigon/tg3.bin"
156#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin" 181#define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
157#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin" 182#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
@@ -167,8 +192,6 @@ MODULE_FIRMWARE(FIRMWARE_TG3);
167MODULE_FIRMWARE(FIRMWARE_TG3TSO); 192MODULE_FIRMWARE(FIRMWARE_TG3TSO);
168MODULE_FIRMWARE(FIRMWARE_TG3TSO5); 193MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
169 194
170#define TG3_RSS_MIN_NUM_MSIX_VECS 2
171
172static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */ 195static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
173module_param(tg3_debug, int, 0); 196module_param(tg3_debug, int, 0);
174MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value"); 197MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
@@ -360,7 +383,7 @@ static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
360 383
361static u32 tg3_read32(struct tg3 *tp, u32 off) 384static u32 tg3_read32(struct tg3 *tp, u32 off)
362{ 385{
363 return (readl(tp->regs + off)); 386 return readl(tp->regs + off);
364} 387}
365 388
366static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val) 389static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
@@ -370,7 +393,7 @@ static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
370 393
371static u32 tg3_ape_read32(struct tg3 *tp, u32 off) 394static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
372{ 395{
373 return (readl(tp->aperegs + off)); 396 return readl(tp->aperegs + off);
374} 397}
375 398
376static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val) 399static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
@@ -488,7 +511,7 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
488 511
489static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off) 512static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
490{ 513{
491 return (readl(tp->regs + off + GRCMBOX_BASE)); 514 return readl(tp->regs + off + GRCMBOX_BASE);
492} 515}
493 516
494static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val) 517static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
@@ -496,16 +519,16 @@ static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
496 writel(val, tp->regs + off + GRCMBOX_BASE); 519 writel(val, tp->regs + off + GRCMBOX_BASE);
497} 520}
498 521
499#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val) 522#define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
500#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val)) 523#define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
501#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val) 524#define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
502#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val) 525#define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
503#define tr32_mailbox(reg) tp->read32_mbox(tp, reg) 526#define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
504 527
505#define tw32(reg,val) tp->write32(tp, reg, val) 528#define tw32(reg, val) tp->write32(tp, reg, val)
506#define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0) 529#define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
507#define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us)) 530#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
508#define tr32(reg) tp->read32(tp, reg) 531#define tr32(reg) tp->read32(tp, reg)
509 532
510static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val) 533static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
511{ 534{
@@ -579,11 +602,11 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
579 return 0; 602 return 0;
580 603
581 switch (locknum) { 604 switch (locknum) {
582 case TG3_APE_LOCK_GRC: 605 case TG3_APE_LOCK_GRC:
583 case TG3_APE_LOCK_MEM: 606 case TG3_APE_LOCK_MEM:
584 break; 607 break;
585 default: 608 default:
586 return -EINVAL; 609 return -EINVAL;
587 } 610 }
588 611
589 off = 4 * locknum; 612 off = 4 * locknum;
@@ -617,11 +640,11 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
617 return; 640 return;
618 641
619 switch (locknum) { 642 switch (locknum) {
620 case TG3_APE_LOCK_GRC: 643 case TG3_APE_LOCK_GRC:
621 case TG3_APE_LOCK_MEM: 644 case TG3_APE_LOCK_MEM:
622 break; 645 break;
623 default: 646 default:
624 return; 647 return;
625 } 648 }
626 649
627 off = 4 * locknum; 650 off = 4 * locknum;
@@ -651,6 +674,7 @@ static void tg3_enable_ints(struct tg3 *tp)
651 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE; 674 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
652 for (i = 0; i < tp->irq_cnt; i++) { 675 for (i = 0; i < tp->irq_cnt; i++) {
653 struct tg3_napi *tnapi = &tp->napi[i]; 676 struct tg3_napi *tnapi = &tp->napi[i];
677
654 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 678 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
655 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) 679 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
656 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24); 680 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -1098,7 +1122,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1098 1122
1099 i = mdiobus_register(tp->mdio_bus); 1123 i = mdiobus_register(tp->mdio_bus);
1100 if (i) { 1124 if (i) {
1101 netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i); 1125 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1102 mdiobus_free(tp->mdio_bus); 1126 mdiobus_free(tp->mdio_bus);
1103 return i; 1127 return i;
1104 } 1128 }
@@ -1106,7 +1130,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1106 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 1130 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1107 1131
1108 if (!phydev || !phydev->drv) { 1132 if (!phydev || !phydev->drv) {
1109 netdev_warn(tp->dev, "No PHY devices\n"); 1133 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1110 mdiobus_unregister(tp->mdio_bus); 1134 mdiobus_unregister(tp->mdio_bus);
1111 mdiobus_free(tp->mdio_bus); 1135 mdiobus_free(tp->mdio_bus);
1112 return -ENODEV; 1136 return -ENODEV;
@@ -1437,7 +1461,7 @@ static void tg3_adjust_link(struct net_device *dev)
1437 phydev->speed != tp->link_config.active_speed || 1461 phydev->speed != tp->link_config.active_speed ||
1438 phydev->duplex != tp->link_config.active_duplex || 1462 phydev->duplex != tp->link_config.active_duplex ||
1439 oldflowctrl != tp->link_config.active_flowctrl) 1463 oldflowctrl != tp->link_config.active_flowctrl)
1440 linkmesg = 1; 1464 linkmesg = 1;
1441 1465
1442 tp->link_config.active_speed = phydev->speed; 1466 tp->link_config.active_speed = phydev->speed;
1443 tp->link_config.active_duplex = phydev->duplex; 1467 tp->link_config.active_duplex = phydev->duplex;
@@ -1464,7 +1488,7 @@ static int tg3_phy_init(struct tg3 *tp)
1464 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, 1488 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1465 phydev->dev_flags, phydev->interface); 1489 phydev->dev_flags, phydev->interface);
1466 if (IS_ERR(phydev)) { 1490 if (IS_ERR(phydev)) {
1467 netdev_err(tp->dev, "Could not attach to PHY\n"); 1491 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1468 return PTR_ERR(phydev); 1492 return PTR_ERR(phydev);
1469 } 1493 }
1470 1494
@@ -1855,8 +1879,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1855 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) { 1879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1856 /* Set Extended packet length bit for jumbo frames */ 1880 /* Set Extended packet length bit for jumbo frames */
1857 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400); 1881 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1858 } 1882 } else {
1859 else {
1860 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 1883 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1861 } 1884 }
1862 1885
@@ -1974,8 +1997,7 @@ out:
1974 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f); 1997 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1975 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2); 1998 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1976 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400); 1999 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1977 } 2000 } else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1978 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1979 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00); 2001 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1980 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a); 2002 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1981 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) { 2003 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
@@ -2007,8 +2029,8 @@ out:
2007 u32 phy_reg; 2029 u32 phy_reg;
2008 2030
2009 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg)) 2031 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
2010 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2032 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2011 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC); 2033 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2012 } 2034 }
2013 2035
2014 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 2036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -3425,7 +3447,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3425 ap->rxconfig = rx_cfg_reg; 3447 ap->rxconfig = rx_cfg_reg;
3426 ret = ANEG_OK; 3448 ret = ANEG_OK;
3427 3449
3428 switch(ap->state) { 3450 switch (ap->state) {
3429 case ANEG_STATE_UNKNOWN: 3451 case ANEG_STATE_UNKNOWN:
3430 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) 3452 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3431 ap->state = ANEG_STATE_AN_ENABLE; 3453 ap->state = ANEG_STATE_AN_ENABLE;
@@ -3463,11 +3485,10 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3463 /* fallthru */ 3485 /* fallthru */
3464 case ANEG_STATE_RESTART: 3486 case ANEG_STATE_RESTART:
3465 delta = ap->cur_time - ap->link_time; 3487 delta = ap->cur_time - ap->link_time;
3466 if (delta > ANEG_STATE_SETTLE_TIME) { 3488 if (delta > ANEG_STATE_SETTLE_TIME)
3467 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; 3489 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3468 } else { 3490 else
3469 ret = ANEG_TIMER_ENAB; 3491 ret = ANEG_TIMER_ENAB;
3470 }
3471 break; 3492 break;
3472 3493
3473 case ANEG_STATE_DISABLE_LINK_OK: 3494 case ANEG_STATE_DISABLE_LINK_OK:
@@ -3491,9 +3512,8 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3491 break; 3512 break;
3492 3513
3493 case ANEG_STATE_ABILITY_DETECT: 3514 case ANEG_STATE_ABILITY_DETECT:
3494 if (ap->ability_match != 0 && ap->rxconfig != 0) { 3515 if (ap->ability_match != 0 && ap->rxconfig != 0)
3495 ap->state = ANEG_STATE_ACK_DETECT_INIT; 3516 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3496 }
3497 break; 3517 break;
3498 3518
3499 case ANEG_STATE_ACK_DETECT_INIT: 3519 case ANEG_STATE_ACK_DETECT_INIT:
@@ -4171,9 +4191,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4171 current_duplex = DUPLEX_FULL; 4191 current_duplex = DUPLEX_FULL;
4172 else 4192 else
4173 current_duplex = DUPLEX_HALF; 4193 current_duplex = DUPLEX_HALF;
4174 } 4194 } else {
4175 else
4176 current_link_up = 0; 4195 current_link_up = 0;
4196 }
4177 } 4197 }
4178 } 4198 }
4179 4199
@@ -4211,6 +4231,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
4211 tp->serdes_counter--; 4231 tp->serdes_counter--;
4212 return; 4232 return;
4213 } 4233 }
4234
4214 if (!netif_carrier_ok(tp->dev) && 4235 if (!netif_carrier_ok(tp->dev) &&
4215 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 4236 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4216 u32 bmcr; 4237 u32 bmcr;
@@ -4240,10 +4261,9 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
4240 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT; 4261 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4241 } 4262 }
4242 } 4263 }
4243 } 4264 } else if (netif_carrier_ok(tp->dev) &&
4244 else if (netif_carrier_ok(tp->dev) && 4265 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4245 (tp->link_config.autoneg == AUTONEG_ENABLE) && 4266 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4246 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4247 u32 phy2; 4267 u32 phy2;
4248 4268
4249 /* Select expansion interrupt status register */ 4269 /* Select expansion interrupt status register */
@@ -4266,13 +4286,12 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4266{ 4286{
4267 int err; 4287 int err;
4268 4288
4269 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 4289 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
4270 err = tg3_setup_fiber_phy(tp, force_reset); 4290 err = tg3_setup_fiber_phy(tp, force_reset);
4271 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 4291 else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
4272 err = tg3_setup_fiber_mii_phy(tp, force_reset); 4292 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4273 } else { 4293 else
4274 err = tg3_setup_copper_phy(tp, force_reset); 4294 err = tg3_setup_copper_phy(tp, force_reset);
4275 }
4276 4295
4277 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { 4296 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4278 u32 val, scale; 4297 u32 val, scale;
@@ -4335,8 +4354,11 @@ static void tg3_tx_recover(struct tg3 *tp)
4335 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || 4354 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4336 tp->write32_tx_mbox == tg3_write_indirect_mbox); 4355 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4337 4356
4338 netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n" 4357 netdev_warn(tp->dev,
4339 "Please report the problem to the driver maintainer and include system chipset information.\n"); 4358 "The system may be re-ordering memory-mapped I/O "
4359 "cycles to the network device, attempting to recover. "
4360 "Please report the problem to the driver maintainer "
4361 "and include system chipset information.\n");
4340 4362
4341 spin_lock(&tp->lock); 4363 spin_lock(&tp->lock);
4342 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; 4364 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
@@ -4378,7 +4400,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
4378 } 4400 }
4379 4401
4380 pci_unmap_single(tp->pdev, 4402 pci_unmap_single(tp->pdev,
4381 pci_unmap_addr(ri, mapping), 4403 dma_unmap_addr(ri, mapping),
4382 skb_headlen(skb), 4404 skb_headlen(skb),
4383 PCI_DMA_TODEVICE); 4405 PCI_DMA_TODEVICE);
4384 4406
@@ -4392,7 +4414,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
4392 tx_bug = 1; 4414 tx_bug = 1;
4393 4415
4394 pci_unmap_page(tp->pdev, 4416 pci_unmap_page(tp->pdev,
4395 pci_unmap_addr(ri, mapping), 4417 dma_unmap_addr(ri, mapping),
4396 skb_shinfo(skb)->frags[i].size, 4418 skb_shinfo(skb)->frags[i].size,
4397 PCI_DMA_TODEVICE); 4419 PCI_DMA_TODEVICE);
4398 sw_idx = NEXT_TX(sw_idx); 4420 sw_idx = NEXT_TX(sw_idx);
@@ -4430,7 +4452,7 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4430 if (!ri->skb) 4452 if (!ri->skb)
4431 return; 4453 return;
4432 4454
4433 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping), 4455 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4434 map_sz, PCI_DMA_FROMDEVICE); 4456 map_sz, PCI_DMA_FROMDEVICE);
4435 dev_kfree_skb_any(ri->skb); 4457 dev_kfree_skb_any(ri->skb);
4436 ri->skb = NULL; 4458 ri->skb = NULL;
@@ -4496,7 +4518,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4496 } 4518 }
4497 4519
4498 map->skb = skb; 4520 map->skb = skb;
4499 pci_unmap_addr_set(map, mapping, mapping); 4521 dma_unmap_addr_set(map, mapping, mapping);
4500 4522
4501 desc->addr_hi = ((u64)mapping >> 32); 4523 desc->addr_hi = ((u64)mapping >> 32);
4502 desc->addr_lo = ((u64)mapping & 0xffffffff); 4524 desc->addr_lo = ((u64)mapping & 0xffffffff);
@@ -4516,8 +4538,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
4516 struct tg3 *tp = tnapi->tp; 4538 struct tg3 *tp = tnapi->tp;
4517 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 4539 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4518 struct ring_info *src_map, *dest_map; 4540 struct ring_info *src_map, *dest_map;
4519 int dest_idx;
4520 struct tg3_rx_prodring_set *spr = &tp->prodring[0]; 4541 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4542 int dest_idx;
4521 4543
4522 switch (opaque_key) { 4544 switch (opaque_key) {
4523 case RXD_OPAQUE_RING_STD: 4545 case RXD_OPAQUE_RING_STD:
@@ -4541,8 +4563,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
4541 } 4563 }
4542 4564
4543 dest_map->skb = src_map->skb; 4565 dest_map->skb = src_map->skb;
4544 pci_unmap_addr_set(dest_map, mapping, 4566 dma_unmap_addr_set(dest_map, mapping,
4545 pci_unmap_addr(src_map, mapping)); 4567 dma_unmap_addr(src_map, mapping));
4546 dest_desc->addr_hi = src_desc->addr_hi; 4568 dest_desc->addr_hi = src_desc->addr_hi;
4547 dest_desc->addr_lo = src_desc->addr_lo; 4569 dest_desc->addr_lo = src_desc->addr_lo;
4548 4570
@@ -4605,18 +4627,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4605 struct sk_buff *skb; 4627 struct sk_buff *skb;
4606 dma_addr_t dma_addr; 4628 dma_addr_t dma_addr;
4607 u32 opaque_key, desc_idx, *post_ptr; 4629 u32 opaque_key, desc_idx, *post_ptr;
4630 bool hw_vlan __maybe_unused = false;
4631 u16 vtag __maybe_unused = 0;
4608 4632
4609 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4633 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4610 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4634 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4611 if (opaque_key == RXD_OPAQUE_RING_STD) { 4635 if (opaque_key == RXD_OPAQUE_RING_STD) {
4612 ri = &tp->prodring[0].rx_std_buffers[desc_idx]; 4636 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4613 dma_addr = pci_unmap_addr(ri, mapping); 4637 dma_addr = dma_unmap_addr(ri, mapping);
4614 skb = ri->skb; 4638 skb = ri->skb;
4615 post_ptr = &std_prod_idx; 4639 post_ptr = &std_prod_idx;
4616 rx_std_posted++; 4640 rx_std_posted++;
4617 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 4641 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4618 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; 4642 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4619 dma_addr = pci_unmap_addr(ri, mapping); 4643 dma_addr = dma_unmap_addr(ri, mapping);
4620 skb = ri->skb; 4644 skb = ri->skb;
4621 post_ptr = &jmb_prod_idx; 4645 post_ptr = &jmb_prod_idx;
4622 } else 4646 } else
@@ -4638,12 +4662,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4638 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4662 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4639 ETH_FCS_LEN; 4663 ETH_FCS_LEN;
4640 4664
4641 if (len > RX_COPY_THRESHOLD && 4665 if (len > TG3_RX_COPY_THRESH(tp)) {
4642 tp->rx_offset == NET_IP_ALIGN) {
4643 /* rx_offset will likely not equal NET_IP_ALIGN
4644 * if this is a 5701 card running in PCI-X mode
4645 * [see tg3_get_invariants()]
4646 */
4647 int skb_size; 4666 int skb_size;
4648 4667
4649 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, 4668 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
@@ -4668,12 +4687,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4668 tg3_recycle_rx(tnapi, tpr, opaque_key, 4687 tg3_recycle_rx(tnapi, tpr, opaque_key,
4669 desc_idx, *post_ptr); 4688 desc_idx, *post_ptr);
4670 4689
4671 copy_skb = netdev_alloc_skb(tp->dev, 4690 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
4672 len + TG3_RAW_IP_ALIGN); 4691 TG3_RAW_IP_ALIGN);
4673 if (copy_skb == NULL) 4692 if (copy_skb == NULL)
4674 goto drop_it_no_recycle; 4693 goto drop_it_no_recycle;
4675 4694
4676 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); 4695 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
4677 skb_put(copy_skb, len); 4696 skb_put(copy_skb, len);
4678 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 4697 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4679 skb_copy_from_linear_data(skb, copy_skb->data, len); 4698 skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4699,12 +4718,29 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4699 goto next_pkt; 4718 goto next_pkt;
4700 } 4719 }
4701 4720
4721 if (desc->type_flags & RXD_FLAG_VLAN &&
4722 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
4723 vtag = desc->err_vlan & RXD_VLAN_MASK;
4702#if TG3_VLAN_TAG_USED 4724#if TG3_VLAN_TAG_USED
4703 if (tp->vlgrp != NULL && 4725 if (tp->vlgrp)
4704 desc->type_flags & RXD_FLAG_VLAN) { 4726 hw_vlan = true;
4705 vlan_gro_receive(&tnapi->napi, tp->vlgrp, 4727 else
4706 desc->err_vlan & RXD_VLAN_MASK, skb); 4728#endif
4707 } else 4729 {
4730 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4731 __skb_push(skb, VLAN_HLEN);
4732
4733 memmove(ve, skb->data + VLAN_HLEN,
4734 ETH_ALEN * 2);
4735 ve->h_vlan_proto = htons(ETH_P_8021Q);
4736 ve->h_vlan_TCI = htons(vtag);
4737 }
4738 }
4739
4740#if TG3_VLAN_TAG_USED
4741 if (hw_vlan)
4742 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4743 else
4708#endif 4744#endif
4709 napi_gro_receive(&tnapi->napi, skb); 4745 napi_gro_receive(&tnapi->napi, skb);
4710 4746
@@ -4978,7 +5014,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
4978 if (unlikely(work_done >= budget)) 5014 if (unlikely(work_done >= budget))
4979 break; 5015 break;
4980 5016
4981 /* tp->last_tag is used in tg3_restart_ints() below 5017 /* tp->last_tag is used in tg3_int_reenable() below
4982 * to tell the hw how much work has been processed, 5018 * to tell the hw how much work has been processed,
4983 * so we must read it before checking for more work. 5019 * so we must read it before checking for more work.
4984 */ 5020 */
@@ -4987,8 +5023,8 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
4987 rmb(); 5023 rmb();
4988 5024
4989 /* check for RX/TX work to do */ 5025 /* check for RX/TX work to do */
4990 if (sblk->idx[0].tx_consumer == tnapi->tx_cons && 5026 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4991 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) { 5027 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
4992 napi_complete(napi); 5028 napi_complete(napi);
4993 /* Reenable interrupts. */ 5029 /* Reenable interrupts. */
4994 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); 5030 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -5260,7 +5296,8 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5260 5296
5261 err = tg3_init_hw(tp, reset_phy); 5297 err = tg3_init_hw(tp, reset_phy);
5262 if (err) { 5298 if (err) {
5263 netdev_err(tp->dev, "Failed to re-initialize device, aborting\n"); 5299 netdev_err(tp->dev,
5300 "Failed to re-initialize device, aborting\n");
5264 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 5301 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5265 tg3_full_unlock(tp); 5302 tg3_full_unlock(tp);
5266 del_timer_sync(&tp->timer); 5303 del_timer_sync(&tp->timer);
@@ -5437,12 +5474,12 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5437 len = skb_shinfo(skb)->frags[i-1].size; 5474 len = skb_shinfo(skb)->frags[i-1].size;
5438 5475
5439 pci_unmap_single(tp->pdev, 5476 pci_unmap_single(tp->pdev,
5440 pci_unmap_addr(&tnapi->tx_buffers[entry], 5477 dma_unmap_addr(&tnapi->tx_buffers[entry],
5441 mapping), 5478 mapping),
5442 len, PCI_DMA_TODEVICE); 5479 len, PCI_DMA_TODEVICE);
5443 if (i == 0) { 5480 if (i == 0) {
5444 tnapi->tx_buffers[entry].skb = new_skb; 5481 tnapi->tx_buffers[entry].skb = new_skb;
5445 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 5482 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5446 new_addr); 5483 new_addr);
5447 } else { 5484 } else {
5448 tnapi->tx_buffers[entry].skb = NULL; 5485 tnapi->tx_buffers[entry].skb = NULL;
@@ -5492,7 +5529,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5492 struct netdev_queue *txq; 5529 struct netdev_queue *txq;
5493 unsigned int i, last; 5530 unsigned int i, last;
5494 5531
5495
5496 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 5532 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5497 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 5533 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5498 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 5534 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -5508,7 +5544,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5508 netif_tx_stop_queue(txq); 5544 netif_tx_stop_queue(txq);
5509 5545
5510 /* This is a hard error, log it. */ 5546 /* This is a hard error, log it. */
5511 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); 5547 netdev_err(dev,
5548 "BUG! Tx Ring full when queue awake!\n");
5512 } 5549 }
5513 return NETDEV_TX_BUSY; 5550 return NETDEV_TX_BUSY;
5514 } 5551 }
@@ -5552,9 +5589,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5552 5589
5553 tcp_hdr(skb)->check = 0; 5590 tcp_hdr(skb)->check = 0;
5554 5591
5555 } 5592 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5556 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5557 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5593 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5594 }
5595
5558#if TG3_VLAN_TAG_USED 5596#if TG3_VLAN_TAG_USED
5559 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) 5597 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5560 base_flags |= (TXD_FLAG_VLAN | 5598 base_flags |= (TXD_FLAG_VLAN |
@@ -5571,7 +5609,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5571 } 5609 }
5572 5610
5573 tnapi->tx_buffers[entry].skb = skb; 5611 tnapi->tx_buffers[entry].skb = skb;
5574 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 5612 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5575 5613
5576 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5614 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5577 !mss && skb->len > ETH_DATA_LEN) 5615 !mss && skb->len > ETH_DATA_LEN)
@@ -5597,7 +5635,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5597 goto dma_error; 5635 goto dma_error;
5598 5636
5599 tnapi->tx_buffers[entry].skb = NULL; 5637 tnapi->tx_buffers[entry].skb = NULL;
5600 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 5638 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5601 mapping); 5639 mapping);
5602 5640
5603 tg3_set_txd(tnapi, entry, mapping, len, 5641 tg3_set_txd(tnapi, entry, mapping, len,
@@ -5627,7 +5665,7 @@ dma_error:
5627 entry = tnapi->tx_prod; 5665 entry = tnapi->tx_prod;
5628 tnapi->tx_buffers[entry].skb = NULL; 5666 tnapi->tx_buffers[entry].skb = NULL;
5629 pci_unmap_single(tp->pdev, 5667 pci_unmap_single(tp->pdev,
5630 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), 5668 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5631 skb_headlen(skb), 5669 skb_headlen(skb),
5632 PCI_DMA_TODEVICE); 5670 PCI_DMA_TODEVICE);
5633 for (i = 0; i <= last; i++) { 5671 for (i = 0; i <= last; i++) {
@@ -5635,7 +5673,7 @@ dma_error:
5635 entry = NEXT_TX(entry); 5673 entry = NEXT_TX(entry);
5636 5674
5637 pci_unmap_page(tp->pdev, 5675 pci_unmap_page(tp->pdev,
5638 pci_unmap_addr(&tnapi->tx_buffers[entry], 5676 dma_unmap_addr(&tnapi->tx_buffers[entry],
5639 mapping), 5677 mapping),
5640 frag->size, PCI_DMA_TODEVICE); 5678 frag->size, PCI_DMA_TODEVICE);
5641 } 5679 }
@@ -5695,7 +5733,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5695 struct netdev_queue *txq; 5733 struct netdev_queue *txq;
5696 unsigned int i, last; 5734 unsigned int i, last;
5697 5735
5698
5699 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 5736 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5700 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 5737 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5701 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 5738 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -5711,7 +5748,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5711 netif_tx_stop_queue(txq); 5748 netif_tx_stop_queue(txq);
5712 5749
5713 /* This is a hard error, log it. */ 5750 /* This is a hard error, log it. */
5714 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); 5751 netdev_err(dev,
5752 "BUG! Tx Ring full when queue awake!\n");
5715 } 5753 }
5716 return NETDEV_TX_BUSY; 5754 return NETDEV_TX_BUSY;
5717 } 5755 }
@@ -5737,7 +5775,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5737 hdr_len = ip_tcp_len + tcp_opt_len; 5775 hdr_len = ip_tcp_len + tcp_opt_len;
5738 if (unlikely((ETH_HLEN + hdr_len) > 80) && 5776 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5739 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) 5777 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5740 return (tg3_tso_bug(tp, skb)); 5778 return tg3_tso_bug(tp, skb);
5741 5779
5742 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 5780 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5743 TXD_FLAG_CPU_POST_DMA); 5781 TXD_FLAG_CPU_POST_DMA);
@@ -5797,7 +5835,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5797 } 5835 }
5798 5836
5799 tnapi->tx_buffers[entry].skb = skb; 5837 tnapi->tx_buffers[entry].skb = skb;
5800 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 5838 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5801 5839
5802 would_hit_hwbug = 0; 5840 would_hit_hwbug = 0;
5803 5841
@@ -5833,7 +5871,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5833 len, PCI_DMA_TODEVICE); 5871 len, PCI_DMA_TODEVICE);
5834 5872
5835 tnapi->tx_buffers[entry].skb = NULL; 5873 tnapi->tx_buffers[entry].skb = NULL;
5836 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 5874 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5837 mapping); 5875 mapping);
5838 if (pci_dma_mapping_error(tp->pdev, mapping)) 5876 if (pci_dma_mapping_error(tp->pdev, mapping))
5839 goto dma_error; 5877 goto dma_error;
@@ -5898,7 +5936,7 @@ dma_error:
5898 entry = tnapi->tx_prod; 5936 entry = tnapi->tx_prod;
5899 tnapi->tx_buffers[entry].skb = NULL; 5937 tnapi->tx_buffers[entry].skb = NULL;
5900 pci_unmap_single(tp->pdev, 5938 pci_unmap_single(tp->pdev,
5901 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), 5939 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5902 skb_headlen(skb), 5940 skb_headlen(skb),
5903 PCI_DMA_TODEVICE); 5941 PCI_DMA_TODEVICE);
5904 for (i = 0; i <= last; i++) { 5942 for (i = 0; i <= last; i++) {
@@ -5906,7 +5944,7 @@ dma_error:
5906 entry = NEXT_TX(entry); 5944 entry = NEXT_TX(entry);
5907 5945
5908 pci_unmap_page(tp->pdev, 5946 pci_unmap_page(tp->pdev,
5909 pci_unmap_addr(&tnapi->tx_buffers[entry], 5947 dma_unmap_addr(&tnapi->tx_buffers[entry],
5910 mapping), 5948 mapping),
5911 frag->size, PCI_DMA_TODEVICE); 5949 frag->size, PCI_DMA_TODEVICE);
5912 } 5950 }
@@ -5924,9 +5962,9 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5924 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { 5962 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5925 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 5963 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5926 ethtool_op_set_tso(dev, 0); 5964 ethtool_op_set_tso(dev, 0);
5927 } 5965 } else {
5928 else
5929 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 5966 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5967 }
5930 } else { 5968 } else {
5931 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) 5969 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5932 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 5970 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
@@ -6007,7 +6045,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
6007 } 6045 }
6008} 6046}
6009 6047
6010/* Initialize tx/rx rings for packet processing. 6048/* Initialize rx rings for packet processing.
6011 * 6049 *
6012 * The chip has been shut down and the driver detached from 6050 * The chip has been shut down and the driver detached from
6013 * the networking, so no interrupts or new tx packets will 6051 * the networking, so no interrupts or new tx packets will
@@ -6058,8 +6096,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
6058 /* Now allocate fresh SKBs for each rx ring. */ 6096 /* Now allocate fresh SKBs for each rx ring. */
6059 for (i = 0; i < tp->rx_pending; i++) { 6097 for (i = 0; i < tp->rx_pending; i++) {
6060 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { 6098 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6061 netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n", 6099 netdev_warn(tp->dev,
6062 i, tp->rx_pending); 6100 "Using a smaller RX standard ring. Only "
6101 "%d out of %d buffers were allocated "
6102 "successfully\n", i, tp->rx_pending);
6063 if (i == 0) 6103 if (i == 0)
6064 goto initfail; 6104 goto initfail;
6065 tp->rx_pending = i; 6105 tp->rx_pending = i;
@@ -6088,8 +6128,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
6088 6128
6089 for (i = 0; i < tp->rx_jumbo_pending; i++) { 6129 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6090 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { 6130 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6091 netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n", 6131 netdev_warn(tp->dev,
6092 i, tp->rx_jumbo_pending); 6132 "Using a smaller RX jumbo ring. Only %d "
6133 "out of %d buffers were allocated "
6134 "successfully\n", i, tp->rx_jumbo_pending);
6093 if (i == 0) 6135 if (i == 0)
6094 goto initfail; 6136 goto initfail;
6095 tp->rx_jumbo_pending = i; 6137 tp->rx_jumbo_pending = i;
@@ -6187,7 +6229,7 @@ static void tg3_free_rings(struct tg3 *tp)
6187 } 6229 }
6188 6230
6189 pci_unmap_single(tp->pdev, 6231 pci_unmap_single(tp->pdev,
6190 pci_unmap_addr(txp, mapping), 6232 dma_unmap_addr(txp, mapping),
6191 skb_headlen(skb), 6233 skb_headlen(skb),
6192 PCI_DMA_TODEVICE); 6234 PCI_DMA_TODEVICE);
6193 txp->skb = NULL; 6235 txp->skb = NULL;
@@ -6197,7 +6239,7 @@ static void tg3_free_rings(struct tg3 *tp)
6197 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) { 6239 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6198 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)]; 6240 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6199 pci_unmap_page(tp->pdev, 6241 pci_unmap_page(tp->pdev,
6200 pci_unmap_addr(txp, mapping), 6242 dma_unmap_addr(txp, mapping),
6201 skb_shinfo(skb)->frags[k].size, 6243 skb_shinfo(skb)->frags[k].size,
6202 PCI_DMA_TODEVICE); 6244 PCI_DMA_TODEVICE);
6203 i++; 6245 i++;
@@ -6433,8 +6475,9 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
6433 } 6475 }
6434 6476
6435 if (i == MAX_WAIT_CNT && !silent) { 6477 if (i == MAX_WAIT_CNT && !silent) {
6436 pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", 6478 dev_err(&tp->pdev->dev,
6437 ofs, enable_bit); 6479 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6480 ofs, enable_bit);
6438 return -ENODEV; 6481 return -ENODEV;
6439 } 6482 }
6440 6483
@@ -6480,8 +6523,9 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
6480 break; 6523 break;
6481 } 6524 }
6482 if (i >= MAX_WAIT_CNT) { 6525 if (i >= MAX_WAIT_CNT) {
6483 netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n", 6526 dev_err(&tp->pdev->dev,
6484 __func__, tr32(MAC_TX_MODE)); 6527 "%s timed out, TX_MODE_ENABLE will not clear "
6528 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6485 err |= -ENODEV; 6529 err |= -ENODEV;
6486 } 6530 }
6487 6531
@@ -6551,35 +6595,35 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6551 return; 6595 return;
6552 6596
6553 switch (kind) { 6597 switch (kind) {
6554 case RESET_KIND_INIT: 6598 case RESET_KIND_INIT:
6555 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 6599 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6556 APE_HOST_SEG_SIG_MAGIC); 6600 APE_HOST_SEG_SIG_MAGIC);
6557 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, 6601 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6558 APE_HOST_SEG_LEN_MAGIC); 6602 APE_HOST_SEG_LEN_MAGIC);
6559 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); 6603 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6560 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); 6604 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6561 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, 6605 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6562 APE_HOST_DRIVER_ID_MAGIC); 6606 APE_HOST_DRIVER_ID_MAGIC);
6563 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, 6607 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6564 APE_HOST_BEHAV_NO_PHYLOCK); 6608 APE_HOST_BEHAV_NO_PHYLOCK);
6565 6609
6566 event = APE_EVENT_STATUS_STATE_START; 6610 event = APE_EVENT_STATUS_STATE_START;
6567 break; 6611 break;
6568 case RESET_KIND_SHUTDOWN: 6612 case RESET_KIND_SHUTDOWN:
6569 /* With the interface we are currently using, 6613 /* With the interface we are currently using,
6570 * APE does not track driver state. Wiping 6614 * APE does not track driver state. Wiping
6571 * out the HOST SEGMENT SIGNATURE forces 6615 * out the HOST SEGMENT SIGNATURE forces
6572 * the APE to assume OS absent status. 6616 * the APE to assume OS absent status.
6573 */ 6617 */
6574 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); 6618 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6575 6619
6576 event = APE_EVENT_STATUS_STATE_UNLOAD; 6620 event = APE_EVENT_STATUS_STATE_UNLOAD;
6577 break; 6621 break;
6578 case RESET_KIND_SUSPEND: 6622 case RESET_KIND_SUSPEND:
6579 event = APE_EVENT_STATUS_STATE_SUSPEND; 6623 event = APE_EVENT_STATUS_STATE_SUSPEND;
6580 break; 6624 break;
6581 default: 6625 default:
6582 return; 6626 return;
6583 } 6627 }
6584 6628
6585 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; 6629 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
@@ -7156,7 +7200,8 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
7156 7200
7157 if (cpu_base == TX_CPU_BASE && 7201 if (cpu_base == TX_CPU_BASE &&
7158 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 7202 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7159 netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n", 7203 netdev_err(tp->dev,
7204 "%s: Trying to load TX cpu firmware which is 5705\n",
7160 __func__); 7205 __func__);
7161 return -EINVAL; 7206 return -EINVAL;
7162 } 7207 }
@@ -7236,7 +7281,8 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7236 udelay(1000); 7281 udelay(1000);
7237 } 7282 }
7238 if (i >= 5) { 7283 if (i >= 5) {
7239 netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n", 7284 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7285 "should be %08x\n", __func__,
7240 tr32(RX_CPU_BASE + CPU_PC), info.fw_base); 7286 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7241 return -ENODEV; 7287 return -ENODEV;
7242 } 7288 }
@@ -7300,7 +7346,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
7300 udelay(1000); 7346 udelay(1000);
7301 } 7347 }
7302 if (i >= 5) { 7348 if (i >= 5) {
7303 netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n", 7349 netdev_err(tp->dev,
7350 "%s fails to set CPU PC, is %08x should be %08x\n",
7304 __func__, tr32(cpu_base + CPU_PC), info.fw_base); 7351 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7305 return -ENODEV; 7352 return -ENODEV;
7306 } 7353 }
@@ -7568,9 +7615,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7568 7615
7569 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 7616 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7570 7617
7571 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) { 7618 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7572 tg3_abort_hw(tp, 1); 7619 tg3_abort_hw(tp, 1);
7573 }
7574 7620
7575 if (reset_phy) 7621 if (reset_phy)
7576 tg3_phy_reset(tp); 7622 tg3_phy_reset(tp);
@@ -7631,6 +7677,25 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7631 tw32(GRC_MODE, grc_mode); 7677 tw32(GRC_MODE, grc_mode);
7632 } 7678 }
7633 7679
7680 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7681 u32 grc_mode = tr32(GRC_MODE);
7682
7683 /* Access the lower 1K of PL PCIE block registers. */
7684 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7685 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7686
7687 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
7688 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7689 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7690
7691 tw32(GRC_MODE, grc_mode);
7692
7693 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7694 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7695 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7696 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7697 }
7698
7634 /* This works around an issue with Athlon chipsets on 7699 /* This works around an issue with Athlon chipsets on
7635 * B3 tigon3 silicon. This bit has no effect on any 7700 * B3 tigon3 silicon. This bit has no effect on any
7636 * other revision. But do not set this on PCI Express 7701 * other revision. But do not set this on PCI Express
@@ -7679,6 +7744,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7679 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 7744 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7680 val = tr32(TG3PCI_DMA_RW_CTRL) & 7745 val = tr32(TG3PCI_DMA_RW_CTRL) &
7681 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 7746 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7747 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7748 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
7682 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); 7749 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7683 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && 7750 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7684 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { 7751 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
@@ -7723,8 +7790,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7723 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); 7790 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7724 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); 7791 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7725 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); 7792 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7726 } 7793 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7727 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7728 int fw_len; 7794 int fw_len;
7729 7795
7730 fw_len = tp->fw_len; 7796 fw_len = tp->fw_len;
@@ -7839,9 +7905,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7839 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7840 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 7906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7841 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | 7907 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7842 (RX_STD_MAX_SIZE << 2); 7908 (TG3_RX_STD_DMA_SZ << 2);
7843 else 7909 else
7844 val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT; 7910 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
7845 } else 7911 } else
7846 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; 7912 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7847 7913
@@ -8476,8 +8542,8 @@ static void tg3_timer(unsigned long __opaque)
8476 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 8542 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8477 FWCMD_NICDRV_ALIVE3); 8543 FWCMD_NICDRV_ALIVE3);
8478 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 8544 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8479 /* 5 seconds timeout */ 8545 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8480 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); 8546 TG3_FW_UPDATE_TIMEOUT_SEC);
8481 8547
8482 tg3_generate_fw_event(tp); 8548 tg3_generate_fw_event(tp);
8483 } 8549 }
@@ -8625,14 +8691,16 @@ static int tg3_test_msi(struct tg3 *tp)
8625 return err; 8691 return err;
8626 8692
8627 /* MSI test failed, go back to INTx mode */ 8693 /* MSI test failed, go back to INTx mode */
8628 netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n" 8694 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
8629 "Please report this failure to the PCI maintainer and include system chipset information\n"); 8695 "to INTx mode. Please report this failure to the PCI "
8696 "maintainer and include system chipset information\n");
8630 8697
8631 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 8698 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8632 8699
8633 pci_disable_msi(tp->pdev); 8700 pci_disable_msi(tp->pdev);
8634 8701
8635 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI; 8702 tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8703 tp->napi[0].irq_vec = tp->pdev->irq;
8636 8704
8637 err = tg3_request_irq(tp, 0); 8705 err = tg3_request_irq(tp, 0);
8638 if (err) 8706 if (err)
@@ -8738,7 +8806,8 @@ static void tg3_ints_init(struct tg3 *tp)
8738 /* All MSI supporting chips should support tagged 8806 /* All MSI supporting chips should support tagged
8739 * status. Assert that this is the case. 8807 * status. Assert that this is the case.
8740 */ 8808 */
8741 netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n"); 8809 netdev_warn(tp->dev,
8810 "MSI without TAGGED_STATUS? Not using MSI\n");
8742 goto defcfg; 8811 goto defcfg;
8743 } 8812 }
8744 8813
@@ -8913,236 +8982,6 @@ err_out1:
8913 return err; 8982 return err;
8914} 8983}
8915 8984
8916#if 0
8917/*static*/ void tg3_dump_state(struct tg3 *tp)
8918{
8919 u32 val32, val32_2, val32_3, val32_4, val32_5;
8920 u16 val16;
8921 int i;
8922 struct tg3_hw_status *sblk = tp->napi[0]->hw_status;
8923
8924 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8925 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8926 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8927 val16, val32);
8928
8929 /* MAC block */
8930 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8931 tr32(MAC_MODE), tr32(MAC_STATUS));
8932 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8933 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8934 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8935 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8936 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8937 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8938
8939 /* Send data initiator control block */
8940 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8941 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8942 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8943 tr32(SNDDATAI_STATSCTRL));
8944
8945 /* Send data completion control block */
8946 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8947
8948 /* Send BD ring selector block */
8949 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8950 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8951
8952 /* Send BD initiator control block */
8953 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8954 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8955
8956 /* Send BD completion control block */
8957 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8958
8959 /* Receive list placement control block */
8960 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8961 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8962 printk(" RCVLPC_STATSCTRL[%08x]\n",
8963 tr32(RCVLPC_STATSCTRL));
8964
8965 /* Receive data and receive BD initiator control block */
8966 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8967 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8968
8969 /* Receive data completion control block */
8970 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8971 tr32(RCVDCC_MODE));
8972
8973 /* Receive BD initiator control block */
8974 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8975 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8976
8977 /* Receive BD completion control block */
8978 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8979 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8980
8981 /* Receive list selector control block */
8982 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8983 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8984
8985 /* Mbuf cluster free block */
8986 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8987 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8988
8989 /* Host coalescing control block */
8990 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8991 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8992 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8993 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8994 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8995 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8996 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8997 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8998 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8999 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
9000 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
9001 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
9002
9003 /* Memory arbiter control block */
9004 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
9005 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
9006
9007 /* Buffer manager control block */
9008 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
9009 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
9010 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
9011 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
9012 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
9013 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
9014 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
9015 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
9016
9017 /* Read DMA control block */
9018 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
9019 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
9020
9021 /* Write DMA control block */
9022 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
9023 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
9024
9025 /* DMA completion block */
9026 printk("DEBUG: DMAC_MODE[%08x]\n",
9027 tr32(DMAC_MODE));
9028
9029 /* GRC block */
9030 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
9031 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
9032 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
9033 tr32(GRC_LOCAL_CTRL));
9034
9035 /* TG3_BDINFOs */
9036 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
9037 tr32(RCVDBDI_JUMBO_BD + 0x0),
9038 tr32(RCVDBDI_JUMBO_BD + 0x4),
9039 tr32(RCVDBDI_JUMBO_BD + 0x8),
9040 tr32(RCVDBDI_JUMBO_BD + 0xc));
9041 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
9042 tr32(RCVDBDI_STD_BD + 0x0),
9043 tr32(RCVDBDI_STD_BD + 0x4),
9044 tr32(RCVDBDI_STD_BD + 0x8),
9045 tr32(RCVDBDI_STD_BD + 0xc));
9046 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
9047 tr32(RCVDBDI_MINI_BD + 0x0),
9048 tr32(RCVDBDI_MINI_BD + 0x4),
9049 tr32(RCVDBDI_MINI_BD + 0x8),
9050 tr32(RCVDBDI_MINI_BD + 0xc));
9051
9052 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
9053 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
9054 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
9055 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
9056 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
9057 val32, val32_2, val32_3, val32_4);
9058
9059 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
9060 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
9061 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
9062 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
9063 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
9064 val32, val32_2, val32_3, val32_4);
9065
9066 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
9067 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
9068 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
9069 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
9070 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
9071 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
9072 val32, val32_2, val32_3, val32_4, val32_5);
9073
9074 /* SW status block */
9075 printk(KERN_DEBUG
9076 "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
9077 sblk->status,
9078 sblk->status_tag,
9079 sblk->rx_jumbo_consumer,
9080 sblk->rx_consumer,
9081 sblk->rx_mini_consumer,
9082 sblk->idx[0].rx_producer,
9083 sblk->idx[0].tx_consumer);
9084
9085 /* SW statistics block */
9086 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
9087 ((u32 *)tp->hw_stats)[0],
9088 ((u32 *)tp->hw_stats)[1],
9089 ((u32 *)tp->hw_stats)[2],
9090 ((u32 *)tp->hw_stats)[3]);
9091
9092 /* Mailboxes */
9093 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
9094 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
9095 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
9096 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
9097 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
9098
9099 /* NIC side send descriptors. */
9100 for (i = 0; i < 6; i++) {
9101 unsigned long txd;
9102
9103 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
9104 + (i * sizeof(struct tg3_tx_buffer_desc));
9105 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
9106 i,
9107 readl(txd + 0x0), readl(txd + 0x4),
9108 readl(txd + 0x8), readl(txd + 0xc));
9109 }
9110
9111 /* NIC side RX descriptors. */
9112 for (i = 0; i < 6; i++) {
9113 unsigned long rxd;
9114
9115 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
9116 + (i * sizeof(struct tg3_rx_buffer_desc));
9117 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
9118 i,
9119 readl(rxd + 0x0), readl(rxd + 0x4),
9120 readl(rxd + 0x8), readl(rxd + 0xc));
9121 rxd += (4 * sizeof(u32));
9122 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
9123 i,
9124 readl(rxd + 0x0), readl(rxd + 0x4),
9125 readl(rxd + 0x8), readl(rxd + 0xc));
9126 }
9127
9128 for (i = 0; i < 6; i++) {
9129 unsigned long rxd;
9130
9131 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
9132 + (i * sizeof(struct tg3_rx_buffer_desc));
9133 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
9134 i,
9135 readl(rxd + 0x0), readl(rxd + 0x4),
9136 readl(rxd + 0x8), readl(rxd + 0xc));
9137 rxd += (4 * sizeof(u32));
9138 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
9139 i,
9140 readl(rxd + 0x0), readl(rxd + 0x4),
9141 readl(rxd + 0x8), readl(rxd + 0xc));
9142 }
9143}
9144#endif
9145
9146static struct net_device_stats *tg3_get_stats(struct net_device *); 8985static struct net_device_stats *tg3_get_stats(struct net_device *);
9147static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); 8986static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9148 8987
@@ -9161,9 +9000,6 @@ static int tg3_close(struct net_device *dev)
9161 tg3_phy_stop(tp); 9000 tg3_phy_stop(tp);
9162 9001
9163 tg3_full_lock(tp, 1); 9002 tg3_full_lock(tp, 1);
9164#if 0
9165 tg3_dump_state(tp);
9166#endif
9167 9003
9168 tg3_disable_ints(tp); 9004 tg3_disable_ints(tp);
9169 9005
@@ -9405,9 +9241,8 @@ static inline u32 calc_crc(unsigned char *buf, int len)
9405 9241
9406 reg >>= 1; 9242 reg >>= 1;
9407 9243
9408 if (tmp) { 9244 if (tmp)
9409 reg ^= 0xedb88320; 9245 reg ^= 0xedb88320;
9410 }
9411 } 9246 }
9412 } 9247 }
9413 9248
@@ -9451,20 +9286,20 @@ static void __tg3_set_rx_mode(struct net_device *dev)
9451 rx_mode |= RX_MODE_PROMISC; 9286 rx_mode |= RX_MODE_PROMISC;
9452 } else if (dev->flags & IFF_ALLMULTI) { 9287 } else if (dev->flags & IFF_ALLMULTI) {
9453 /* Accept all multicast. */ 9288 /* Accept all multicast. */
9454 tg3_set_multi (tp, 1); 9289 tg3_set_multi(tp, 1);
9455 } else if (netdev_mc_empty(dev)) { 9290 } else if (netdev_mc_empty(dev)) {
9456 /* Reject all multicast. */ 9291 /* Reject all multicast. */
9457 tg3_set_multi (tp, 0); 9292 tg3_set_multi(tp, 0);
9458 } else { 9293 } else {
9459 /* Accept one or more multicast(s). */ 9294 /* Accept one or more multicast(s). */
9460 struct dev_mc_list *mclist; 9295 struct netdev_hw_addr *ha;
9461 u32 mc_filter[4] = { 0, }; 9296 u32 mc_filter[4] = { 0, };
9462 u32 regidx; 9297 u32 regidx;
9463 u32 bit; 9298 u32 bit;
9464 u32 crc; 9299 u32 crc;
9465 9300
9466 netdev_for_each_mc_addr(mclist, dev) { 9301 netdev_for_each_mc_addr(ha, dev) {
9467 crc = calc_crc (mclist->dmi_addr, ETH_ALEN); 9302 crc = calc_crc(ha->addr, ETH_ALEN);
9468 bit = ~crc & 0x7f; 9303 bit = ~crc & 0x7f;
9469 regidx = (bit & 0x60) >> 5; 9304 regidx = (bit & 0x60) >> 5;
9470 bit &= 0x1f; 9305 bit &= 0x1f;
@@ -9617,7 +9452,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
9617 memcpy(data, ((char*)&val) + b_offset, b_count); 9452 memcpy(data, ((char*)&val) + b_offset, b_count);
9618 len -= b_count; 9453 len -= b_count;
9619 offset += b_count; 9454 offset += b_count;
9620 eeprom->len += b_count; 9455 eeprom->len += b_count;
9621 } 9456 }
9622 9457
9623 /* read bytes upto the last 4 byte boundary */ 9458 /* read bytes upto the last 4 byte boundary */
@@ -10165,8 +10000,8 @@ static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10165 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { 10000 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10166 if (data != 0) 10001 if (data != 0)
10167 return -EINVAL; 10002 return -EINVAL;
10168 return 0; 10003 return 0;
10169 } 10004 }
10170 10005
10171 spin_lock_bh(&tp->lock); 10006 spin_lock_bh(&tp->lock);
10172 if (data) 10007 if (data)
@@ -10185,8 +10020,8 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10185 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { 10020 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10186 if (data != 0) 10021 if (data != 0)
10187 return -EINVAL; 10022 return -EINVAL;
10188 return 0; 10023 return 0;
10189 } 10024 }
10190 10025
10191 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 10026 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10192 ethtool_op_set_tx_ipv6_csum(dev, data); 10027 ethtool_op_set_tx_ipv6_csum(dev, data);
@@ -10196,7 +10031,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10196 return 0; 10031 return 0;
10197} 10032}
10198 10033
10199static int tg3_get_sset_count (struct net_device *dev, int sset) 10034static int tg3_get_sset_count(struct net_device *dev, int sset)
10200{ 10035{
10201 switch (sset) { 10036 switch (sset) {
10202 case ETH_SS_TEST: 10037 case ETH_SS_TEST:
@@ -10208,7 +10043,7 @@ static int tg3_get_sset_count (struct net_device *dev, int sset)
10208 } 10043 }
10209} 10044}
10210 10045
10211static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) 10046static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10212{ 10047{
10213 switch (stringset) { 10048 switch (stringset) {
10214 case ETH_SS_STATS: 10049 case ETH_SS_STATS:
@@ -10255,7 +10090,7 @@ static int tg3_phys_id(struct net_device *dev, u32 data)
10255 return 0; 10090 return 0;
10256} 10091}
10257 10092
10258static void tg3_get_ethtool_stats (struct net_device *dev, 10093static void tg3_get_ethtool_stats(struct net_device *dev,
10259 struct ethtool_stats *estats, u64 *tmp_stats) 10094 struct ethtool_stats *estats, u64 *tmp_stats)
10260{ 10095{
10261 struct tg3 *tp = netdev_priv(dev); 10096 struct tg3 *tp = netdev_priv(dev);
@@ -10361,8 +10196,7 @@ static int tg3_test_nvram(struct tg3 *tp)
10361 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 10196 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10362 parity[k++] = buf8[i] & msk; 10197 parity[k++] = buf8[i] & msk;
10363 i++; 10198 i++;
10364 } 10199 } else if (i == 16) {
10365 else if (i == 16) {
10366 int l; 10200 int l;
10367 u8 msk; 10201 u8 msk;
10368 10202
@@ -10460,7 +10294,7 @@ static int tg3_test_registers(struct tg3 *tp)
10460 { MAC_ADDR_0_HIGH, 0x0000, 10294 { MAC_ADDR_0_HIGH, 0x0000,
10461 0x00000000, 0x0000ffff }, 10295 0x00000000, 0x0000ffff },
10462 { MAC_ADDR_0_LOW, 0x0000, 10296 { MAC_ADDR_0_LOW, 0x0000,
10463 0x00000000, 0xffffffff }, 10297 0x00000000, 0xffffffff },
10464 { MAC_RX_MTU_SIZE, 0x0000, 10298 { MAC_RX_MTU_SIZE, 0x0000,
10465 0x00000000, 0x0000ffff }, 10299 0x00000000, 0x0000ffff },
10466 { MAC_TX_MODE, 0x0000, 10300 { MAC_TX_MODE, 0x0000,
@@ -10648,7 +10482,8 @@ static int tg3_test_registers(struct tg3 *tp)
10648 10482
10649out: 10483out:
10650 if (netif_msg_hw(tp)) 10484 if (netif_msg_hw(tp))
10651 pr_err("Register test failed at offset %x\n", offset); 10485 netdev_err(tp->dev,
10486 "Register test failed at offset %x\n", offset);
10652 tw32(offset, save_val); 10487 tw32(offset, save_val);
10653 return -EIO; 10488 return -EIO;
10654} 10489}
@@ -10824,9 +10659,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10824 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 10659 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10825 } 10660 }
10826 tw32(MAC_MODE, mac_mode); 10661 tw32(MAC_MODE, mac_mode);
10827 } 10662 } else {
10828 else
10829 return -EINVAL; 10663 return -EINVAL;
10664 }
10830 10665
10831 err = -EIO; 10666 err = -EIO;
10832 10667
@@ -10908,7 +10743,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10908 10743
10909 rx_skb = tpr->rx_std_buffers[desc_idx].skb; 10744 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10910 10745
10911 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); 10746 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10912 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); 10747 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10913 10748
10914 for (i = 14; i < tx_len; i++) { 10749 for (i = 14; i < tx_len; i++) {
@@ -11082,7 +10917,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11082 return phy_mii_ioctl(phydev, data, cmd); 10917 return phy_mii_ioctl(phydev, data, cmd);
11083 } 10918 }
11084 10919
11085 switch(cmd) { 10920 switch (cmd) {
11086 case SIOCGMIIPHY: 10921 case SIOCGMIIPHY:
11087 data->phy_id = tp->phy_addr; 10922 data->phy_id = tp->phy_addr;
11088 10923
@@ -11775,7 +11610,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
11775 tp->tg3_flags |= TG3_FLAG_NVRAM; 11610 tp->tg3_flags |= TG3_FLAG_NVRAM;
11776 11611
11777 if (tg3_nvram_lock(tp)) { 11612 if (tg3_nvram_lock(tp)) {
11778 netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n", 11613 netdev_warn(tp->dev,
11614 "Cannot get nvram lock, %s failed\n",
11779 __func__); 11615 __func__);
11780 return; 11616 return;
11781 } 11617 }
@@ -11894,7 +11730,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11894 if (ret) 11730 if (ret)
11895 break; 11731 break;
11896 11732
11897 page_off = offset & pagemask; 11733 page_off = offset & pagemask;
11898 size = pagesize; 11734 size = pagesize;
11899 if (len < size) 11735 if (len < size)
11900 size = len; 11736 size = len;
@@ -11922,7 +11758,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11922 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 11758 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11923 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 11759 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11924 11760
11925 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 11761 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11926 break; 11762 break;
11927 11763
11928 /* Issue another write enable to start the write. */ 11764 /* Issue another write enable to start the write. */
@@ -11976,7 +11812,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11976 memcpy(&data, buf + i, 4); 11812 memcpy(&data, buf + i, 4);
11977 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 11813 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11978 11814
11979 page_off = offset % tp->nvram_pagesize; 11815 page_off = offset % tp->nvram_pagesize;
11980 11816
11981 phy_addr = tg3_nvram_phys_addr(tp, offset); 11817 phy_addr = tg3_nvram_phys_addr(tp, offset);
11982 11818
@@ -11984,7 +11820,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11984 11820
11985 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; 11821 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11986 11822
11987 if ((page_off == 0) || (i == 0)) 11823 if (page_off == 0 || i == 0)
11988 nvram_cmd |= NVRAM_CMD_FIRST; 11824 nvram_cmd |= NVRAM_CMD_FIRST;
11989 if (page_off == (tp->nvram_pagesize - 4)) 11825 if (page_off == (tp->nvram_pagesize - 4))
11990 nvram_cmd |= NVRAM_CMD_LAST; 11826 nvram_cmd |= NVRAM_CMD_LAST;
@@ -12027,8 +11863,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12027 11863
12028 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) { 11864 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
12029 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 11865 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12030 } 11866 } else {
12031 else {
12032 u32 grc_mode; 11867 u32 grc_mode;
12033 11868
12034 ret = tg3_nvram_lock(tp); 11869 ret = tg3_nvram_lock(tp);
@@ -12048,8 +11883,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12048 11883
12049 ret = tg3_nvram_write_block_buffered(tp, offset, len, 11884 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12050 buf); 11885 buf);
12051 } 11886 } else {
12052 else {
12053 ret = tg3_nvram_write_block_unbuffered(tp, offset, len, 11887 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12054 buf); 11888 buf);
12055 } 11889 }
@@ -12544,11 +12378,11 @@ skip_phy_reset:
12544 return err; 12378 return err;
12545} 12379}
12546 12380
12547static void __devinit tg3_read_partno(struct tg3 *tp) 12381static void __devinit tg3_read_vpd(struct tg3 *tp)
12548{ 12382{
12549 unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */ 12383 u8 vpd_data[TG3_NVM_VPD_LEN];
12550 unsigned int block_end, rosize, len; 12384 unsigned int block_end, rosize, len;
12551 int i = 0; 12385 int j, i = 0;
12552 u32 magic; 12386 u32 magic;
12553 12387
12554 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || 12388 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
@@ -12597,6 +12431,32 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
12597 if (block_end > TG3_NVM_VPD_LEN) 12431 if (block_end > TG3_NVM_VPD_LEN)
12598 goto out_not_found; 12432 goto out_not_found;
12599 12433
12434 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12435 PCI_VPD_RO_KEYWORD_MFR_ID);
12436 if (j > 0) {
12437 len = pci_vpd_info_field_size(&vpd_data[j]);
12438
12439 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12440 if (j + len > block_end || len != 4 ||
12441 memcmp(&vpd_data[j], "1028", 4))
12442 goto partno;
12443
12444 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12445 PCI_VPD_RO_KEYWORD_VENDOR0);
12446 if (j < 0)
12447 goto partno;
12448
12449 len = pci_vpd_info_field_size(&vpd_data[j]);
12450
12451 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12452 if (j + len > block_end)
12453 goto partno;
12454
12455 memcpy(tp->fw_ver, &vpd_data[j], len);
12456 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
12457 }
12458
12459partno:
12600 i = pci_vpd_find_info_keyword(vpd_data, i, rosize, 12460 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12601 PCI_VPD_RO_KEYWORD_PARTNO); 12461 PCI_VPD_RO_KEYWORD_PARTNO);
12602 if (i < 0) 12462 if (i < 0)
@@ -12666,7 +12526,7 @@ static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12666static void __devinit tg3_read_bc_ver(struct tg3 *tp) 12526static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12667{ 12527{
12668 u32 val, offset, start, ver_offset; 12528 u32 val, offset, start, ver_offset;
12669 int i; 12529 int i, dst_off;
12670 bool newver = false; 12530 bool newver = false;
12671 12531
12672 if (tg3_nvram_read(tp, 0xc, &offset) || 12532 if (tg3_nvram_read(tp, 0xc, &offset) ||
@@ -12686,8 +12546,11 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12686 newver = true; 12546 newver = true;
12687 } 12547 }
12688 12548
12549 dst_off = strlen(tp->fw_ver);
12550
12689 if (newver) { 12551 if (newver) {
12690 if (tg3_nvram_read(tp, offset + 8, &ver_offset)) 12552 if (TG3_VER_SIZE - dst_off < 16 ||
12553 tg3_nvram_read(tp, offset + 8, &ver_offset))
12691 return; 12554 return;
12692 12555
12693 offset = offset + ver_offset - start; 12556 offset = offset + ver_offset - start;
@@ -12696,7 +12559,7 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12696 if (tg3_nvram_read_be32(tp, offset + i, &v)) 12559 if (tg3_nvram_read_be32(tp, offset + i, &v))
12697 return; 12560 return;
12698 12561
12699 memcpy(tp->fw_ver + i, &v, sizeof(v)); 12562 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
12700 } 12563 }
12701 } else { 12564 } else {
12702 u32 major, minor; 12565 u32 major, minor;
@@ -12707,7 +12570,8 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12707 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> 12570 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12708 TG3_NVM_BCVER_MAJSFT; 12571 TG3_NVM_BCVER_MAJSFT;
12709 minor = ver_offset & TG3_NVM_BCVER_MINMSK; 12572 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12710 snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor); 12573 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
12574 "v%d.%02d", major, minor);
12711 } 12575 }
12712} 12576}
12713 12577
@@ -12731,9 +12595,7 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12731{ 12595{
12732 u32 offset, major, minor, build; 12596 u32 offset, major, minor, build;
12733 12597
12734 tp->fw_ver[0] = 's'; 12598 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
12735 tp->fw_ver[1] = 'b';
12736 tp->fw_ver[2] = '\0';
12737 12599
12738 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) 12600 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12739 return; 12601 return;
@@ -12770,11 +12632,14 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12770 if (minor > 99 || build > 26) 12632 if (minor > 99 || build > 26)
12771 return; 12633 return;
12772 12634
12773 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor); 12635 offset = strlen(tp->fw_ver);
12636 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
12637 " v%d.%02d", major, minor);
12774 12638
12775 if (build > 0) { 12639 if (build > 0) {
12776 tp->fw_ver[8] = 'a' + build - 1; 12640 offset = strlen(tp->fw_ver);
12777 tp->fw_ver[9] = '\0'; 12641 if (offset < TG3_VER_SIZE - 1)
12642 tp->fw_ver[offset] = 'a' + build - 1;
12778 } 12643 }
12779} 12644}
12780 12645
@@ -12861,12 +12726,13 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12861static void __devinit tg3_read_fw_ver(struct tg3 *tp) 12726static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12862{ 12727{
12863 u32 val; 12728 u32 val;
12729 bool vpd_vers = false;
12864 12730
12865 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) { 12731 if (tp->fw_ver[0] != 0)
12866 tp->fw_ver[0] = 's'; 12732 vpd_vers = true;
12867 tp->fw_ver[1] = 'b';
12868 tp->fw_ver[2] = '\0';
12869 12733
12734 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12735 strcat(tp->fw_ver, "sb");
12870 return; 12736 return;
12871 } 12737 }
12872 12738
@@ -12883,11 +12749,12 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12883 return; 12749 return;
12884 12750
12885 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || 12751 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12886 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) 12752 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
12887 return; 12753 goto done;
12888 12754
12889 tg3_read_mgmtfw_ver(tp); 12755 tg3_read_mgmtfw_ver(tp);
12890 12756
12757done:
12891 tp->fw_ver[TG3_VER_SIZE - 1] = 0; 12758 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12892} 12759}
12893 12760
@@ -12897,9 +12764,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12897{ 12764{
12898 static struct pci_device_id write_reorder_chipsets[] = { 12765 static struct pci_device_id write_reorder_chipsets[] = {
12899 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 12766 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12900 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 12767 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12901 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 12768 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12902 PCI_DEVICE_ID_AMD_8131_BRIDGE) }, 12769 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12903 { PCI_DEVICE(PCI_VENDOR_ID_VIA, 12770 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12904 PCI_DEVICE_ID_VIA_8385_0) }, 12771 PCI_DEVICE_ID_VIA_8385_0) },
12905 { }, 12772 { },
@@ -13065,8 +12932,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13065 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS; 12932 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13066 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; 12933 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13067 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 12934 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13068 } 12935 } else {
13069 else {
13070 struct pci_dev *bridge = NULL; 12936 struct pci_dev *bridge = NULL;
13071 12937
13072 do { 12938 do {
@@ -13128,6 +12994,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13128 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 12994 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13129 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 12995 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13130 tp->dev->features |= NETIF_F_IPV6_CSUM; 12996 tp->dev->features |= NETIF_F_IPV6_CSUM;
12997 tp->dev->features |= NETIF_F_GRO;
13131 } 12998 }
13132 12999
13133 /* Determine TSO capabilities */ 13000 /* Determine TSO capabilities */
@@ -13188,8 +13055,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13188 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; 13055 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13189 13056
13190 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 13057 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13191 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 13058 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13192 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG)) 13059 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13193 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; 13060 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13194 13061
13195 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 13062 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
@@ -13223,7 +13090,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13223 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 13090 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13224 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); 13091 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13225 if (!tp->pcix_cap) { 13092 if (!tp->pcix_cap) {
13226 pr_err("Cannot find PCI-X capability, aborting\n"); 13093 dev_err(&tp->pdev->dev,
13094 "Cannot find PCI-X capability, aborting\n");
13227 return -EIO; 13095 return -EIO;
13228 } 13096 }
13229 13097
@@ -13420,7 +13288,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13420 /* Force the chip into D0. */ 13288 /* Force the chip into D0. */
13421 err = tg3_set_power_state(tp, PCI_D0); 13289 err = tg3_set_power_state(tp, PCI_D0);
13422 if (err) { 13290 if (err) {
13423 pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev)); 13291 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13424 return err; 13292 return err;
13425 } 13293 }
13426 13294
@@ -13594,13 +13462,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13594 13462
13595 err = tg3_phy_probe(tp); 13463 err = tg3_phy_probe(tp);
13596 if (err) { 13464 if (err) {
13597 pr_err("(%s) phy probe failed, err %d\n", 13465 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
13598 pci_name(tp->pdev), err);
13599 /* ... but do not return immediately ... */ 13466 /* ... but do not return immediately ... */
13600 tg3_mdio_fini(tp); 13467 tg3_mdio_fini(tp);
13601 } 13468 }
13602 13469
13603 tg3_read_partno(tp); 13470 tg3_read_vpd(tp);
13604 tg3_read_fw_ver(tp); 13471 tg3_read_fw_ver(tp);
13605 13472
13606 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 13473 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
@@ -13638,10 +13505,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13638 else 13505 else
13639 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 13506 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13640 13507
13641 tp->rx_offset = NET_IP_ALIGN; 13508 tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
13509 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13642 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 13510 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13643 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) 13511 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
13644 tp->rx_offset = 0; 13512 tp->rx_offset -= NET_IP_ALIGN;
13513#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13514 tp->rx_copy_thresh = ~(u16)0;
13515#endif
13516 }
13645 13517
13646 tp->rx_std_max_post = TG3_RX_RING_SIZE; 13518 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13647 13519
@@ -13964,11 +13836,10 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
13964 } 13836 }
13965 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 13837 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
13966 13838
13967 if (to_device) { 13839 if (to_device)
13968 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); 13840 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13969 } else { 13841 else
13970 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); 13842 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13971 }
13972 13843
13973 ret = -ENODEV; 13844 ret = -ENODEV;
13974 for (i = 0; i < 40; i++) { 13845 for (i = 0; i < 40; i++) {
@@ -14104,8 +13975,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14104 /* Send the buffer to the chip. */ 13975 /* Send the buffer to the chip. */
14105 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); 13976 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14106 if (ret) { 13977 if (ret) {
14107 pr_err("tg3_test_dma() Write the buffer failed %d\n", 13978 dev_err(&tp->pdev->dev,
14108 ret); 13979 "%s: Buffer write failed. err = %d\n",
13980 __func__, ret);
14109 break; 13981 break;
14110 } 13982 }
14111 13983
@@ -14115,8 +13987,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14115 u32 val; 13987 u32 val;
14116 tg3_read_mem(tp, 0x2100 + (i*4), &val); 13988 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14117 if (le32_to_cpu(val) != p[i]) { 13989 if (le32_to_cpu(val) != p[i]) {
14118 pr_err(" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", 13990 dev_err(&tp->pdev->dev,
14119 val, i); 13991 "%s: Buffer corrupted on device! "
13992 "(%d != %d)\n", __func__, val, i);
14120 /* ret = -ENODEV here? */ 13993 /* ret = -ENODEV here? */
14121 } 13994 }
14122 p[i] = 0; 13995 p[i] = 0;
@@ -14125,9 +13998,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14125 /* Now read it back. */ 13998 /* Now read it back. */
14126 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); 13999 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14127 if (ret) { 14000 if (ret) {
14128 pr_err("tg3_test_dma() Read the buffer failed %d\n", 14001 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14129 ret); 14002 "err = %d\n", __func__, ret);
14130
14131 break; 14003 break;
14132 } 14004 }
14133 14005
@@ -14143,8 +14015,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14143 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 14015 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14144 break; 14016 break;
14145 } else { 14017 } else {
14146 pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", 14018 dev_err(&tp->pdev->dev,
14147 p[i], i); 14019 "%s: Buffer corrupted on read back! "
14020 "(%d != %d)\n", __func__, p[i], i);
14148 ret = -ENODEV; 14021 ret = -ENODEV;
14149 goto out; 14022 goto out;
14150 } 14023 }
@@ -14171,10 +14044,10 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14171 if (pci_dev_present(dma_wait_state_chipsets)) { 14044 if (pci_dev_present(dma_wait_state_chipsets)) {
14172 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 14045 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14173 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 14046 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14174 } 14047 } else {
14175 else
14176 /* Safe to use the calculated DMA boundary. */ 14048 /* Safe to use the calculated DMA boundary. */
14177 tp->dma_rwctrl = saved_dma_rwctrl; 14049 tp->dma_rwctrl = saved_dma_rwctrl;
14050 }
14178 14051
14179 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 14052 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14180 } 14053 }
@@ -14436,13 +14309,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14436 14309
14437 err = pci_enable_device(pdev); 14310 err = pci_enable_device(pdev);
14438 if (err) { 14311 if (err) {
14439 pr_err("Cannot enable PCI device, aborting\n"); 14312 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14440 return err; 14313 return err;
14441 } 14314 }
14442 14315
14443 err = pci_request_regions(pdev, DRV_MODULE_NAME); 14316 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14444 if (err) { 14317 if (err) {
14445 pr_err("Cannot obtain PCI resources, aborting\n"); 14318 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14446 goto err_out_disable_pdev; 14319 goto err_out_disable_pdev;
14447 } 14320 }
14448 14321
@@ -14451,14 +14324,15 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14451 /* Find power-management capability. */ 14324 /* Find power-management capability. */
14452 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 14325 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14453 if (pm_cap == 0) { 14326 if (pm_cap == 0) {
14454 pr_err("Cannot find PowerManagement capability, aborting\n"); 14327 dev_err(&pdev->dev,
14328 "Cannot find Power Management capability, aborting\n");
14455 err = -EIO; 14329 err = -EIO;
14456 goto err_out_free_res; 14330 goto err_out_free_res;
14457 } 14331 }
14458 14332
14459 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 14333 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14460 if (!dev) { 14334 if (!dev) {
14461 pr_err("Etherdev alloc failed, aborting\n"); 14335 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14462 err = -ENOMEM; 14336 err = -ENOMEM;
14463 goto err_out_free_res; 14337 goto err_out_free_res;
14464 } 14338 }
@@ -14508,7 +14382,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14508 14382
14509 tp->regs = pci_ioremap_bar(pdev, BAR_0); 14383 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14510 if (!tp->regs) { 14384 if (!tp->regs) {
14511 netdev_err(dev, "Cannot map device registers, aborting\n"); 14385 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14512 err = -ENOMEM; 14386 err = -ENOMEM;
14513 goto err_out_free_dev; 14387 goto err_out_free_dev;
14514 } 14388 }
@@ -14524,7 +14398,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14524 14398
14525 err = tg3_get_invariants(tp); 14399 err = tg3_get_invariants(tp);
14526 if (err) { 14400 if (err) {
14527 netdev_err(dev, "Problem fetching invariants of chip, aborting\n"); 14401 dev_err(&pdev->dev,
14402 "Problem fetching invariants of chip, aborting\n");
14528 goto err_out_iounmap; 14403 goto err_out_iounmap;
14529 } 14404 }
14530 14405
@@ -14559,7 +14434,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14559 err = pci_set_consistent_dma_mask(pdev, 14434 err = pci_set_consistent_dma_mask(pdev,
14560 persist_dma_mask); 14435 persist_dma_mask);
14561 if (err < 0) { 14436 if (err < 0) {
14562 netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n"); 14437 dev_err(&pdev->dev, "Unable to obtain 64 bit "
14438 "DMA for consistent allocations\n");
14563 goto err_out_iounmap; 14439 goto err_out_iounmap;
14564 } 14440 }
14565 } 14441 }
@@ -14567,7 +14443,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14567 if (err || dma_mask == DMA_BIT_MASK(32)) { 14443 if (err || dma_mask == DMA_BIT_MASK(32)) {
14568 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 14444 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14569 if (err) { 14445 if (err) {
14570 netdev_err(dev, "No usable DMA configuration, aborting\n"); 14446 dev_err(&pdev->dev,
14447 "No usable DMA configuration, aborting\n");
14571 goto err_out_iounmap; 14448 goto err_out_iounmap;
14572 } 14449 }
14573 } 14450 }
@@ -14616,14 +14493,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14616 14493
14617 err = tg3_get_device_address(tp); 14494 err = tg3_get_device_address(tp);
14618 if (err) { 14495 if (err) {
14619 netdev_err(dev, "Could not obtain valid ethernet address, aborting\n"); 14496 dev_err(&pdev->dev,
14497 "Could not obtain valid ethernet address, aborting\n");
14620 goto err_out_iounmap; 14498 goto err_out_iounmap;
14621 } 14499 }
14622 14500
14623 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 14501 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14624 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 14502 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14625 if (!tp->aperegs) { 14503 if (!tp->aperegs) {
14626 netdev_err(dev, "Cannot map APE registers, aborting\n"); 14504 dev_err(&pdev->dev,
14505 "Cannot map APE registers, aborting\n");
14627 err = -ENOMEM; 14506 err = -ENOMEM;
14628 goto err_out_iounmap; 14507 goto err_out_iounmap;
14629 } 14508 }
@@ -14647,7 +14526,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14647 14526
14648 err = tg3_test_dma(tp); 14527 err = tg3_test_dma(tp);
14649 if (err) { 14528 if (err) {
14650 netdev_err(dev, "DMA engine test failed, aborting\n"); 14529 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
14651 goto err_out_apeunmap; 14530 goto err_out_apeunmap;
14652 } 14531 }
14653 14532
@@ -14708,7 +14587,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14708 14587
14709 err = register_netdev(dev); 14588 err = register_netdev(dev);
14710 if (err) { 14589 if (err) {
14711 netdev_err(dev, "Cannot register net device, aborting\n"); 14590 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
14712 goto err_out_apeunmap; 14591 goto err_out_apeunmap;
14713 } 14592 }
14714 14593
@@ -14721,11 +14600,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14721 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 14600 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14722 struct phy_device *phydev; 14601 struct phy_device *phydev;
14723 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 14602 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14724 netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", 14603 netdev_info(dev,
14604 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14725 phydev->drv->name, dev_name(&phydev->dev)); 14605 phydev->drv->name, dev_name(&phydev->dev));
14726 } else 14606 } else
14727 netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", 14607 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
14728 tg3_phy_string(tp), 14608 "(WireSpeed[%d])\n", tg3_phy_string(tp),
14729 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : 14609 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
14730 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" : 14610 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
14731 "10/100/1000Base-T")), 14611 "10/100/1000Base-T")),
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 574a1cc4d353..ce9c4918c318 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -23,11 +23,8 @@
23#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */ 23#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */
24#define TG3_BDINFO_SIZE 0x10UL 24#define TG3_BDINFO_SIZE 0x10UL
25 25
26#define RX_COPY_THRESHOLD 256
27
28#define TG3_RX_INTERNAL_RING_SZ_5906 32 26#define TG3_RX_INTERNAL_RING_SZ_5906 32
29 27
30#define RX_STD_MAX_SIZE 1536
31#define RX_STD_MAX_SIZE_5705 512 28#define RX_STD_MAX_SIZE_5705 512
32#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */ 29#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */
33 30
@@ -183,6 +180,7 @@
183#define METAL_REV_B2 0x02 180#define METAL_REV_B2 0x02
184#define TG3PCI_DMA_RW_CTRL 0x0000006c 181#define TG3PCI_DMA_RW_CTRL 0x0000006c
185#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001 182#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
183#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380
186#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 184#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
187#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 185#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
188#define DMA_RWCTRL_READ_BNDRY_16 0x00000100 186#define DMA_RWCTRL_READ_BNDRY_16 0x00000100
@@ -252,7 +250,7 @@
252/* 0x94 --> 0x98 unused */ 250/* 0x94 --> 0x98 unused */
253#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ 251#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
254#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ 252#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
255/* 0xa0 --> 0xb8 unused */ 253/* 0xa8 --> 0xb8 unused */
256#define TG3PCI_DUAL_MAC_CTRL 0x000000b8 254#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
257#define DUAL_MAC_CTRL_CH_MASK 0x00000003 255#define DUAL_MAC_CTRL_CH_MASK 0x00000003
258#define DUAL_MAC_CTRL_ID 0x00000004 256#define DUAL_MAC_CTRL_ID 0x00000004
@@ -1854,6 +1852,8 @@
1854#define TG3_PCIE_TLDLPL_PORT 0x00007c00 1852#define TG3_PCIE_TLDLPL_PORT 0x00007c00
1855#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004 1853#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
1856#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000 1854#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
1855#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014
1856#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000
1857 1857
1858/* OTP bit definitions */ 1858/* OTP bit definitions */
1859#define TG3_OTP_AGCTGT_MASK 0x000000e0 1859#define TG3_OTP_AGCTGT_MASK 0x000000e0
@@ -2082,7 +2082,7 @@
2082#define MII_TG3_DSP_AADJ1CH0 0x001f 2082#define MII_TG3_DSP_AADJ1CH0 0x001f
2083#define MII_TG3_DSP_AADJ1CH3 0x601f 2083#define MII_TG3_DSP_AADJ1CH3 0x601f
2084#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002 2084#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
2085#define MII_TG3_DSP_EXP8 0x0708 2085#define MII_TG3_DSP_EXP8 0x0f08
2086#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001 2086#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001
2087#define MII_TG3_DSP_EXP8_AEDW 0x0200 2087#define MII_TG3_DSP_EXP8_AEDW 0x0200
2088#define MII_TG3_DSP_EXP75 0x0f75 2088#define MII_TG3_DSP_EXP75 0x0f75
@@ -2512,7 +2512,7 @@ struct tg3_hw_stats {
2512 */ 2512 */
2513struct ring_info { 2513struct ring_info {
2514 struct sk_buff *skb; 2514 struct sk_buff *skb;
2515 DECLARE_PCI_UNMAP_ADDR(mapping) 2515 DEFINE_DMA_UNMAP_ADDR(mapping);
2516}; 2516};
2517 2517
2518struct tg3_config_info { 2518struct tg3_config_info {
@@ -2561,7 +2561,7 @@ struct tg3_bufmgr_config {
2561 2561
2562struct tg3_ethtool_stats { 2562struct tg3_ethtool_stats {
2563 /* Statistics maintained by Receive MAC. */ 2563 /* Statistics maintained by Receive MAC. */
2564 u64 rx_octets; 2564 u64 rx_octets;
2565 u64 rx_fragments; 2565 u64 rx_fragments;
2566 u64 rx_ucast_packets; 2566 u64 rx_ucast_packets;
2567 u64 rx_mcast_packets; 2567 u64 rx_mcast_packets;
@@ -2751,9 +2751,11 @@ struct tg3 {
2751 struct tg3_napi napi[TG3_IRQ_MAX_VECS]; 2751 struct tg3_napi napi[TG3_IRQ_MAX_VECS];
2752 void (*write32_rx_mbox) (struct tg3 *, u32, 2752 void (*write32_rx_mbox) (struct tg3 *, u32,
2753 u32); 2753 u32);
2754 u32 rx_copy_thresh;
2754 u32 rx_pending; 2755 u32 rx_pending;
2755 u32 rx_jumbo_pending; 2756 u32 rx_jumbo_pending;
2756 u32 rx_std_max_post; 2757 u32 rx_std_max_post;
2758 u32 rx_offset;
2757 u32 rx_pkt_map_sz; 2759 u32 rx_pkt_map_sz;
2758#if TG3_VLAN_TAG_USED 2760#if TG3_VLAN_TAG_USED
2759 struct vlan_group *vlgrp; 2761 struct vlan_group *vlgrp;
@@ -2773,7 +2775,6 @@ struct tg3 {
2773 unsigned long last_event_jiffies; 2775 unsigned long last_event_jiffies;
2774 }; 2776 };
2775 2777
2776 u32 rx_offset;
2777 u32 tg3_flags; 2778 u32 tg3_flags;
2778#define TG3_FLAG_TAGGED_STATUS 0x00000001 2779#define TG3_FLAG_TAGGED_STATUS 0x00000001
2779#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002 2780#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 390540c101c7..ccee3eddc5f4 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1034,7 +1034,7 @@ static void TLan_tx_timeout(struct net_device *dev)
1034 TLan_ResetLists( dev ); 1034 TLan_ResetLists( dev );
1035 TLan_ReadAndClearStats( dev, TLAN_IGNORE ); 1035 TLan_ReadAndClearStats( dev, TLAN_IGNORE );
1036 TLan_ResetAdapter( dev ); 1036 TLan_ResetAdapter( dev );
1037 dev->trans_start = jiffies; 1037 dev->trans_start = jiffies; /* prevent tx timeout */
1038 netif_wake_queue( dev ); 1038 netif_wake_queue( dev );
1039 1039
1040} 1040}
@@ -1147,7 +1147,6 @@ static netdev_tx_t TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
1147 1147
1148 CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS ); 1148 CIRC_INC( priv->txTail, TLAN_NUM_TX_LISTS );
1149 1149
1150 dev->trans_start = jiffies;
1151 return NETDEV_TX_OK; 1150 return NETDEV_TX_OK;
1152 1151
1153} /* TLan_StartTx */ 1152} /* TLan_StartTx */
@@ -1314,7 +1313,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
1314 1313
1315static void TLan_SetMulticastList( struct net_device *dev ) 1314static void TLan_SetMulticastList( struct net_device *dev )
1316{ 1315{
1317 struct dev_mc_list *dmi; 1316 struct netdev_hw_addr *ha;
1318 u32 hash1 = 0; 1317 u32 hash1 = 0;
1319 u32 hash2 = 0; 1318 u32 hash2 = 0;
1320 int i; 1319 int i;
@@ -1336,12 +1335,12 @@ static void TLan_SetMulticastList( struct net_device *dev )
1336 TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF ); 1335 TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
1337 } else { 1336 } else {
1338 i = 0; 1337 i = 0;
1339 netdev_for_each_mc_addr(dmi, dev) { 1338 netdev_for_each_mc_addr(ha, dev) {
1340 if ( i < 3 ) { 1339 if ( i < 3 ) {
1341 TLan_SetMac( dev, i + 1, 1340 TLan_SetMac( dev, i + 1,
1342 (char *) &dmi->dmi_addr ); 1341 (char *) &ha->addr);
1343 } else { 1342 } else {
1344 offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr ); 1343 offset = TLan_HashFunc((u8 *)&ha->addr);
1345 if ( offset < 32 ) 1344 if ( offset < 32 )
1346 hash1 |= ( 1 << offset ); 1345 hash1 |= ( 1 << offset );
1347 else 1346 else
@@ -2464,7 +2463,7 @@ static void TLan_PhyPrint( struct net_device *dev )
2464 printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name ); 2463 printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
2465 } else if ( phy <= TLAN_PHY_MAX_ADDR ) { 2464 } else if ( phy <= TLAN_PHY_MAX_ADDR ) {
2466 printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy ); 2465 printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
2467 printk( "TLAN: Off. +0 +1 +2 +3 \n" ); 2466 printk( "TLAN: Off. +0 +1 +2 +3\n" );
2468 for ( i = 0; i < 0x20; i+= 4 ) { 2467 for ( i = 0; i < 0x20; i+= 4 ) {
2469 printk( "TLAN: 0x%02x", i ); 2468 printk( "TLAN: 0x%02x", i );
2470 TLan_MiiReadReg( dev, phy, i, &data0 ); 2469 TLan_MiiReadReg( dev, phy, i, &data0 );
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 7d7f3eef1ab3..10800f16a231 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -77,7 +77,7 @@ static char version[] __devinitdata =
77 77
78#define FW_NAME "3com/3C359.bin" 78#define FW_NAME "3com/3C359.bin"
79MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ; 79MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
80MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ; 80MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ;
81MODULE_FIRMWARE(FW_NAME); 81MODULE_FIRMWARE(FW_NAME);
82 82
83/* Module parameters */ 83/* Module parameters */
@@ -163,19 +163,19 @@ static void print_tx_state(struct net_device *dev)
163 u8 __iomem *xl_mmio = xl_priv->xl_mmio ; 163 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
164 int i ; 164 int i ;
165 165
166 printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d \n",xl_priv->tx_ring_head, 166 printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head,
167 xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ; 167 xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ;
168 printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len \n"); 168 printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len\n");
169 for (i = 0; i < 16; i++) { 169 for (i = 0; i < 16; i++) {
170 txd = &(xl_priv->xl_tx_ring[i]) ; 170 txd = &(xl_priv->xl_tx_ring[i]) ;
171 printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(txd), 171 printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd),
172 txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ; 172 txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ;
173 } 173 }
174 174
175 printk("DNLISTPTR = %04x \n", readl(xl_mmio + MMIO_DNLISTPTR) ); 175 printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) );
176 176
177 printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) ); 177 printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) );
178 printk("Queue status = %0x \n",netif_running(dev) ) ; 178 printk("Queue status = %0x\n",netif_running(dev) ) ;
179} 179}
180 180
181static void print_rx_state(struct net_device *dev) 181static void print_rx_state(struct net_device *dev)
@@ -186,19 +186,19 @@ static void print_rx_state(struct net_device *dev)
186 u8 __iomem *xl_mmio = xl_priv->xl_mmio ; 186 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
187 int i ; 187 int i ;
188 188
189 printk("rx_ring_tail: %d \n", xl_priv->rx_ring_tail) ; 189 printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail);
190 printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len \n"); 190 printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len\n");
191 for (i = 0; i < 16; i++) { 191 for (i = 0; i < 16; i++) {
192 /* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */ 192 /* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */
193 rxd = &(xl_priv->xl_rx_ring[i]) ; 193 rxd = &(xl_priv->xl_rx_ring[i]) ;
194 printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(rxd), 194 printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd),
195 rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ; 195 rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ;
196 } 196 }
197 197
198 printk("UPLISTPTR = %04x \n", readl(xl_mmio + MMIO_UPLISTPTR) ); 198 printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR));
199 199
200 printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) ); 200 printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL));
201 printk("Queue status = %0x \n",netif_running(dev) ) ; 201 printk("Queue status = %0x\n",netif_running(dev));
202} 202}
203#endif 203#endif
204 204
@@ -391,7 +391,7 @@ static int __devinit xl_init(struct net_device *dev)
391 struct xl_private *xl_priv = netdev_priv(dev); 391 struct xl_private *xl_priv = netdev_priv(dev);
392 int err; 392 int err;
393 393
394 printk(KERN_INFO "%s \n", version); 394 printk(KERN_INFO "%s\n", version);
395 printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n", 395 printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
396 xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq); 396 xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq);
397 397
@@ -463,7 +463,7 @@ static int xl_hw_reset(struct net_device *dev)
463 writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD); 463 writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
464 464
465#if XL_DEBUG 465#if XL_DEBUG
466 printk(KERN_INFO "Read from PMBAR = %04x \n", readw(xl_mmio + MMIO_MACDATA)) ; 466 printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA));
467#endif 467#endif
468 468
469 if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) { 469 if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) {
@@ -591,9 +591,9 @@ static int xl_hw_reset(struct net_device *dev)
591#if XL_DEBUG 591#if XL_DEBUG
592 writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 592 writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
593 if ( readw(xl_mmio + MMIO_MACDATA) & 2) { 593 if ( readw(xl_mmio + MMIO_MACDATA) & 2) {
594 printk(KERN_INFO "Default ring speed 4 mbps \n") ; 594 printk(KERN_INFO "Default ring speed 4 mbps\n");
595 } else { 595 } else {
596 printk(KERN_INFO "Default ring speed 16 mbps \n") ; 596 printk(KERN_INFO "Default ring speed 16 mbps\n");
597 } 597 }
598 printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb); 598 printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb);
599#endif 599#endif
@@ -651,7 +651,7 @@ static int xl_open(struct net_device *dev)
651 651
652 if (open_err != 0) { /* Something went wrong with the open command */ 652 if (open_err != 0) { /* Something went wrong with the open command */
653 if (open_err & 0x07) { /* Wrong speed, retry at different speed */ 653 if (open_err & 0x07) { /* Wrong speed, retry at different speed */
654 printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed \n", dev->name) ; 654 printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name);
655 switchsettings = switchsettings ^ 2 ; 655 switchsettings = switchsettings ^ 2 ;
656 xl_ee_write(dev,0x08,switchsettings) ; 656 xl_ee_write(dev,0x08,switchsettings) ;
657 xl_hw_reset(dev) ; 657 xl_hw_reset(dev) ;
@@ -703,7 +703,7 @@ static int xl_open(struct net_device *dev)
703 } 703 }
704 704
705 if (i==0) { 705 if (i==0) {
706 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled \n",dev->name) ; 706 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
707 free_irq(dev->irq,dev) ; 707 free_irq(dev->irq,dev) ;
708 kfree(xl_priv->xl_tx_ring); 708 kfree(xl_priv->xl_tx_ring);
709 kfree(xl_priv->xl_rx_ring); 709 kfree(xl_priv->xl_rx_ring);
@@ -853,7 +853,7 @@ static int xl_open_hw(struct net_device *dev)
853 853
854 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 854 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
855 xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ; 855 xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
856 printk(", ARB: %04x \n",xl_priv->arb ) ; 856 printk(", ARB: %04x\n",xl_priv->arb );
857 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 857 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
858 vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ; 858 vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
859 859
@@ -867,7 +867,7 @@ static int xl_open_hw(struct net_device *dev)
867 ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ; 867 ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ;
868 } 868 }
869 ver_str[i] = '\0' ; 869 ver_str[i] = '\0' ;
870 printk(KERN_INFO "%s: Microcode version String: %s \n",dev->name,ver_str); 870 printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str);
871 } 871 }
872 872
873 /* 873 /*
@@ -991,7 +991,7 @@ static void xl_rx(struct net_device *dev)
991 skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ; 991 skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
992 992
993 if (skb==NULL) { /* Still need to fix the rx ring */ 993 if (skb==NULL) { /* Still need to fix the rx ring */
994 printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer \n",dev->name) ; 994 printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name);
995 adv_rx_ring(dev) ; 995 adv_rx_ring(dev) ;
996 dev->stats.rx_dropped++ ; 996 dev->stats.rx_dropped++ ;
997 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; 997 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
@@ -1092,7 +1092,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
1092 */ 1092 */
1093 if (intstatus == 0x0001) { 1093 if (intstatus == 0x0001) {
1094 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 1094 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1095 printk(KERN_INFO "%s: 00001 int received \n",dev->name) ; 1095 printk(KERN_INFO "%s: 00001 int received\n",dev->name);
1096 } else { 1096 } else {
1097 if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) { 1097 if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) {
1098 1098
@@ -1103,9 +1103,9 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
1103 */ 1103 */
1104 1104
1105 if (intstatus & HOSTERRINT) { 1105 if (intstatus & HOSTERRINT) {
1106 printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x \n",dev->name,intstatus) ; 1106 printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus);
1107 writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ; 1107 writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
1108 printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name); 1108 printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
1109 netif_stop_queue(dev) ; 1109 netif_stop_queue(dev) ;
1110 xl_freemem(dev) ; 1110 xl_freemem(dev) ;
1111 free_irq(dev->irq,dev); 1111 free_irq(dev->irq,dev);
@@ -1128,7 +1128,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
1128 Must put a timeout check here ! */ 1128 Must put a timeout check here ! */
1129 /* Empty Loop */ 1129 /* Empty Loop */
1130 } 1130 }
1131 printk(KERN_WARNING "%s: TX Underrun received \n",dev->name) ; 1131 printk(KERN_WARNING "%s: TX Underrun received\n",dev->name);
1132 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 1132 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1133 } /* TxUnderRun */ 1133 } /* TxUnderRun */
1134 1134
@@ -1157,13 +1157,13 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
1157 macstatus = readw(xl_mmio + MMIO_MACDATA) ; 1157 macstatus = readw(xl_mmio + MMIO_MACDATA) ;
1158 printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name); 1158 printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name);
1159 if (macstatus & (1<<14)) 1159 if (macstatus & (1<<14))
1160 printk(KERN_WARNING "tchk error: Unrecoverable error \n") ; 1160 printk(KERN_WARNING "tchk error: Unrecoverable error\n");
1161 if (macstatus & (1<<3)) 1161 if (macstatus & (1<<3))
1162 printk(KERN_WARNING "eint error: Internal watchdog timer expired \n") ; 1162 printk(KERN_WARNING "eint error: Internal watchdog timer expired\n");
1163 if (macstatus & (1<<2)) 1163 if (macstatus & (1<<2))
1164 printk(KERN_WARNING "aint error: Host tried to perform invalid operation \n") ; 1164 printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n");
1165 printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ; 1165 printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ;
1166 printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name); 1166 printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
1167 netif_stop_queue(dev) ; 1167 netif_stop_queue(dev) ;
1168 xl_freemem(dev) ; 1168 xl_freemem(dev) ;
1169 free_irq(dev->irq,dev); 1169 free_irq(dev->irq,dev);
@@ -1175,7 +1175,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
1175 return IRQ_HANDLED; 1175 return IRQ_HANDLED;
1176 } 1176 }
1177 } else { 1177 } else {
1178 printk(KERN_WARNING "%s: Received Unknown interrupt : %04x \n", dev->name, intstatus) ; 1178 printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus);
1179 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 1179 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1180 } 1180 }
1181 } 1181 }
@@ -1350,11 +1350,11 @@ static int xl_close(struct net_device *dev)
1350 1350
1351 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD); 1351 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
1352 if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) { 1352 if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) {
1353 printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response \n",dev->name) ; 1353 printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name);
1354 } else { 1354 } else {
1355 writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 1355 writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1356 if (readb(xl_mmio + MMIO_MACDATA)==0) { 1356 if (readb(xl_mmio + MMIO_MACDATA)==0) {
1357 printk(KERN_INFO "%s: Adapter has been closed \n",dev->name) ; 1357 printk(KERN_INFO "%s: Adapter has been closed\n",dev->name);
1358 writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 1358 writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1359 1359
1360 xl_freemem(dev) ; 1360 xl_freemem(dev) ;
@@ -1391,7 +1391,7 @@ static int xl_close(struct net_device *dev)
1391static void xl_set_rx_mode(struct net_device *dev) 1391static void xl_set_rx_mode(struct net_device *dev)
1392{ 1392{
1393 struct xl_private *xl_priv = netdev_priv(dev); 1393 struct xl_private *xl_priv = netdev_priv(dev);
1394 struct dev_mc_list *dmi; 1394 struct netdev_hw_addr *ha;
1395 unsigned char dev_mc_address[4] ; 1395 unsigned char dev_mc_address[4] ;
1396 u16 options ; 1396 u16 options ;
1397 1397
@@ -1408,11 +1408,11 @@ static void xl_set_rx_mode(struct net_device *dev)
1408 1408
1409 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; 1409 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1410 1410
1411 netdev_for_each_mc_addr(dmi, dev) { 1411 netdev_for_each_mc_addr(ha, dev) {
1412 dev_mc_address[0] |= dmi->dmi_addr[2] ; 1412 dev_mc_address[0] |= ha->addr[2];
1413 dev_mc_address[1] |= dmi->dmi_addr[3] ; 1413 dev_mc_address[1] |= ha->addr[3];
1414 dev_mc_address[2] |= dmi->dmi_addr[4] ; 1414 dev_mc_address[2] |= ha->addr[4];
1415 dev_mc_address[3] |= dmi->dmi_addr[5] ; 1415 dev_mc_address[3] |= ha->addr[5];
1416 } 1416 }
1417 1417
1418 if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */ 1418 if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */
@@ -1447,11 +1447,11 @@ static void xl_srb_bh(struct net_device *dev)
1447 printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ; 1447 printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ;
1448 break ; 1448 break ;
1449 case 4: 1449 case 4:
1450 printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command \n",dev->name,srb_cmd) ; 1450 printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command\n",dev->name,srb_cmd);
1451 break ; 1451 break ;
1452 1452
1453 case 6: 1453 case 6:
1454 printk(KERN_INFO "%s: Command: %d - Options Invalid for command \n",dev->name,srb_cmd) ; 1454 printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd);
1455 break ; 1455 break ;
1456 1456
1457 case 0: /* Successful command execution */ 1457 case 0: /* Successful command execution */
@@ -1472,11 +1472,11 @@ static void xl_srb_bh(struct net_device *dev)
1472 break ; 1472 break ;
1473 case SET_FUNC_ADDRESS: 1473 case SET_FUNC_ADDRESS:
1474 if(xl_priv->xl_message_level) 1474 if(xl_priv->xl_message_level)
1475 printk(KERN_INFO "%s: Functional Address Set \n",dev->name) ; 1475 printk(KERN_INFO "%s: Functional Address Set\n",dev->name);
1476 break ; 1476 break ;
1477 case CLOSE_NIC: 1477 case CLOSE_NIC:
1478 if(xl_priv->xl_message_level) 1478 if(xl_priv->xl_message_level)
1479 printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler \n",dev->name) ; 1479 printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name);
1480 break ; 1480 break ;
1481 case SET_MULTICAST_MODE: 1481 case SET_MULTICAST_MODE:
1482 if(xl_priv->xl_message_level) 1482 if(xl_priv->xl_message_level)
@@ -1485,9 +1485,9 @@ static void xl_srb_bh(struct net_device *dev)
1485 case SET_RECEIVE_MODE: 1485 case SET_RECEIVE_MODE:
1486 if(xl_priv->xl_message_level) { 1486 if(xl_priv->xl_message_level) {
1487 if (xl_priv->xl_copy_all_options == 0x0004) 1487 if (xl_priv->xl_copy_all_options == 0x0004)
1488 printk(KERN_INFO "%s: Entering promiscuous mode \n", dev->name) ; 1488 printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name);
1489 else 1489 else
1490 printk(KERN_INFO "%s: Entering normal receive mode \n",dev->name) ; 1490 printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name);
1491 } 1491 }
1492 break ; 1492 break ;
1493 1493
@@ -1557,20 +1557,20 @@ static void xl_arb_cmd(struct net_device *dev)
1557 xl_freemem(dev) ; 1557 xl_freemem(dev) ;
1558 free_irq(dev->irq,dev); 1558 free_irq(dev->irq,dev);
1559 1559
1560 printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ; 1560 printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
1561 } /* If serious error */ 1561 } /* If serious error */
1562 1562
1563 if (xl_priv->xl_message_level) { 1563 if (xl_priv->xl_message_level) {
1564 if (lan_status_diff & LSC_SIG_LOSS) 1564 if (lan_status_diff & LSC_SIG_LOSS)
1565 printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ; 1565 printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
1566 if (lan_status_diff & LSC_HARD_ERR) 1566 if (lan_status_diff & LSC_HARD_ERR)
1567 printk(KERN_INFO "%s: Beaconing \n",dev->name); 1567 printk(KERN_INFO "%s: Beaconing\n",dev->name);
1568 if (lan_status_diff & LSC_SOFT_ERR) 1568 if (lan_status_diff & LSC_SOFT_ERR)
1569 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name); 1569 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
1570 if (lan_status_diff & LSC_TRAN_BCN) 1570 if (lan_status_diff & LSC_TRAN_BCN)
1571 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); 1571 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
1572 if (lan_status_diff & LSC_SS) 1572 if (lan_status_diff & LSC_SS)
1573 printk(KERN_INFO "%s: Single Station on the ring \n", dev->name); 1573 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1574 if (lan_status_diff & LSC_RING_REC) 1574 if (lan_status_diff & LSC_RING_REC)
1575 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name); 1575 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
1576 if (lan_status_diff & LSC_FDX_MODE) 1576 if (lan_status_diff & LSC_FDX_MODE)
@@ -1579,7 +1579,7 @@ static void xl_arb_cmd(struct net_device *dev)
1579 1579
1580 if (lan_status_diff & LSC_CO) { 1580 if (lan_status_diff & LSC_CO) {
1581 if (xl_priv->xl_message_level) 1581 if (xl_priv->xl_message_level)
1582 printk(KERN_INFO "%s: Counter Overflow \n", dev->name); 1582 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
1583 /* Issue READ.LOG command */ 1583 /* Issue READ.LOG command */
1584 xl_srb_cmd(dev, READ_LOG) ; 1584 xl_srb_cmd(dev, READ_LOG) ;
1585 } 1585 }
@@ -1595,7 +1595,7 @@ static void xl_arb_cmd(struct net_device *dev)
1595 } /* Lan.change.status */ 1595 } /* Lan.change.status */
1596 else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */ 1596 else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */
1597#if XL_DEBUG 1597#if XL_DEBUG
1598 printk(KERN_INFO "Received.Data \n") ; 1598 printk(KERN_INFO "Received.Data\n");
1599#endif 1599#endif
1600 writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ; 1600 writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1601 xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ; 1601 xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
@@ -1630,7 +1630,7 @@ static void xl_arb_cmd(struct net_device *dev)
1630 xl_asb_cmd(dev) ; 1630 xl_asb_cmd(dev) ;
1631 1631
1632 } else { 1632 } else {
1633 printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x \n",dev->name,arb_cmd) ; 1633 printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd);
1634 } 1634 }
1635 1635
1636 /* Acknowledge the arb interrupt */ 1636 /* Acknowledge the arb interrupt */
@@ -1687,13 +1687,13 @@ static void xl_asb_bh(struct net_device *dev)
1687 ret_code = readb(xl_mmio + MMIO_MACDATA) ; 1687 ret_code = readb(xl_mmio + MMIO_MACDATA) ;
1688 switch (ret_code) { 1688 switch (ret_code) {
1689 case 0x01: 1689 case 0x01:
1690 printk(KERN_INFO "%s: ASB Command, unrecognized command code \n",dev->name) ; 1690 printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name);
1691 break ; 1691 break ;
1692 case 0x26: 1692 case 0x26:
1693 printk(KERN_INFO "%s: ASB Command, unexpected receive buffer \n", dev->name) ; 1693 printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name);
1694 break ; 1694 break ;
1695 case 0x40: 1695 case 0x40:
1696 printk(KERN_INFO "%s: ASB Command, Invalid Station ID \n", dev->name) ; 1696 printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name);
1697 break ; 1697 break ;
1698 } 1698 }
1699 xl_priv->asb_queued = 0 ; 1699 xl_priv->asb_queued = 0 ;
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 1a0967246e2f..91e6c78271a3 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -986,7 +986,7 @@ static void open_sap(unsigned char type, struct net_device *dev)
986static void tok_set_multicast_list(struct net_device *dev) 986static void tok_set_multicast_list(struct net_device *dev)
987{ 987{
988 struct tok_info *ti = netdev_priv(dev); 988 struct tok_info *ti = netdev_priv(dev);
989 struct dev_mc_list *mclist; 989 struct netdev_hw_addr *ha;
990 unsigned char address[4]; 990 unsigned char address[4];
991 991
992 int i; 992 int i;
@@ -995,11 +995,11 @@ static void tok_set_multicast_list(struct net_device *dev)
995 /*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/ 995 /*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
996 if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return; 996 if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
997 address[0] = address[1] = address[2] = address[3] = 0; 997 address[0] = address[1] = address[2] = address[3] = 0;
998 netdev_for_each_mc_addr(mclist, dev) { 998 netdev_for_each_mc_addr(ha, dev) {
999 address[0] |= mclist->dmi_addr[2]; 999 address[0] |= ha->addr[2];
1000 address[1] |= mclist->dmi_addr[3]; 1000 address[1] |= ha->addr[3];
1001 address[2] |= mclist->dmi_addr[4]; 1001 address[2] |= ha->addr[4];
1002 address[3] |= mclist->dmi_addr[5]; 1002 address[3] |= ha->addr[5];
1003 } 1003 }
1004 SET_PAGE(ti->srb_page); 1004 SET_PAGE(ti->srb_page);
1005 for (i = 0; i < sizeof(struct srb_set_funct_addr); i++) 1005 for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
@@ -1041,7 +1041,6 @@ static netdev_tx_t tok_send_packet(struct sk_buff *skb,
1041 writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST); 1041 writew(ti->exsap_station_id, ti->srb + STATION_ID_OFST);
1042 writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD); 1042 writeb(CMD_IN_SRB, ti->mmio + ACA_OFFSET + ACA_SET + ISRA_ODD);
1043 spin_unlock_irqrestore(&(ti->lock), flags); 1043 spin_unlock_irqrestore(&(ti->lock), flags);
1044 dev->trans_start = jiffies;
1045 return NETDEV_TX_OK; 1044 return NETDEV_TX_OK;
1046} 1045}
1047 1046
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 7a5fbf5a9d71..5bd140704533 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -358,7 +358,7 @@ static int __devinit streamer_init_one(struct pci_dev *pdev,
358 pcr |= PCI_COMMAND_SERR; 358 pcr |= PCI_COMMAND_SERR;
359 pci_write_config_word (pdev, PCI_COMMAND, pcr); 359 pci_write_config_word (pdev, PCI_COMMAND, pcr);
360 360
361 printk("%s \n", version); 361 printk("%s\n", version);
362 printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name, 362 printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name,
363 streamer_priv->streamer_card_name, 363 streamer_priv->streamer_card_name,
364 (unsigned int) dev->base_addr, 364 (unsigned int) dev->base_addr,
@@ -651,7 +651,7 @@ static int streamer_open(struct net_device *dev)
651#if STREAMER_DEBUG 651#if STREAMER_DEBUG
652 writew(readw(streamer_mmio + LAPWWO), 652 writew(readw(streamer_mmio + LAPWWO),
653 streamer_mmio + LAPA); 653 streamer_mmio + LAPA);
654 printk("srb open request: \n"); 654 printk("srb open request:\n");
655 for (i = 0; i < 16; i++) { 655 for (i = 0; i < 16; i++) {
656 printk("%x:", ntohs(readw(streamer_mmio + LAPDINC))); 656 printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
657 } 657 }
@@ -701,7 +701,7 @@ static int streamer_open(struct net_device *dev)
701 if (srb_word != 0) { 701 if (srb_word != 0) {
702 if (srb_word == 0x07) { 702 if (srb_word == 0x07) {
703 if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */ 703 if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */
704 printk(KERN_WARNING "%s: Retrying at different ring speed \n", 704 printk(KERN_WARNING "%s: Retrying at different ring speed\n",
705 dev->name); 705 dev->name);
706 open_finished = 0; 706 open_finished = 0;
707 } else { 707 } else {
@@ -717,7 +717,7 @@ static int streamer_open(struct net_device *dev)
717 ((error_code & 0x0f) == 0x0d)) 717 ((error_code & 0x0f) == 0x0d))
718 { 718 {
719 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name); 719 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
720 printk(KERN_WARNING "%s: Please try again with a specified ring speed \n", dev->name); 720 printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name);
721 free_irq(dev->irq, dev); 721 free_irq(dev->irq, dev);
722 return -EIO; 722 return -EIO;
723 } 723 }
@@ -923,7 +923,7 @@ static void streamer_rx(struct net_device *dev)
923 923
924 if (rx_desc->status & 0x7E830000) { /* errors */ 924 if (rx_desc->status & 0x7E830000) { /* errors */
925 if (streamer_priv->streamer_message_level) { 925 if (streamer_priv->streamer_message_level) {
926 printk(KERN_WARNING "%s: Rx Error %x \n", 926 printk(KERN_WARNING "%s: Rx Error %x\n",
927 dev->name, rx_desc->status); 927 dev->name, rx_desc->status);
928 } 928 }
929 } else { /* received without errors */ 929 } else { /* received without errors */
@@ -936,7 +936,7 @@ static void streamer_rx(struct net_device *dev)
936 936
937 if (skb == NULL) 937 if (skb == NULL)
938 { 938 {
939 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name); 939 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name);
940 dev->stats.rx_dropped++; 940 dev->stats.rx_dropped++;
941 } else { /* we allocated an skb OK */ 941 } else { /* we allocated an skb OK */
942 if (buffer_cnt == 1) { 942 if (buffer_cnt == 1) {
@@ -1267,7 +1267,7 @@ static void streamer_set_rx_mode(struct net_device *dev)
1267 netdev_priv(dev); 1267 netdev_priv(dev);
1268 __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; 1268 __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
1269 __u8 options = 0; 1269 __u8 options = 0;
1270 struct dev_mc_list *dmi; 1270 struct netdev_hw_addr *ha;
1271 unsigned char dev_mc_address[5]; 1271 unsigned char dev_mc_address[5];
1272 1272
1273 writel(streamer_priv->srb, streamer_mmio + LAPA); 1273 writel(streamer_priv->srb, streamer_mmio + LAPA);
@@ -1303,11 +1303,11 @@ static void streamer_set_rx_mode(struct net_device *dev)
1303 writel(streamer_priv->srb,streamer_mmio+LAPA); 1303 writel(streamer_priv->srb,streamer_mmio+LAPA);
1304 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; 1304 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1305 1305
1306 netdev_for_each_mc_addr(dmi, dev) { 1306 netdev_for_each_mc_addr(ha, dev) {
1307 dev_mc_address[0] |= dmi->dmi_addr[2] ; 1307 dev_mc_address[0] |= ha->addr[2];
1308 dev_mc_address[1] |= dmi->dmi_addr[3] ; 1308 dev_mc_address[1] |= ha->addr[3];
1309 dev_mc_address[2] |= dmi->dmi_addr[4] ; 1309 dev_mc_address[2] |= ha->addr[4];
1310 dev_mc_address[3] |= dmi->dmi_addr[5] ; 1310 dev_mc_address[3] |= ha->addr[5];
1311 } 1311 }
1312 1312
1313 writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC); 1313 writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC);
@@ -1364,7 +1364,7 @@ static void streamer_srb_bh(struct net_device *dev)
1364 case 0x00: 1364 case 0x00:
1365 break; 1365 break;
1366 case 0x01: 1366 case 0x01:
1367 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name); 1367 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
1368 break; 1368 break;
1369 case 0x04: 1369 case 0x04:
1370 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); 1370 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1392,13 +1392,13 @@ static void streamer_srb_bh(struct net_device *dev)
1392 case 0x00: 1392 case 0x00:
1393 break; 1393 break;
1394 case 0x01: 1394 case 0x01:
1395 printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name); 1395 printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
1396 break; 1396 break;
1397 case 0x04: 1397 case 0x04:
1398 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); 1398 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
1399 break; 1399 break;
1400 case 0x39: /* Must deal with this if individual multicast addresses used */ 1400 case 0x39: /* Must deal with this if individual multicast addresses used */
1401 printk(KERN_INFO "%s: Group address not found \n", dev->name); 1401 printk(KERN_INFO "%s: Group address not found\n", dev->name);
1402 break; 1402 break;
1403 default: 1403 default:
1404 break; 1404 break;
@@ -1414,10 +1414,10 @@ static void streamer_srb_bh(struct net_device *dev)
1414 switch (srb_word) { 1414 switch (srb_word) {
1415 case 0x00: 1415 case 0x00:
1416 if (streamer_priv->streamer_message_level) 1416 if (streamer_priv->streamer_message_level)
1417 printk(KERN_INFO "%s: Functional Address Mask Set \n", dev->name); 1417 printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name);
1418 break; 1418 break;
1419 case 0x01: 1419 case 0x01:
1420 printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name); 1420 printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
1421 break; 1421 break;
1422 case 0x04: 1422 case 0x04:
1423 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); 1423 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1448,7 +1448,7 @@ static void streamer_srb_bh(struct net_device *dev)
1448 } 1448 }
1449 break; 1449 break;
1450 case 0x01: 1450 case 0x01:
1451 printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name); 1451 printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
1452 break; 1452 break;
1453 case 0x04: 1453 case 0x04:
1454 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); 1454 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1467,7 +1467,7 @@ static void streamer_srb_bh(struct net_device *dev)
1467 printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name); 1467 printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
1468 break; 1468 break;
1469 case 0x01: 1469 case 0x01:
1470 printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name); 1470 printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
1471 break; 1471 break;
1472 case 0x04: 1472 case 0x04:
1473 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); 1473 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1556,7 +1556,7 @@ static void streamer_arb_cmd(struct net_device *dev)
1556 (streamer_mmio + LAPDINC))); 1556 (streamer_mmio + LAPDINC)));
1557 } 1557 }
1558 1558
1559 printk("next %04x, fs %02x, len %04x \n", next, 1559 printk("next %04x, fs %02x, len %04x\n", next,
1560 status, len); 1560 status, len);
1561 } 1561 }
1562#endif 1562#endif
@@ -1593,7 +1593,7 @@ static void streamer_arb_cmd(struct net_device *dev)
1593 1593
1594 mac_frame->protocol = tr_type_trans(mac_frame, dev); 1594 mac_frame->protocol = tr_type_trans(mac_frame, dev);
1595#if STREAMER_NETWORK_MONITOR 1595#if STREAMER_NETWORK_MONITOR
1596 printk(KERN_WARNING "%s: Received MAC Frame, details: \n", 1596 printk(KERN_WARNING "%s: Received MAC Frame, details:\n",
1597 dev->name); 1597 dev->name);
1598 mac_hdr = tr_hdr(mac_frame); 1598 mac_hdr = tr_hdr(mac_frame);
1599 printk(KERN_WARNING 1599 printk(KERN_WARNING
@@ -1669,15 +1669,15 @@ drop_frame:
1669 /* If serious error */ 1669 /* If serious error */
1670 if (streamer_priv->streamer_message_level) { 1670 if (streamer_priv->streamer_message_level) {
1671 if (lan_status_diff & LSC_SIG_LOSS) 1671 if (lan_status_diff & LSC_SIG_LOSS)
1672 printk(KERN_WARNING "%s: No receive signal detected \n", dev->name); 1672 printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
1673 if (lan_status_diff & LSC_HARD_ERR) 1673 if (lan_status_diff & LSC_HARD_ERR)
1674 printk(KERN_INFO "%s: Beaconing \n", dev->name); 1674 printk(KERN_INFO "%s: Beaconing\n", dev->name);
1675 if (lan_status_diff & LSC_SOFT_ERR) 1675 if (lan_status_diff & LSC_SOFT_ERR)
1676 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n", dev->name); 1676 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
1677 if (lan_status_diff & LSC_TRAN_BCN) 1677 if (lan_status_diff & LSC_TRAN_BCN)
1678 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name); 1678 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n", dev->name);
1679 if (lan_status_diff & LSC_SS) 1679 if (lan_status_diff & LSC_SS)
1680 printk(KERN_INFO "%s: Single Station on the ring \n", dev->name); 1680 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1681 if (lan_status_diff & LSC_RING_REC) 1681 if (lan_status_diff & LSC_RING_REC)
1682 printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name); 1682 printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
1683 if (lan_status_diff & LSC_FDX_MODE) 1683 if (lan_status_diff & LSC_FDX_MODE)
@@ -1686,7 +1686,7 @@ drop_frame:
1686 1686
1687 if (lan_status_diff & LSC_CO) { 1687 if (lan_status_diff & LSC_CO) {
1688 if (streamer_priv->streamer_message_level) 1688 if (streamer_priv->streamer_message_level)
1689 printk(KERN_INFO "%s: Counter Overflow \n", dev->name); 1689 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
1690 1690
1691 /* Issue READ.LOG command */ 1691 /* Issue READ.LOG command */
1692 1692
@@ -1716,7 +1716,7 @@ drop_frame:
1716 streamer_priv->streamer_lan_status = lan_status; 1716 streamer_priv->streamer_lan_status = lan_status;
1717 } /* Lan.change.status */ 1717 } /* Lan.change.status */
1718 else 1718 else
1719 printk(KERN_WARNING "%s: Unknown arb command \n", dev->name); 1719 printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
1720} 1720}
1721 1721
1722static void streamer_asb_bh(struct net_device *dev) 1722static void streamer_asb_bh(struct net_device *dev)
@@ -1747,10 +1747,10 @@ static void streamer_asb_bh(struct net_device *dev)
1747 rc=ntohs(readw(streamer_mmio+LAPD)) >> 8; 1747 rc=ntohs(readw(streamer_mmio+LAPD)) >> 8;
1748 switch (rc) { 1748 switch (rc) {
1749 case 0x01: 1749 case 0x01:
1750 printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name); 1750 printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
1751 break; 1751 break;
1752 case 0x26: 1752 case 0x26:
1753 printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name); 1753 printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
1754 break; 1754 break;
1755 case 0xFF: 1755 case 0xFF:
1756 /* Valid response, everything should be ok again */ 1756 /* Valid response, everything should be ok again */
diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
index 53f631ebb162..785ad1a2157b 100644
--- a/drivers/net/tokenring/madgemc.c
+++ b/drivers/net/tokenring/madgemc.c
@@ -109,7 +109,6 @@ static void madgemc_sifwriteb(struct net_device *dev, unsigned short val, unsign
109 SIFWRITEB(val, reg); 109 SIFWRITEB(val, reg);
110 madgemc_setregpage(dev, 0); 110 madgemc_setregpage(dev, 0);
111 } 111 }
112 return;
113} 112}
114 113
115/* 114/*
@@ -140,7 +139,6 @@ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsign
140 SIFWRITEW(val, reg); 139 SIFWRITEW(val, reg);
141 madgemc_setregpage(dev, 0); 140 madgemc_setregpage(dev, 0);
142 } 141 }
143 return;
144} 142}
145 143
146static struct net_device_ops madgemc_netdev_ops __read_mostly; 144static struct net_device_ops madgemc_netdev_ops __read_mostly;
@@ -505,8 +503,6 @@ static void madgemc_setregpage(struct net_device *dev, int page)
505 dev->base_addr + MC_CONTROL_REG1); 503 dev->base_addr + MC_CONTROL_REG1);
506 } 504 }
507 reg1 = inb(dev->base_addr + MC_CONTROL_REG1); 505 reg1 = inb(dev->base_addr + MC_CONTROL_REG1);
508
509 return;
510} 506}
511 507
512/* 508/*
@@ -527,8 +523,6 @@ static void madgemc_setsifsel(struct net_device *dev, int val)
527 dev->base_addr + MC_CONTROL_REG0); 523 dev->base_addr + MC_CONTROL_REG0);
528 } 524 }
529 reg0 = inb(dev->base_addr + MC_CONTROL_REG0); 525 reg0 = inb(dev->base_addr + MC_CONTROL_REG0);
530
531 return;
532} 526}
533 527
534/* 528/*
@@ -550,8 +544,6 @@ static void madgemc_setint(struct net_device *dev, int val)
550 outb(reg1 | MC_CONTROL_REG1_SINTEN, 544 outb(reg1 | MC_CONTROL_REG1_SINTEN,
551 dev->base_addr + MC_CONTROL_REG1); 545 dev->base_addr + MC_CONTROL_REG1);
552 } 546 }
553
554 return;
555} 547}
556 548
557/* 549/*
@@ -594,8 +586,6 @@ static void madgemc_chipset_close(struct net_device *dev)
594 madgemc_setint(dev, 0); 586 madgemc_setint(dev, 0);
595 /* unmap SIF registers */ 587 /* unmap SIF registers */
596 madgemc_setsifsel(dev, 0); 588 madgemc_setsifsel(dev, 0);
597
598 return;
599} 589}
600 590
601/* 591/*
@@ -656,8 +646,6 @@ static void madgemc_read_rom(struct net_device *dev, struct card_info *card)
656 /* Restore original register values */ 646 /* Restore original register values */
657 outb(reg0, ioaddr + MC_CONTROL_REG0); 647 outb(reg0, ioaddr + MC_CONTROL_REG0);
658 outb(reg1, ioaddr + MC_CONTROL_REG1); 648 outb(reg1, ioaddr + MC_CONTROL_REG1);
659
660 return;
661} 649}
662 650
663static int madgemc_open(struct net_device *dev) 651static int madgemc_open(struct net_device *dev)
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 3a25e0434ae2..3d2fbe60b46e 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -302,7 +302,7 @@ static int olympic_init(struct net_device *dev)
302 olympic_priv=netdev_priv(dev); 302 olympic_priv=netdev_priv(dev);
303 olympic_mmio=olympic_priv->olympic_mmio; 303 olympic_mmio=olympic_priv->olympic_mmio;
304 304
305 printk("%s \n", version); 305 printk("%s\n", version);
306 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq); 306 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
307 307
308 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL); 308 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
@@ -468,7 +468,7 @@ static int olympic_open(struct net_device *dev)
468#if OLYMPIC_DEBUG 468#if OLYMPIC_DEBUG
469 printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA)); 469 printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
470 printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK)); 470 printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
471 printk("Before the open command \n"); 471 printk("Before the open command\n");
472#endif 472#endif
473 do { 473 do {
474 memset_io(init_srb,0,SRB_COMMAND_SIZE); 474 memset_io(init_srb,0,SRB_COMMAND_SIZE);
@@ -520,7 +520,7 @@ static int olympic_open(struct net_device *dev)
520 break; 520 break;
521 } 521 }
522 if (time_after(jiffies, t + 10*HZ)) { 522 if (time_after(jiffies, t + 10*HZ)) {
523 printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ; 523 printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
524 olympic_priv->srb_queued=0; 524 olympic_priv->srb_queued=0;
525 break ; 525 break ;
526 } 526 }
@@ -549,7 +549,7 @@ static int olympic_open(struct net_device *dev)
549 break; 549 break;
550 case 0x07: 550 case 0x07:
551 if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */ 551 if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
552 printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name); 552 printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
553 open_finished = 0 ; 553 open_finished = 0 ;
554 continue; 554 continue;
555 } 555 }
@@ -558,7 +558,7 @@ static int olympic_open(struct net_device *dev)
558 558
559 if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) { 559 if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
560 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name); 560 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
561 printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name); 561 printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name);
562 } else { 562 } else {
563 printk(KERN_WARNING "%s: %s - %s\n", dev->name, 563 printk(KERN_WARNING "%s: %s - %s\n", dev->name,
564 open_maj_error[(err & 0xf0) >> 4], 564 open_maj_error[(err & 0xf0) >> 4],
@@ -759,7 +759,7 @@ static void olympic_rx(struct net_device *dev)
759 olympic_priv->rx_status_last_received++ ; 759 olympic_priv->rx_status_last_received++ ;
760 olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1); 760 olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
761#if OLYMPIC_DEBUG 761#if OLYMPIC_DEBUG
762 printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen)); 762 printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
763#endif 763#endif
764 length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff; 764 length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
765 buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff; 765 buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
@@ -774,15 +774,15 @@ static void olympic_rx(struct net_device *dev)
774 if (l_status_buffercnt & 0x3B000000) { 774 if (l_status_buffercnt & 0x3B000000) {
775 if (olympic_priv->olympic_message_level) { 775 if (olympic_priv->olympic_message_level) {
776 if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */ 776 if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
777 printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name); 777 printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name);
778 if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */ 778 if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
779 printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name); 779 printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name);
780 if (l_status_buffercnt & (1<<27)) /* No receive buffers */ 780 if (l_status_buffercnt & (1<<27)) /* No receive buffers */
781 printk(KERN_WARNING "%s: No receive buffers \n",dev->name); 781 printk(KERN_WARNING "%s: No receive buffers\n",dev->name);
782 if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */ 782 if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
783 printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name); 783 printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name);
784 if (l_status_buffercnt & (1<<24)) /* Received Error Detect */ 784 if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
785 printk(KERN_WARNING "%s: Received Error Detect \n",dev->name); 785 printk(KERN_WARNING "%s: Received Error Detect\n",dev->name);
786 } 786 }
787 olympic_priv->rx_ring_last_received += i ; 787 olympic_priv->rx_ring_last_received += i ;
788 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; 788 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
@@ -796,7 +796,7 @@ static void olympic_rx(struct net_device *dev)
796 } 796 }
797 797
798 if (skb == NULL) { 798 if (skb == NULL) {
799 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ; 799 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ;
800 dev->stats.rx_dropped++; 800 dev->stats.rx_dropped++;
801 /* Update counters even though we don't transfer the frame */ 801 /* Update counters even though we don't transfer the frame */
802 olympic_priv->rx_ring_last_received += i ; 802 olympic_priv->rx_ring_last_received += i ;
@@ -1101,7 +1101,7 @@ static int olympic_close(struct net_device *dev)
1101 } 1101 }
1102 1102
1103 if (t == 0) { 1103 if (t == 0) {
1104 printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ; 1104 printk(KERN_WARNING "%s: SRB timed out. May not be fatal.\n",dev->name);
1105 } 1105 }
1106 olympic_priv->srb_queued=0; 1106 olympic_priv->srb_queued=0;
1107 } 1107 }
@@ -1139,7 +1139,7 @@ static void olympic_set_rx_mode(struct net_device *dev)
1139 u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; 1139 u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
1140 u8 options = 0; 1140 u8 options = 0;
1141 u8 __iomem *srb; 1141 u8 __iomem *srb;
1142 struct dev_mc_list *dmi; 1142 struct netdev_hw_addr *ha;
1143 unsigned char dev_mc_address[4] ; 1143 unsigned char dev_mc_address[4] ;
1144 1144
1145 writel(olympic_priv->srb,olympic_mmio+LAPA); 1145 writel(olympic_priv->srb,olympic_mmio+LAPA);
@@ -1177,11 +1177,11 @@ static void olympic_set_rx_mode(struct net_device *dev)
1177 1177
1178 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; 1178 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1179 1179
1180 netdev_for_each_mc_addr(dmi, dev) { 1180 netdev_for_each_mc_addr(ha, dev) {
1181 dev_mc_address[0] |= dmi->dmi_addr[2] ; 1181 dev_mc_address[0] |= ha->addr[2];
1182 dev_mc_address[1] |= dmi->dmi_addr[3] ; 1182 dev_mc_address[1] |= ha->addr[3];
1183 dev_mc_address[2] |= dmi->dmi_addr[4] ; 1183 dev_mc_address[2] |= ha->addr[4];
1184 dev_mc_address[3] |= dmi->dmi_addr[5] ; 1184 dev_mc_address[3] |= ha->addr[5];
1185 } 1185 }
1186 1186
1187 writeb(SRB_SET_FUNC_ADDRESS,srb+0); 1187 writeb(SRB_SET_FUNC_ADDRESS,srb+0);
@@ -1239,7 +1239,7 @@ static void olympic_srb_bh(struct net_device *dev)
1239 case 0x00: 1239 case 0x00:
1240 break ; 1240 break ;
1241 case 0x01: 1241 case 0x01:
1242 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; 1242 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
1243 break ; 1243 break ;
1244 case 0x04: 1244 case 0x04:
1245 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name); 1245 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
@@ -1266,13 +1266,13 @@ static void olympic_srb_bh(struct net_device *dev)
1266 case 0x00: 1266 case 0x00:
1267 break ; 1267 break ;
1268 case 0x01: 1268 case 0x01:
1269 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; 1269 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
1270 break ; 1270 break ;
1271 case 0x04: 1271 case 0x04:
1272 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; 1272 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1273 break ; 1273 break ;
1274 case 0x39: /* Must deal with this if individual multicast addresses used */ 1274 case 0x39: /* Must deal with this if individual multicast addresses used */
1275 printk(KERN_INFO "%s: Group address not found \n",dev->name); 1275 printk(KERN_INFO "%s: Group address not found\n",dev->name);
1276 break ; 1276 break ;
1277 default: 1277 default:
1278 break ; 1278 break ;
@@ -1287,10 +1287,10 @@ static void olympic_srb_bh(struct net_device *dev)
1287 switch (readb(srb+2)) { 1287 switch (readb(srb+2)) {
1288 case 0x00: 1288 case 0x00:
1289 if (olympic_priv->olympic_message_level) 1289 if (olympic_priv->olympic_message_level)
1290 printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ; 1290 printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name);
1291 break ; 1291 break ;
1292 case 0x01: 1292 case 0x01:
1293 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; 1293 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
1294 break ; 1294 break ;
1295 case 0x04: 1295 case 0x04:
1296 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; 1296 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1310,7 +1310,7 @@ static void olympic_srb_bh(struct net_device *dev)
1310 printk(KERN_INFO "%s: Read Log issued\n",dev->name) ; 1310 printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
1311 break ; 1311 break ;
1312 case 0x01: 1312 case 0x01:
1313 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; 1313 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
1314 break ; 1314 break ;
1315 case 0x04: 1315 case 0x04:
1316 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; 1316 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1328,7 +1328,7 @@ static void olympic_srb_bh(struct net_device *dev)
1328 printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ; 1328 printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
1329 break ; 1329 break ;
1330 case 0x01: 1330 case 0x01:
1331 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; 1331 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
1332 break ; 1332 break ;
1333 case 0x04: 1333 case 0x04:
1334 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; 1334 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1404,7 +1404,7 @@ static void olympic_arb_cmd(struct net_device *dev)
1404 printk("Loc %d = %02x\n",i,readb(frame_data + i)); 1404 printk("Loc %d = %02x\n",i,readb(frame_data + i));
1405 } 1405 }
1406 1406
1407 printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length))); 1407 printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1408} 1408}
1409#endif 1409#endif
1410 mac_frame = dev_alloc_skb(frame_len) ; 1410 mac_frame = dev_alloc_skb(frame_len) ;
@@ -1426,7 +1426,7 @@ static void olympic_arb_cmd(struct net_device *dev)
1426 1426
1427 if (olympic_priv->olympic_network_monitor) { 1427 if (olympic_priv->olympic_network_monitor) {
1428 struct trh_hdr *mac_hdr; 1428 struct trh_hdr *mac_hdr;
1429 printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name); 1429 printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name);
1430 mac_hdr = tr_hdr(mac_frame); 1430 mac_hdr = tr_hdr(mac_frame);
1431 printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n", 1431 printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
1432 dev->name, mac_hdr->daddr); 1432 dev->name, mac_hdr->daddr);
@@ -1489,20 +1489,20 @@ drop_frame:
1489 writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL); 1489 writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
1490 netif_stop_queue(dev); 1490 netif_stop_queue(dev);
1491 olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ; 1491 olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
1492 printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ; 1492 printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
1493 } /* If serious error */ 1493 } /* If serious error */
1494 1494
1495 if (olympic_priv->olympic_message_level) { 1495 if (olympic_priv->olympic_message_level) {
1496 if (lan_status_diff & LSC_SIG_LOSS) 1496 if (lan_status_diff & LSC_SIG_LOSS)
1497 printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ; 1497 printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
1498 if (lan_status_diff & LSC_HARD_ERR) 1498 if (lan_status_diff & LSC_HARD_ERR)
1499 printk(KERN_INFO "%s: Beaconing \n",dev->name); 1499 printk(KERN_INFO "%s: Beaconing\n",dev->name);
1500 if (lan_status_diff & LSC_SOFT_ERR) 1500 if (lan_status_diff & LSC_SOFT_ERR)
1501 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name); 1501 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
1502 if (lan_status_diff & LSC_TRAN_BCN) 1502 if (lan_status_diff & LSC_TRAN_BCN)
1503 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); 1503 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
1504 if (lan_status_diff & LSC_SS) 1504 if (lan_status_diff & LSC_SS)
1505 printk(KERN_INFO "%s: Single Station on the ring \n", dev->name); 1505 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1506 if (lan_status_diff & LSC_RING_REC) 1506 if (lan_status_diff & LSC_RING_REC)
1507 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name); 1507 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
1508 if (lan_status_diff & LSC_FDX_MODE) 1508 if (lan_status_diff & LSC_FDX_MODE)
@@ -1512,7 +1512,7 @@ drop_frame:
1512 if (lan_status_diff & LSC_CO) { 1512 if (lan_status_diff & LSC_CO) {
1513 1513
1514 if (olympic_priv->olympic_message_level) 1514 if (olympic_priv->olympic_message_level)
1515 printk(KERN_INFO "%s: Counter Overflow \n", dev->name); 1515 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
1516 1516
1517 /* Issue READ.LOG command */ 1517 /* Issue READ.LOG command */
1518 1518
@@ -1551,7 +1551,7 @@ drop_frame:
1551 1551
1552 } /* Lan.change.status */ 1552 } /* Lan.change.status */
1553 else 1553 else
1554 printk(KERN_WARNING "%s: Unknown arb command \n", dev->name); 1554 printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
1555} 1555}
1556 1556
1557static void olympic_asb_bh(struct net_device *dev) 1557static void olympic_asb_bh(struct net_device *dev)
@@ -1578,10 +1578,10 @@ static void olympic_asb_bh(struct net_device *dev)
1578 if (olympic_priv->asb_queued == 2) { 1578 if (olympic_priv->asb_queued == 2) {
1579 switch (readb(asb_block+2)) { 1579 switch (readb(asb_block+2)) {
1580 case 0x01: 1580 case 0x01:
1581 printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name); 1581 printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
1582 break ; 1582 break ;
1583 case 0x26: 1583 case 0x26:
1584 printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name); 1584 printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
1585 break ; 1585 break ;
1586 case 0xFF: 1586 case 0xFF:
1587 /* Valid response, everything should be ok again */ 1587 /* Valid response, everything should be ok again */
diff --git a/drivers/net/tokenring/smctr.c b/drivers/net/tokenring/smctr.c
index e40560137c46..0929fff5982c 100644
--- a/drivers/net/tokenring/smctr.c
+++ b/drivers/net/tokenring/smctr.c
@@ -4562,7 +4562,7 @@ static void smctr_timeout(struct net_device *dev)
4562 * fake transmission time and go on trying. Our own timeout 4562 * fake transmission time and go on trying. Our own timeout
4563 * routine is in sktr_timer_chk() 4563 * routine is in sktr_timer_chk()
4564 */ 4564 */
4565 dev->trans_start = jiffies; 4565 dev->trans_start = jiffies; /* prevent tx timeout */
4566 netif_wake_queue(dev); 4566 netif_wake_queue(dev);
4567} 4567}
4568 4568
@@ -5147,8 +5147,6 @@ static void smctr_set_multicast_list(struct net_device *dev)
5147{ 5147{
5148 if(smctr_debug > 10) 5148 if(smctr_debug > 10)
5149 printk(KERN_DEBUG "%s: smctr_set_multicast_list\n", dev->name); 5149 printk(KERN_DEBUG "%s: smctr_set_multicast_list\n", dev->name);
5150
5151 return;
5152} 5150}
5153 5151
5154static int smctr_set_page(struct net_device *dev, __u8 *buf) 5152static int smctr_set_page(struct net_device *dev, __u8 *buf)
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 8b508c922410..435ef7d5470f 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -325,8 +325,6 @@ static void tms380tr_timer_end_wait(unsigned long data)
325 tp->Sleeping = 0; 325 tp->Sleeping = 0;
326 wake_up_interruptible(&tp->wait_for_tok_int); 326 wake_up_interruptible(&tp->wait_for_tok_int);
327 } 327 }
328
329 return;
330} 328}
331 329
332/* 330/*
@@ -460,8 +458,6 @@ static void tms380tr_init_net_local(struct net_device *dev)
460 tp->RplHead = &tp->Rpl[0]; 458 tp->RplHead = &tp->Rpl[0];
461 tp->RplTail = &tp->Rpl[RPL_NUM-1]; 459 tp->RplTail = &tp->Rpl[RPL_NUM-1];
462 tp->RplTail->Status = (RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ); 460 tp->RplTail->Status = (RX_START_FRAME | RX_END_FRAME | RX_FRAME_IRQ);
463
464 return;
465} 461}
466 462
467/* 463/*
@@ -481,8 +477,6 @@ static void tms380tr_init_ipb(struct net_local *tp)
481 tp->ipb.DMA_Abort_Thrhld = DMA_RETRIES; 477 tp->ipb.DMA_Abort_Thrhld = DMA_RETRIES;
482 tp->ipb.SCB_Addr = 0; 478 tp->ipb.SCB_Addr = 0;
483 tp->ipb.SSB_Addr = 0; 479 tp->ipb.SSB_Addr = 0;
484
485 return;
486} 480}
487 481
488/* 482/*
@@ -527,8 +521,6 @@ static void tms380tr_init_opb(struct net_device *dev)
527 521
528 tp->ocpl.ProdIDAddr[0] = LOWORD(Addr); 522 tp->ocpl.ProdIDAddr[0] = LOWORD(Addr);
529 tp->ocpl.ProdIDAddr[1] = HIWORD(Addr); 523 tp->ocpl.ProdIDAddr[1] = HIWORD(Addr);
530
531 return;
532} 524}
533 525
534/* 526/*
@@ -543,8 +535,6 @@ static void tms380tr_open_adapter(struct net_device *dev)
543 535
544 tp->OpenCommandIssued = 1; 536 tp->OpenCommandIssued = 1;
545 tms380tr_exec_cmd(dev, OC_OPEN); 537 tms380tr_exec_cmd(dev, OC_OPEN);
546
547 return;
548} 538}
549 539
550/* 540/*
@@ -554,8 +544,6 @@ static void tms380tr_open_adapter(struct net_device *dev)
554static void tms380tr_disable_interrupts(struct net_device *dev) 544static void tms380tr_disable_interrupts(struct net_device *dev)
555{ 545{
556 SIFWRITEB(0, SIFACL); 546 SIFWRITEB(0, SIFACL);
557
558 return;
559} 547}
560 548
561/* 549/*
@@ -565,8 +553,6 @@ static void tms380tr_disable_interrupts(struct net_device *dev)
565static void tms380tr_enable_interrupts(struct net_device *dev) 553static void tms380tr_enable_interrupts(struct net_device *dev)
566{ 554{
567 SIFWRITEB(ACL_SINTEN, SIFACL); 555 SIFWRITEB(ACL_SINTEN, SIFACL);
568
569 return;
570} 556}
571 557
572/* 558/*
@@ -578,8 +564,6 @@ static void tms380tr_exec_cmd(struct net_device *dev, unsigned short Command)
578 564
579 tp->CMDqueue |= Command; 565 tp->CMDqueue |= Command;
580 tms380tr_chk_outstanding_cmds(dev); 566 tms380tr_chk_outstanding_cmds(dev);
581
582 return;
583} 567}
584 568
585static void tms380tr_timeout(struct net_device *dev) 569static void tms380tr_timeout(struct net_device *dev)
@@ -592,7 +576,7 @@ static void tms380tr_timeout(struct net_device *dev)
592 * fake transmission time and go on trying. Our own timeout 576 * fake transmission time and go on trying. Our own timeout
593 * routine is in tms380tr_timer_chk() 577 * routine is in tms380tr_timer_chk()
594 */ 578 */
595 dev->trans_start = jiffies; 579 dev->trans_start = jiffies; /* prevent tx timeout */
596 netif_wake_queue(dev); 580 netif_wake_queue(dev);
597} 581}
598 582
@@ -712,8 +696,6 @@ static void tms380tr_chk_src_addr(unsigned char *frame, unsigned char *hw_addr)
712 SRBit = frame[8] & 0x80; 696 SRBit = frame[8] & 0x80;
713 memcpy(&frame[8], hw_addr, 6); 697 memcpy(&frame[8], hw_addr, 6);
714 frame[8] |= SRBit; 698 frame[8] |= SRBit;
715
716 return;
717} 699}
718 700
719/* 701/*
@@ -743,8 +725,6 @@ static void tms380tr_timer_chk(unsigned long data)
743 return; 725 return;
744 tp->ReOpenInProgress = 1; 726 tp->ReOpenInProgress = 1;
745 tms380tr_open_adapter(dev); 727 tms380tr_open_adapter(dev);
746
747 return;
748} 728}
749 729
750/* 730/*
@@ -863,8 +843,6 @@ static void tms380tr_reset_interrupt(struct net_device *dev)
863 * and clear STS_SYSTEM_IRQ bit: enable adapter for further interrupts. 843 * and clear STS_SYSTEM_IRQ bit: enable adapter for further interrupts.
864 */ 844 */
865 tms380tr_exec_sifcmd(dev, CMD_SSB_CLEAR | CMD_CLEAR_SYSTEM_IRQ); 845 tms380tr_exec_sifcmd(dev, CMD_SSB_CLEAR | CMD_CLEAR_SYSTEM_IRQ);
866
867 return;
868} 846}
869 847
870/* 848/*
@@ -1119,8 +1097,6 @@ static void tms380tr_cmd_status_irq(struct net_device *dev)
1119 tp->MacStat.frequency_errors += tp->errorlogtable.Frequency_Error; 1097 tp->MacStat.frequency_errors += tp->errorlogtable.Frequency_Error;
1120 tp->MacStat.internal_errors += tp->errorlogtable.Internal_Error; 1098 tp->MacStat.internal_errors += tp->errorlogtable.Internal_Error;
1121 } 1099 }
1122
1123 return;
1124} 1100}
1125 1101
1126/* 1102/*
@@ -1211,17 +1187,17 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
1211 } 1187 }
1212 else 1188 else
1213 { 1189 {
1214 struct dev_mc_list *mclist; 1190 struct netdev_hw_addr *ha;
1215 1191
1216 netdev_for_each_mc_addr(mclist, dev) { 1192 netdev_for_each_mc_addr(ha, dev) {
1217 ((char *)(&tp->ocpl.FunctAddr))[0] |= 1193 ((char *)(&tp->ocpl.FunctAddr))[0] |=
1218 mclist->dmi_addr[2]; 1194 ha->addr[2];
1219 ((char *)(&tp->ocpl.FunctAddr))[1] |= 1195 ((char *)(&tp->ocpl.FunctAddr))[1] |=
1220 mclist->dmi_addr[3]; 1196 ha->addr[3];
1221 ((char *)(&tp->ocpl.FunctAddr))[2] |= 1197 ((char *)(&tp->ocpl.FunctAddr))[2] |=
1222 mclist->dmi_addr[4]; 1198 ha->addr[4];
1223 ((char *)(&tp->ocpl.FunctAddr))[3] |= 1199 ((char *)(&tp->ocpl.FunctAddr))[3] |=
1224 mclist->dmi_addr[5]; 1200 ha->addr[5];
1225 } 1201 }
1226 } 1202 }
1227 tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR); 1203 tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
@@ -1229,7 +1205,6 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
1229 1205
1230 tp->ocpl.OPENOptions = OpenOptions; 1206 tp->ocpl.OPENOptions = OpenOptions;
1231 tms380tr_exec_cmd(dev, OC_MODIFY_OPEN_PARMS); 1207 tms380tr_exec_cmd(dev, OC_MODIFY_OPEN_PARMS);
1232 return;
1233} 1208}
1234 1209
1235/* 1210/*
@@ -1247,7 +1222,6 @@ void tms380tr_wait(unsigned long time)
1247#else 1222#else
1248 udelay(time); 1223 udelay(time);
1249#endif 1224#endif
1250 return;
1251} 1225}
1252 1226
1253/* 1227/*
@@ -1266,8 +1240,6 @@ static void tms380tr_exec_sifcmd(struct net_device *dev, unsigned int WriteValue
1266 SifStsValue = SIFREADW(SIFSTS); 1240 SifStsValue = SIFREADW(SIFSTS);
1267 } while((SifStsValue & CMD_INTERRUPT_ADAPTER) && loop_counter--); 1241 } while((SifStsValue & CMD_INTERRUPT_ADAPTER) && loop_counter--);
1268 SIFWRITEW(cmd, SIFCMD); 1242 SIFWRITEW(cmd, SIFCMD);
1269
1270 return;
1271} 1243}
1272 1244
1273/* 1245/*
@@ -1390,7 +1362,7 @@ static int tms380tr_bringup_diags(struct net_device *dev)
1390 Status &= STS_MASK; 1362 Status &= STS_MASK;
1391 1363
1392 if(tms380tr_debug > 3) 1364 if(tms380tr_debug > 3)
1393 printk(KERN_DEBUG " %04X \n", Status); 1365 printk(KERN_DEBUG " %04X\n", Status);
1394 /* BUD successfully completed */ 1366 /* BUD successfully completed */
1395 if(Status == STS_INITIALIZE) 1367 if(Status == STS_INITIALIZE)
1396 return (1); 1368 return (1);
@@ -1700,8 +1672,6 @@ static void tms380tr_chk_outstanding_cmds(struct net_device *dev)
1700 1672
1701 /* Execute SCB and generate IRQ when done. */ 1673 /* Execute SCB and generate IRQ when done. */
1702 tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST); 1674 tms380tr_exec_sifcmd(dev, CMD_EXECUTE | CMD_SCB_REQUEST);
1703
1704 return;
1705} 1675}
1706 1676
1707/* 1677/*
@@ -1774,8 +1744,6 @@ static void tms380tr_ring_status_irq(struct net_device *dev)
1774 tp->AdapterOpenFlag = 0; 1744 tp->AdapterOpenFlag = 0;
1775 tms380tr_open_adapter(dev); 1745 tms380tr_open_adapter(dev);
1776 } 1746 }
1777
1778 return;
1779} 1747}
1780 1748
1781/* 1749/*
@@ -1846,7 +1814,7 @@ static void tms380tr_chk_irq(struct net_device *dev)
1846 break; 1814 break;
1847 1815
1848 case DMA_WRITE_ABORT: 1816 case DMA_WRITE_ABORT:
1849 printk(KERN_INFO "%s: DMA write operation aborted: \n", 1817 printk(KERN_INFO "%s: DMA write operation aborted:\n",
1850 dev->name); 1818 dev->name);
1851 switch (AdapterCheckBlock[1]) 1819 switch (AdapterCheckBlock[1])
1852 { 1820 {
@@ -1932,8 +1900,6 @@ static void tms380tr_chk_irq(struct net_device *dev)
1932 /* Restart of firmware successful */ 1900 /* Restart of firmware successful */
1933 tp->AdapterOpenFlag = 1; 1901 tp->AdapterOpenFlag = 1;
1934 } 1902 }
1935
1936 return;
1937} 1903}
1938 1904
1939/* 1905/*
@@ -1988,8 +1954,6 @@ static void tms380tr_read_ram(struct net_device *dev, unsigned char *Data,
1988 /* Restore original values */ 1954 /* Restore original values */
1989 SIFWRITEW(old_sifadx, SIFADX); 1955 SIFWRITEW(old_sifadx, SIFADX);
1990 SIFWRITEW(old_sifadr, SIFADR); 1956 SIFWRITEW(old_sifadr, SIFADR);
1991
1992 return;
1993} 1957}
1994 1958
1995/* 1959/*
@@ -2021,8 +1985,6 @@ static void tms380tr_cancel_tx_queue(struct net_local* tp)
2021 dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE); 1985 dma_unmap_single(tp->pdev, tpl->DMABuff, tpl->Skb->len, DMA_TO_DEVICE);
2022 dev_kfree_skb_any(tpl->Skb); 1986 dev_kfree_skb_any(tpl->Skb);
2023 } 1987 }
2024
2025 return;
2026} 1988}
2027 1989
2028/* 1990/*
@@ -2094,7 +2056,6 @@ static void tms380tr_tx_status_irq(struct net_device *dev)
2094 2056
2095 if(!tp->TplFree->NextTPLPtr->BusyFlag) 2057 if(!tp->TplFree->NextTPLPtr->BusyFlag)
2096 netif_wake_queue(dev); 2058 netif_wake_queue(dev);
2097 return;
2098} 2059}
2099 2060
2100/* 2061/*
@@ -2255,8 +2216,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
2255 /* Inform adapter about RPL valid. */ 2216 /* Inform adapter about RPL valid. */
2256 tms380tr_exec_sifcmd(dev, CMD_RX_VALID); 2217 tms380tr_exec_sifcmd(dev, CMD_RX_VALID);
2257 } 2218 }
2258
2259 return;
2260} 2219}
2261 2220
2262/* 2221/*
@@ -2269,8 +2228,6 @@ static void tms380tr_rcv_status_irq(struct net_device *dev)
2269static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status) 2228static void tms380tr_write_rpl_status(RPL *rpl, unsigned int Status)
2270{ 2229{
2271 rpl->Status = Status; 2230 rpl->Status = Status;
2272
2273 return;
2274} 2231}
2275 2232
2276/* 2233/*
@@ -2287,8 +2244,6 @@ static void tms380tr_update_rcv_stats(struct net_local *tp, unsigned char DataPt
2287 /* Test functional bit */ 2244 /* Test functional bit */
2288 if(DataPtr[2] & GROUP_BIT) 2245 if(DataPtr[2] & GROUP_BIT)
2289 tp->MacStat.multicast++; 2246 tp->MacStat.multicast++;
2290
2291 return;
2292} 2247}
2293 2248
2294static int tms380tr_set_mac_address(struct net_device *dev, void *addr) 2249static int tms380tr_set_mac_address(struct net_device *dev, void *addr)
@@ -2318,8 +2273,6 @@ static void tms380tr_dump(unsigned char *Data, int length)
2318 Data[j+0],Data[j+1],Data[j+2],Data[j+3], 2273 Data[j+0],Data[j+1],Data[j+2],Data[j+3],
2319 Data[j+4],Data[j+5],Data[j+6],Data[j+7]); 2274 Data[j+4],Data[j+5],Data[j+6],Data[j+7]);
2320 } 2275 }
2321
2322 return;
2323} 2276}
2324#endif 2277#endif
2325 2278
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 5b1fbb3c3b51..a03730bd1da5 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -263,7 +263,7 @@ static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
263 return; 263 return;
264 udelay(10); 264 udelay(10);
265 } 265 }
266 printk(KERN_ERR "%s function time out \n", __func__); 266 printk(KERN_ERR "%s function time out\n", __func__);
267} 267}
268 268
269static int mii_speed(struct mii_if_info *mii) 269static int mii_speed(struct mii_if_info *mii)
@@ -704,8 +704,8 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
704 704
705 if (i == 0) { 705 if (i == 0) {
706 data->txring[tx].buf0 = dma_map_single(NULL, skb->data, 706 data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
707 skb->len - skb->data_len, DMA_TO_DEVICE); 707 skb_headlen(skb), DMA_TO_DEVICE);
708 data->txring[tx].len = skb->len - skb->data_len; 708 data->txring[tx].len = skb_headlen(skb);
709 misc |= TSI108_TX_SOF; 709 misc |= TSI108_TX_SOF;
710 } else { 710 } else {
711 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 711 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
@@ -1056,7 +1056,7 @@ static void tsi108_stop_ethernet(struct net_device *dev)
1056 return; 1056 return;
1057 udelay(10); 1057 udelay(10);
1058 } 1058 }
1059 printk(KERN_ERR "%s function time out \n", __func__); 1059 printk(KERN_ERR "%s function time out\n", __func__);
1060} 1060}
1061 1061
1062static void tsi108_reset_ether(struct tsi108_prv_data * data) 1062static void tsi108_reset_ether(struct tsi108_prv_data * data)
@@ -1186,15 +1186,15 @@ static void tsi108_set_rx_mode(struct net_device *dev)
1186 1186
1187 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { 1187 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
1188 int i; 1188 int i;
1189 struct dev_mc_list *mc; 1189 struct netdev_hw_addr *ha;
1190 rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH; 1190 rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
1191 1191
1192 memset(data->mc_hash, 0, sizeof(data->mc_hash)); 1192 memset(data->mc_hash, 0, sizeof(data->mc_hash));
1193 1193
1194 netdev_for_each_mc_addr(mc, dev) { 1194 netdev_for_each_mc_addr(ha, dev) {
1195 u32 hash, crc; 1195 u32 hash, crc;
1196 1196
1197 crc = ether_crc(6, mc->dmi_addr); 1197 crc = ether_crc(6, ha->addr);
1198 hash = crc >> 23; 1198 hash = crc >> 23;
1199 __set_bit(hash, &data->mc_hash[0]); 1199 __set_bit(hash, &data->mc_hash[0]);
1200 } 1200 }
@@ -1233,7 +1233,7 @@ static void tsi108_init_phy(struct net_device *dev)
1233 udelay(10); 1233 udelay(10);
1234 } 1234 }
1235 if (i == 0) 1235 if (i == 0)
1236 printk(KERN_ERR "%s function time out \n", __func__); 1236 printk(KERN_ERR "%s function time out\n", __func__);
1237 1237
1238 if (data->phy_type == TSI108_PHY_BCM54XX) { 1238 if (data->phy_type == TSI108_PHY_BCM54XX) {
1239 tsi108_write_mii(data, 0x09, 0x0300); 1239 tsi108_write_mii(data, 0x09, 0x0300);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 19cafc2b418d..c0e70006374e 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -654,7 +654,6 @@ static netdev_tx_t de_start_xmit (struct sk_buff *skb,
654 654
655 /* Trigger an immediate transmit demand. */ 655 /* Trigger an immediate transmit demand. */
656 dw32(TxPoll, NormalTxPoll); 656 dw32(TxPoll, NormalTxPoll);
657 dev->trans_start = jiffies;
658 657
659 return NETDEV_TX_OK; 658 return NETDEV_TX_OK;
660} 659}
@@ -671,15 +670,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
671{ 670{
672 struct de_private *de = netdev_priv(dev); 671 struct de_private *de = netdev_priv(dev);
673 u16 hash_table[32]; 672 u16 hash_table[32];
674 struct dev_mc_list *mclist; 673 struct netdev_hw_addr *ha;
675 int i; 674 int i;
676 u16 *eaddrs; 675 u16 *eaddrs;
677 676
678 memset(hash_table, 0, sizeof(hash_table)); 677 memset(hash_table, 0, sizeof(hash_table));
679 set_bit_le(255, hash_table); /* Broadcast entry */ 678 set_bit_le(255, hash_table); /* Broadcast entry */
680 /* This should work on big-endian machines as well. */ 679 /* This should work on big-endian machines as well. */
681 netdev_for_each_mc_addr(mclist, dev) { 680 netdev_for_each_mc_addr(ha, dev) {
682 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff; 681 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
683 682
684 set_bit_le(index, hash_table); 683 set_bit_le(index, hash_table);
685 } 684 }
@@ -700,13 +699,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
700static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) 699static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
701{ 700{
702 struct de_private *de = netdev_priv(dev); 701 struct de_private *de = netdev_priv(dev);
703 struct dev_mc_list *mclist; 702 struct netdev_hw_addr *ha;
704 u16 *eaddrs; 703 u16 *eaddrs;
705 704
706 /* We have <= 14 addresses so we can use the wonderful 705 /* We have <= 14 addresses so we can use the wonderful
707 16 address perfect filtering of the Tulip. */ 706 16 address perfect filtering of the Tulip. */
708 netdev_for_each_mc_addr(mclist, dev) { 707 netdev_for_each_mc_addr(ha, dev) {
709 eaddrs = (u16 *)mclist->dmi_addr; 708 eaddrs = (u16 *) ha->addr;
710 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 709 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
711 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 710 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
712 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 711 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 09b57193a16a..75a64c88cf7a 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1337,7 +1337,7 @@ de4x5_open(struct net_device *dev)
1337 } 1337 }
1338 1338
1339 lp->interrupt = UNMASK_INTERRUPTS; 1339 lp->interrupt = UNMASK_INTERRUPTS;
1340 dev->trans_start = jiffies; 1340 dev->trans_start = jiffies; /* prevent tx timeout */
1341 1341
1342 START_DE4X5; 1342 START_DE4X5;
1343 1343
@@ -1507,7 +1507,6 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1507 outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */ 1507 outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
1508 1508
1509 lp->tx_new = (++lp->tx_new) % lp->txRingSize; 1509 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1510 dev->trans_start = jiffies;
1511 1510
1512 if (TX_BUFFS_AVAIL) { 1511 if (TX_BUFFS_AVAIL) {
1513 netif_start_queue(dev); /* Another pkt may be queued */ 1512 netif_start_queue(dev); /* Another pkt may be queued */
@@ -1884,8 +1883,6 @@ de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len)
1884 if (lp->pktStats.bins[0] == 0) { /* Reset counters */ 1883 if (lp->pktStats.bins[0] == 0) { /* Reset counters */
1885 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats)); 1884 memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
1886 } 1885 }
1887
1888 return;
1889} 1886}
1890 1887
1891/* 1888/*
@@ -1937,7 +1934,7 @@ set_multicast_list(struct net_device *dev)
1937 1934
1938 lp->tx_new = (++lp->tx_new) % lp->txRingSize; 1935 lp->tx_new = (++lp->tx_new) % lp->txRingSize;
1939 outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */ 1936 outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
1940 dev->trans_start = jiffies; 1937 dev->trans_start = jiffies; /* prevent tx timeout */
1941 } 1938 }
1942 } 1939 }
1943} 1940}
@@ -1951,7 +1948,7 @@ static void
1951SetMulticastFilter(struct net_device *dev) 1948SetMulticastFilter(struct net_device *dev)
1952{ 1949{
1953 struct de4x5_private *lp = netdev_priv(dev); 1950 struct de4x5_private *lp = netdev_priv(dev);
1954 struct dev_mc_list *dmi; 1951 struct netdev_hw_addr *ha;
1955 u_long iobase = dev->base_addr; 1952 u_long iobase = dev->base_addr;
1956 int i, bit, byte; 1953 int i, bit, byte;
1957 u16 hashcode; 1954 u16 hashcode;
@@ -1966,8 +1963,8 @@ SetMulticastFilter(struct net_device *dev)
1966 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) { 1963 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
1967 omr |= OMR_PM; /* Pass all multicasts */ 1964 omr |= OMR_PM; /* Pass all multicasts */
1968 } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ 1965 } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
1969 netdev_for_each_mc_addr(dmi, dev) { 1966 netdev_for_each_mc_addr(ha, dev) {
1970 addrs = dmi->dmi_addr; 1967 addrs = ha->addr;
1971 if ((*addrs & 0x01) == 1) { /* multicast address? */ 1968 if ((*addrs & 0x01) == 1) { /* multicast address? */
1972 crc = ether_crc_le(ETH_ALEN, addrs); 1969 crc = ether_crc_le(ETH_ALEN, addrs);
1973 hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */ 1970 hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
@@ -1983,8 +1980,8 @@ SetMulticastFilter(struct net_device *dev)
1983 } 1980 }
1984 } 1981 }
1985 } else { /* Perfect filtering */ 1982 } else { /* Perfect filtering */
1986 netdev_for_each_mc_addr(dmi, dev) { 1983 netdev_for_each_mc_addr(ha, dev) {
1987 addrs = dmi->dmi_addr; 1984 addrs = ha->addr;
1988 for (i=0; i<ETH_ALEN; i++) { 1985 for (i=0; i<ETH_ALEN; i++) {
1989 *(pa + (i&1)) = *addrs++; 1986 *(pa + (i&1)) = *addrs++;
1990 if (i & 0x01) pa += 4; 1987 if (i & 0x01) pa += 4;
@@ -1992,8 +1989,6 @@ SetMulticastFilter(struct net_device *dev)
1992 } 1989 }
1993 } 1990 }
1994 outl(omr, DE4X5_OMR); 1991 outl(omr, DE4X5_OMR);
1995
1996 return;
1997} 1992}
1998 1993
1999#ifdef CONFIG_EISA 1994#ifdef CONFIG_EISA
@@ -2188,8 +2183,6 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
2188 return; 2183 return;
2189 } 2184 }
2190 } 2185 }
2191
2192 return;
2193} 2186}
2194 2187
2195/* 2188/*
@@ -3292,8 +3285,6 @@ de4x5_init_connection(struct net_device *dev)
3292 outl(POLL_DEMAND, DE4X5_TPD); 3285 outl(POLL_DEMAND, DE4X5_TPD);
3293 3286
3294 netif_wake_queue(dev); 3287 netif_wake_queue(dev);
3295
3296 return;
3297} 3288}
3298 3289
3299/* 3290/*
@@ -3665,8 +3656,6 @@ de4x5_free_rx_buffs(struct net_device *dev)
3665 lp->rx_ring[i].status = 0; 3656 lp->rx_ring[i].status = 0;
3666 lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */ 3657 lp->rx_skb[i] = (struct sk_buff *)1; /* Dummy entry */
3667 } 3658 }
3668
3669 return;
3670} 3659}
3671 3660
3672static void 3661static void
@@ -3709,8 +3698,6 @@ de4x5_save_skbs(struct net_device *dev)
3709 lp->cache.save_cnt++; 3698 lp->cache.save_cnt++;
3710 START_DE4X5; 3699 START_DE4X5;
3711 } 3700 }
3712
3713 return;
3714} 3701}
3715 3702
3716static void 3703static void
@@ -3742,8 +3729,6 @@ de4x5_rst_desc_ring(struct net_device *dev)
3742 lp->cache.save_cnt--; 3729 lp->cache.save_cnt--;
3743 START_DE4X5; 3730 START_DE4X5;
3744 } 3731 }
3745
3746 return;
3747} 3732}
3748 3733
3749static void 3734static void
@@ -3772,8 +3757,6 @@ de4x5_cache_state(struct net_device *dev, int flag)
3772 } 3757 }
3773 break; 3758 break;
3774 } 3759 }
3775
3776 return;
3777} 3760}
3778 3761
3779static void 3762static void
@@ -3846,8 +3829,6 @@ de4x5_setup_intr(struct net_device *dev)
3846 outl(sts, DE4X5_STS); 3829 outl(sts, DE4X5_STS);
3847 ENABLE_IRQs; 3830 ENABLE_IRQs;
3848 } 3831 }
3849
3850 return;
3851} 3832}
3852 3833
3853/* 3834/*
@@ -3880,8 +3861,6 @@ reset_init_sia(struct net_device *dev, s32 csr13, s32 csr14, s32 csr15)
3880 outl(csr13, DE4X5_SICR); 3861 outl(csr13, DE4X5_SICR);
3881 3862
3882 mdelay(10); 3863 mdelay(10);
3883
3884 return;
3885} 3864}
3886 3865
3887/* 3866/*
@@ -3902,8 +3881,6 @@ create_packet(struct net_device *dev, char *frame, int len)
3902 3881
3903 *buf++ = 0; /* Packet length (2 bytes) */ 3882 *buf++ = 0; /* Packet length (2 bytes) */
3904 *buf++ = 1; 3883 *buf++ = 1;
3905
3906 return;
3907} 3884}
3908 3885
3909/* 3886/*
@@ -4007,8 +3984,6 @@ DevicePresent(struct net_device *dev, u_long aprom_addr)
4007 } 3984 }
4008 de4x5_dbg_srom((struct de4x5_srom *)&lp->srom); 3985 de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
4009 } 3986 }
4010
4011 return;
4012} 3987}
4013 3988
4014/* 3989/*
@@ -4046,8 +4021,6 @@ enet_addr_rst(u_long aprom_addr)
4046 } 4021 }
4047 } 4022 }
4048 } 4023 }
4049
4050 return;
4051} 4024}
4052 4025
4053/* 4026/*
@@ -4187,8 +4160,6 @@ srom_repair(struct net_device *dev, int card)
4187 lp->useSROM = true; 4160 lp->useSROM = true;
4188 break; 4161 break;
4189 } 4162 }
4190
4191 return;
4192} 4163}
4193 4164
4194/* 4165/*
@@ -4262,8 +4233,6 @@ srom_latch(u_int command, u_long addr)
4262 sendto_srom(command, addr); 4233 sendto_srom(command, addr);
4263 sendto_srom(command | DT_CLK, addr); 4234 sendto_srom(command | DT_CLK, addr);
4264 sendto_srom(command, addr); 4235 sendto_srom(command, addr);
4265
4266 return;
4267} 4236}
4268 4237
4269static void 4238static void
@@ -4272,8 +4241,6 @@ srom_command(u_int command, u_long addr)
4272 srom_latch(command, addr); 4241 srom_latch(command, addr);
4273 srom_latch(command, addr); 4242 srom_latch(command, addr);
4274 srom_latch((command & 0x0000ff00) | DT_CS, addr); 4243 srom_latch((command & 0x0000ff00) | DT_CS, addr);
4275
4276 return;
4277} 4244}
4278 4245
4279static void 4246static void
@@ -4288,8 +4255,6 @@ srom_address(u_int command, u_long addr, u_char offset)
4288 udelay(1); 4255 udelay(1);
4289 4256
4290 i = (getfrom_srom(addr) >> 3) & 0x01; 4257 i = (getfrom_srom(addr) >> 3) & 0x01;
4291
4292 return;
4293} 4258}
4294 4259
4295static short 4260static short
@@ -4323,8 +4288,6 @@ srom_busy(u_int command, u_long addr)
4323 } 4288 }
4324 4289
4325 sendto_srom(command & 0x0000ff00, addr); 4290 sendto_srom(command & 0x0000ff00, addr);
4326
4327 return;
4328} 4291}
4329*/ 4292*/
4330 4293
@@ -4333,8 +4296,6 @@ sendto_srom(u_int command, u_long addr)
4333{ 4296{
4334 outl(command, addr); 4297 outl(command, addr);
4335 udelay(1); 4298 udelay(1);
4336
4337 return;
4338} 4299}
4339 4300
4340static int 4301static int
@@ -4433,8 +4394,6 @@ srom_init(struct net_device *dev)
4433 p += ((*p & BLOCK_LEN) + 1); 4394 p += ((*p & BLOCK_LEN) + 1);
4434 } 4395 }
4435 } 4396 }
4436
4437 return;
4438} 4397}
4439 4398
4440/* 4399/*
@@ -4463,8 +4422,6 @@ srom_exec(struct net_device *dev, u_char *p)
4463 outl(lp->cache.csr14, DE4X5_STRR); 4422 outl(lp->cache.csr14, DE4X5_STRR);
4464 outl(lp->cache.csr13, DE4X5_SICR); 4423 outl(lp->cache.csr13, DE4X5_SICR);
4465 } 4424 }
4466
4467 return;
4468} 4425}
4469 4426
4470/* 4427/*
@@ -4889,8 +4846,6 @@ mii_wr(int data, u_char phyreg, u_char phyaddr, u_long ioaddr)
4889 mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */ 4846 mii_ta(MII_STWR, ioaddr); /* Turn around time - 2 MDC */
4890 data = mii_swap(data, 16); /* Swap data bit ordering */ 4847 data = mii_swap(data, 16); /* Swap data bit ordering */
4891 mii_wdata(data, 16, ioaddr); /* Write data */ 4848 mii_wdata(data, 16, ioaddr); /* Write data */
4892
4893 return;
4894} 4849}
4895 4850
4896static int 4851static int
@@ -4916,8 +4871,6 @@ mii_wdata(int data, int len, u_long ioaddr)
4916 sendto_mii(MII_MWR | MII_WR, data, ioaddr); 4871 sendto_mii(MII_MWR | MII_WR, data, ioaddr);
4917 data >>= 1; 4872 data >>= 1;
4918 } 4873 }
4919
4920 return;
4921} 4874}
4922 4875
4923static void 4876static void
@@ -4930,8 +4883,6 @@ mii_address(u_char addr, u_long ioaddr)
4930 sendto_mii(MII_MWR | MII_WR, addr, ioaddr); 4883 sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
4931 addr >>= 1; 4884 addr >>= 1;
4932 } 4885 }
4933
4934 return;
4935} 4886}
4936 4887
4937static void 4888static void
@@ -4943,8 +4894,6 @@ mii_ta(u_long rw, u_long ioaddr)
4943 } else { 4894 } else {
4944 getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */ 4895 getfrom_mii(MII_MRD | MII_RD, ioaddr); /* Tri-state MDIO */
4945 } 4896 }
4946
4947 return;
4948} 4897}
4949 4898
4950static int 4899static int
@@ -4971,8 +4920,6 @@ sendto_mii(u32 command, int data, u_long ioaddr)
4971 udelay(1); 4920 udelay(1);
4972 outl(command | MII_MDC | j, ioaddr); 4921 outl(command | MII_MDC | j, ioaddr);
4973 udelay(1); 4922 udelay(1);
4974
4975 return;
4976} 4923}
4977 4924
4978static int 4925static int
@@ -5077,7 +5024,7 @@ mii_get_phy(struct net_device *dev)
5077 lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ 5024 lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
5078 lp->mii_cnt++; 5025 lp->mii_cnt++;
5079 lp->active++; 5026 lp->active++;
5080 printk("%s: Using generic MII device control. If the board doesn't operate, \nplease mail the following dump to the author:\n", dev->name); 5027 printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
5081 j = de4x5_debug; 5028 j = de4x5_debug;
5082 de4x5_debug |= DEBUG_MII; 5029 de4x5_debug |= DEBUG_MII;
5083 de4x5_dbg_mii(dev, k); 5030 de4x5_dbg_mii(dev, k);
@@ -5186,8 +5133,6 @@ gep_wr(s32 data, struct net_device *dev)
5186 } else if ((lp->chipset & ~0x00ff) == DC2114x) { 5133 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
5187 outl((data<<16) | lp->cache.csr15, DE4X5_SIGR); 5134 outl((data<<16) | lp->cache.csr15, DE4X5_SIGR);
5188 } 5135 }
5189
5190 return;
5191} 5136}
5192 5137
5193static int 5138static int
@@ -5247,8 +5192,6 @@ yawn(struct net_device *dev, int state)
5247 break; 5192 break;
5248 } 5193 }
5249 } 5194 }
5250
5251 return;
5252} 5195}
5253 5196
5254static void 5197static void
@@ -5290,8 +5233,6 @@ de4x5_parse_params(struct net_device *dev)
5290 } 5233 }
5291 *q = t; 5234 *q = t;
5292 } 5235 }
5293
5294 return;
5295} 5236}
5296 5237
5297static void 5238static void
@@ -5337,12 +5278,10 @@ de4x5_dbg_open(struct net_device *dev)
5337 } 5278 }
5338 } 5279 }
5339 printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf)); 5280 printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
5340 printk("Ring size: \nRX: %d\nTX: %d\n", 5281 printk("Ring size:\nRX: %d\nTX: %d\n",
5341 (short)lp->rxRingSize, 5282 (short)lp->rxRingSize,
5342 (short)lp->txRingSize); 5283 (short)lp->txRingSize);
5343 } 5284 }
5344
5345 return;
5346} 5285}
5347 5286
5348static void 5287static void
@@ -5369,8 +5308,6 @@ de4x5_dbg_mii(struct net_device *dev, int k)
5369 printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII)); 5308 printk("MII 20: %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
5370 } 5309 }
5371 } 5310 }
5372
5373 return;
5374} 5311}
5375 5312
5376static void 5313static void
@@ -5395,8 +5332,6 @@ de4x5_dbg_media(struct net_device *dev)
5395 } 5332 }
5396 lp->c_media = lp->media; 5333 lp->c_media = lp->media;
5397 } 5334 }
5398
5399 return;
5400} 5335}
5401 5336
5402static void 5337static void
@@ -5417,8 +5352,6 @@ de4x5_dbg_srom(struct de4x5_srom *p)
5417 printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i)); 5352 printk("%3d %04x\n", i<<1, (u_short)*((u_short *)p+i));
5418 } 5353 }
5419 } 5354 }
5420
5421 return;
5422} 5355}
5423 5356
5424static void 5357static void
@@ -5440,8 +5373,6 @@ de4x5_dbg_rx(struct sk_buff *skb, int len)
5440 printk("\n"); 5373 printk("\n");
5441 } 5374 }
5442 } 5375 }
5443
5444 return;
5445} 5376}
5446 5377
5447/* 5378/*
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 9568156dea98..29e6c63d39fd 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -1118,7 +1118,6 @@ static void dmfe_ethtool_get_wol(struct net_device *dev,
1118 1118
1119 wolinfo->supported = WAKE_PHY | WAKE_MAGIC; 1119 wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
1120 wolinfo->wolopts = db->wol_mode; 1120 wolinfo->wolopts = db->wol_mode;
1121 return;
1122} 1121}
1123 1122
1124 1123
@@ -1180,11 +1179,11 @@ static void dmfe_timer(unsigned long data)
1180 1179
1181 /* TX polling kick monitor */ 1180 /* TX polling kick monitor */
1182 if ( db->tx_packet_cnt && 1181 if ( db->tx_packet_cnt &&
1183 time_after(jiffies, dev->trans_start + DMFE_TX_KICK) ) { 1182 time_after(jiffies, dev_trans_start(dev) + DMFE_TX_KICK) ) {
1184 outl(0x1, dev->base_addr + DCR1); /* Tx polling again */ 1183 outl(0x1, dev->base_addr + DCR1); /* Tx polling again */
1185 1184
1186 /* TX Timeout */ 1185 /* TX Timeout */
1187 if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) { 1186 if (time_after(jiffies, dev_trans_start(dev) + DMFE_TX_TIMEOUT) ) {
1188 db->reset_TXtimeout++; 1187 db->reset_TXtimeout++;
1189 db->wait_reset = 1; 1188 db->wait_reset = 1;
1190 dev_warn(&dev->dev, "Tx timeout - resetting\n"); 1189 dev_warn(&dev->dev, "Tx timeout - resetting\n");
@@ -1453,7 +1452,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1453 1452
1454static void dm9132_id_table(struct DEVICE *dev) 1453static void dm9132_id_table(struct DEVICE *dev)
1455{ 1454{
1456 struct dev_mc_list *mcptr; 1455 struct netdev_hw_addr *ha;
1457 u16 * addrptr; 1456 u16 * addrptr;
1458 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */ 1457 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
1459 u32 hash_val; 1458 u32 hash_val;
@@ -1477,8 +1476,8 @@ static void dm9132_id_table(struct DEVICE *dev)
1477 hash_table[3] = 0x8000; 1476 hash_table[3] = 0x8000;
1478 1477
1479 /* the multicast address in Hash Table : 64 bits */ 1478 /* the multicast address in Hash Table : 64 bits */
1480 netdev_for_each_mc_addr(mcptr, dev) { 1479 netdev_for_each_mc_addr(ha, dev) {
1481 hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f; 1480 hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
1482 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 1481 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1483 } 1482 }
1484 1483
@@ -1496,7 +1495,7 @@ static void dm9132_id_table(struct DEVICE *dev)
1496static void send_filter_frame(struct DEVICE *dev) 1495static void send_filter_frame(struct DEVICE *dev)
1497{ 1496{
1498 struct dmfe_board_info *db = netdev_priv(dev); 1497 struct dmfe_board_info *db = netdev_priv(dev);
1499 struct dev_mc_list *mcptr; 1498 struct netdev_hw_addr *ha;
1500 struct tx_desc *txptr; 1499 struct tx_desc *txptr;
1501 u16 * addrptr; 1500 u16 * addrptr;
1502 u32 * suptr; 1501 u32 * suptr;
@@ -1519,8 +1518,8 @@ static void send_filter_frame(struct DEVICE *dev)
1519 *suptr++ = 0xffff; 1518 *suptr++ = 0xffff;
1520 1519
1521 /* fit the multicast address */ 1520 /* fit the multicast address */
1522 netdev_for_each_mc_addr(mcptr, dev) { 1521 netdev_for_each_mc_addr(ha, dev) {
1523 addrptr = (u16 *) mcptr->dmi_addr; 1522 addrptr = (u16 *) ha->addr;
1524 *suptr++ = addrptr[0]; 1523 *suptr++ = addrptr[0];
1525 *suptr++ = addrptr[1]; 1524 *suptr++ = addrptr[1];
1526 *suptr++ = addrptr[2]; 1525 *suptr++ = addrptr[2];
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index 68b170ae4d15..a0c770ee4b64 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -396,8 +396,6 @@ void tulip_select_media(struct net_device *dev, int startup)
396 tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0); 396 tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
397 397
398 mdelay(1); 398 mdelay(1);
399
400 return;
401} 399}
402 400
403/* 401/*
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index 966efa1a27d7..a63e64b6863d 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -67,7 +67,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
67 */ 67 */
68 if (tulip_media_cap[dev->if_port] & MediaIsMII) 68 if (tulip_media_cap[dev->if_port] & MediaIsMII)
69 return; 69 return;
70 if (! tp->nwayset || time_after(jiffies, dev->trans_start + 1*HZ)) { 70 if (! tp->nwayset || time_after(jiffies, dev_trans_start(dev) + 1*HZ)) {
71 tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff); 71 tp->csr6 = 0x00420000 | (tp->csr6 & 0x0000fdff);
72 iowrite32(tp->csr6, ioaddr + CSR6); 72 iowrite32(tp->csr6, ioaddr + CSR6);
73 iowrite32(0x30, ioaddr + CSR12); 73 iowrite32(0x30, ioaddr + CSR12);
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 3810db9dc2de..254643ed945e 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -605,7 +605,7 @@ static void tulip_tx_timeout(struct net_device *dev)
605 605
606out_unlock: 606out_unlock:
607 spin_unlock_irqrestore (&tp->lock, flags); 607 spin_unlock_irqrestore (&tp->lock, flags);
608 dev->trans_start = jiffies; 608 dev->trans_start = jiffies; /* prevent tx timeout */
609 netif_wake_queue (dev); 609 netif_wake_queue (dev);
610} 610}
611 611
@@ -707,8 +707,6 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
707 707
708 spin_unlock_irqrestore(&tp->lock, flags); 708 spin_unlock_irqrestore(&tp->lock, flags);
709 709
710 dev->trans_start = jiffies;
711
712 return NETDEV_TX_OK; 710 return NETDEV_TX_OK;
713} 711}
714 712
@@ -991,15 +989,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
991{ 989{
992 struct tulip_private *tp = netdev_priv(dev); 990 struct tulip_private *tp = netdev_priv(dev);
993 u16 hash_table[32]; 991 u16 hash_table[32];
994 struct dev_mc_list *mclist; 992 struct netdev_hw_addr *ha;
995 int i; 993 int i;
996 u16 *eaddrs; 994 u16 *eaddrs;
997 995
998 memset(hash_table, 0, sizeof(hash_table)); 996 memset(hash_table, 0, sizeof(hash_table));
999 set_bit_le(255, hash_table); /* Broadcast entry */ 997 set_bit_le(255, hash_table); /* Broadcast entry */
1000 /* This should work on big-endian machines as well. */ 998 /* This should work on big-endian machines as well. */
1001 netdev_for_each_mc_addr(mclist, dev) { 999 netdev_for_each_mc_addr(ha, dev) {
1002 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff; 1000 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1003 1001
1004 set_bit_le(index, hash_table); 1002 set_bit_le(index, hash_table);
1005 } 1003 }
@@ -1019,13 +1017,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1019static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) 1017static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1020{ 1018{
1021 struct tulip_private *tp = netdev_priv(dev); 1019 struct tulip_private *tp = netdev_priv(dev);
1022 struct dev_mc_list *mclist; 1020 struct netdev_hw_addr *ha;
1023 u16 *eaddrs; 1021 u16 *eaddrs;
1024 1022
1025 /* We have <= 14 addresses so we can use the wonderful 1023 /* We have <= 14 addresses so we can use the wonderful
1026 16 address perfect filtering of the Tulip. */ 1024 16 address perfect filtering of the Tulip. */
1027 netdev_for_each_mc_addr(mclist, dev) { 1025 netdev_for_each_mc_addr(ha, dev) {
1028 eaddrs = (u16 *)mclist->dmi_addr; 1026 eaddrs = (u16 *) ha->addr;
1029 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 1027 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1030 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 1028 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1031 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 1029 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
@@ -1062,7 +1060,7 @@ static void set_rx_mode(struct net_device *dev)
1062 } else if (tp->flags & MC_HASH_ONLY) { 1060 } else if (tp->flags & MC_HASH_ONLY) {
1063 /* Some work-alikes have only a 64-entry hash filter table. */ 1061 /* Some work-alikes have only a 64-entry hash filter table. */
1064 /* Should verify correctness on big-endian/__powerpc__ */ 1062 /* Should verify correctness on big-endian/__powerpc__ */
1065 struct dev_mc_list *mclist; 1063 struct netdev_hw_addr *ha;
1066 if (netdev_mc_count(dev) > 64) { 1064 if (netdev_mc_count(dev) > 64) {
1067 /* Arbitrary non-effective limit. */ 1065 /* Arbitrary non-effective limit. */
1068 tp->csr6 |= AcceptAllMulticast; 1066 tp->csr6 |= AcceptAllMulticast;
@@ -1070,18 +1068,21 @@ static void set_rx_mode(struct net_device *dev)
1070 } else { 1068 } else {
1071 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */ 1069 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
1072 int filterbit; 1070 int filterbit;
1073 netdev_for_each_mc_addr(mclist, dev) { 1071 netdev_for_each_mc_addr(ha, dev) {
1074 if (tp->flags & COMET_MAC_ADDR) 1072 if (tp->flags & COMET_MAC_ADDR)
1075 filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr); 1073 filterbit = ether_crc_le(ETH_ALEN,
1074 ha->addr);
1076 else 1075 else
1077 filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 1076 filterbit = ether_crc(ETH_ALEN,
1077 ha->addr) >> 26;
1078 filterbit &= 0x3f; 1078 filterbit &= 0x3f;
1079 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); 1079 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1080 if (tulip_debug > 2) 1080 if (tulip_debug > 2)
1081 dev_info(&dev->dev, 1081 dev_info(&dev->dev,
1082 "Added filter for %pM %08x bit %d\n", 1082 "Added filter for %pM %08x bit %d\n",
1083 mclist->dmi_addr, 1083 ha->addr,
1084 ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit); 1084 ether_crc(ETH_ALEN, ha->addr),
1085 filterbit);
1085 } 1086 }
1086 if (mc_filter[0] == tp->mc_filter[0] && 1087 if (mc_filter[0] == tp->mc_filter[0] &&
1087 mc_filter[1] == tp->mc_filter[1]) 1088 mc_filter[1] == tp->mc_filter[1])
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index a589dd34891e..96de5829b940 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1040,11 +1040,11 @@ static void uli526x_timer(unsigned long data)
1040 1040
1041 /* TX polling kick monitor */ 1041 /* TX polling kick monitor */
1042 if ( db->tx_packet_cnt && 1042 if ( db->tx_packet_cnt &&
1043 time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) { 1043 time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_KICK) ) {
1044 outl(0x1, dev->base_addr + DCR1); // Tx polling again 1044 outl(0x1, dev->base_addr + DCR1); // Tx polling again
1045 1045
1046 // TX Timeout 1046 // TX Timeout
1047 if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) { 1047 if ( time_after(jiffies, dev_trans_start(dev) + ULI526X_TX_TIMEOUT) ) {
1048 db->reset_TXtimeout++; 1048 db->reset_TXtimeout++;
1049 db->wait_reset = 1; 1049 db->wait_reset = 1;
1050 printk( "%s: Tx timeout - resetting\n", 1050 printk( "%s: Tx timeout - resetting\n",
@@ -1393,7 +1393,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1393static void send_filter_frame(struct net_device *dev, int mc_cnt) 1393static void send_filter_frame(struct net_device *dev, int mc_cnt)
1394{ 1394{
1395 struct uli526x_board_info *db = netdev_priv(dev); 1395 struct uli526x_board_info *db = netdev_priv(dev);
1396 struct dev_mc_list *mcptr; 1396 struct netdev_hw_addr *ha;
1397 struct tx_desc *txptr; 1397 struct tx_desc *txptr;
1398 u16 * addrptr; 1398 u16 * addrptr;
1399 u32 * suptr; 1399 u32 * suptr;
@@ -1416,8 +1416,8 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
1416 *suptr++ = 0xffff << FLT_SHIFT; 1416 *suptr++ = 0xffff << FLT_SHIFT;
1417 1417
1418 /* fit the multicast address */ 1418 /* fit the multicast address */
1419 netdev_for_each_mc_addr(mcptr, dev) { 1419 netdev_for_each_mc_addr(ha, dev) {
1420 addrptr = (u16 *) mcptr->dmi_addr; 1420 addrptr = (u16 *) ha->addr;
1421 *suptr++ = addrptr[0] << FLT_SHIFT; 1421 *suptr++ = addrptr[0] << FLT_SHIFT;
1422 *suptr++ = addrptr[1] << FLT_SHIFT; 1422 *suptr++ = addrptr[1] << FLT_SHIFT;
1423 *suptr++ = addrptr[2] << FLT_SHIFT; 1423 *suptr++ = addrptr[2] << FLT_SHIFT;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 98dbf6cc1d68..608b279b921b 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -626,7 +626,6 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val
626 iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); 626 iowrite32(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
627 mdio_delay(mdio_addr); 627 mdio_delay(mdio_addr);
628 } 628 }
629 return;
630} 629}
631 630
632 631
@@ -969,9 +968,8 @@ static void tx_timeout(struct net_device *dev)
969 enable_irq(dev->irq); 968 enable_irq(dev->irq);
970 969
971 netif_wake_queue(dev); 970 netif_wake_queue(dev);
972 dev->trans_start = jiffies; 971 dev->trans_start = jiffies; /* prevent tx timeout */
973 np->stats.tx_errors++; 972 np->stats.tx_errors++;
974 return;
975} 973}
976 974
977/* Initialize the Rx and Tx rings, along with various 'dev' bits. */ 975/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
@@ -1055,8 +1053,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
1055 } 1053 }
1056 spin_unlock_irq(&np->lock); 1054 spin_unlock_irq(&np->lock);
1057 1055
1058 dev->trans_start = jiffies;
1059
1060 if (debug > 4) { 1056 if (debug > 4) {
1061 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n", 1057 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
1062 dev->name, np->cur_tx, entry); 1058 dev->name, np->cur_tx, entry);
@@ -1366,13 +1362,15 @@ static u32 __set_rx_mode(struct net_device *dev)
1366 memset(mc_filter, 0xff, sizeof(mc_filter)); 1362 memset(mc_filter, 0xff, sizeof(mc_filter));
1367 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1363 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1368 } else { 1364 } else {
1369 struct dev_mc_list *mclist; 1365 struct netdev_hw_addr *ha;
1370 1366
1371 memset(mc_filter, 0, sizeof(mc_filter)); 1367 memset(mc_filter, 0, sizeof(mc_filter));
1372 netdev_for_each_mc_addr(mclist, dev) { 1368 netdev_for_each_mc_addr(ha, dev) {
1373 int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F; 1369 int filbit;
1374 filterbit &= 0x3f; 1370
1375 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); 1371 filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1372 filbit &= 0x3f;
1373 mc_filter[filbit >> 5] |= 1 << (filbit & 31);
1376 } 1374 }
1377 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1375 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1378 } 1376 }
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index acfeeb980562..a439e93be22d 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -350,9 +350,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
350 350
351#ifdef DEBUG 351#ifdef DEBUG
352 print_binary(status); 352 print_binary(status);
353 printk("tx status 0x%08x 0x%08x \n", 353 printk("tx status 0x%08x 0x%08x\n",
354 card->tx_buffer[0], card->tx_buffer[4]); 354 card->tx_buffer[0], card->tx_buffer[4]);
355 printk("rx status 0x%08x 0x%08x \n", 355 printk("rx status 0x%08x 0x%08x\n",
356 card->rx_buffer[0], card->rx_buffer[4]); 356 card->rx_buffer[0], card->rx_buffer[4]);
357#endif 357#endif
358 /* Handle shared irq and hotplug */ 358 /* Handle shared irq and hotplug */
@@ -462,7 +462,7 @@ static int xircom_open(struct net_device *dev)
462 struct xircom_private *xp = netdev_priv(dev); 462 struct xircom_private *xp = netdev_priv(dev);
463 int retval; 463 int retval;
464 enter("xircom_open"); 464 enter("xircom_open");
465 pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n", 465 pr_info("xircom cardbus adaptor found, registering as %s, using irq %i\n",
466 dev->name, dev->irq); 466 dev->name, dev->irq);
467 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev); 467 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
468 if (retval) { 468 if (retval) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 43265207d463..97b25533e5fb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -109,6 +109,9 @@ struct tun_struct {
109 109
110 struct tap_filter txflt; 110 struct tap_filter txflt;
111 struct socket socket; 111 struct socket socket;
112 struct socket_wq wq;
113
114 int vnet_hdr_sz;
112 115
113#ifdef TUN_DEBUG 116#ifdef TUN_DEBUG
114 int debug; 117 int debug;
@@ -323,7 +326,7 @@ static void tun_net_uninit(struct net_device *dev)
323 /* Inform the methods they need to stop using the dev. 326 /* Inform the methods they need to stop using the dev.
324 */ 327 */
325 if (tfile) { 328 if (tfile) {
326 wake_up_all(&tun->socket.wait); 329 wake_up_all(&tun->wq.wait);
327 if (atomic_dec_and_test(&tfile->count)) 330 if (atomic_dec_and_test(&tfile->count))
328 __tun_detach(tun); 331 __tun_detach(tun);
329 } 332 }
@@ -393,12 +396,11 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
393 396
394 /* Enqueue packet */ 397 /* Enqueue packet */
395 skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb); 398 skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
396 dev->trans_start = jiffies;
397 399
398 /* Notify and wake up reader process */ 400 /* Notify and wake up reader process */
399 if (tun->flags & TUN_FASYNC) 401 if (tun->flags & TUN_FASYNC)
400 kill_fasync(&tun->fasync, SIGIO, POLL_IN); 402 kill_fasync(&tun->fasync, SIGIO, POLL_IN);
401 wake_up_interruptible_poll(&tun->socket.wait, POLLIN | 403 wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
402 POLLRDNORM | POLLRDBAND); 404 POLLRDNORM | POLLRDBAND);
403 return NETDEV_TX_OK; 405 return NETDEV_TX_OK;
404 406
@@ -415,7 +417,6 @@ static void tun_net_mclist(struct net_device *dev)
415 * _rx_ path and has nothing to do with the _tx_ path. 417 * _rx_ path and has nothing to do with the _tx_ path.
416 * In rx path we always accept everything userspace gives us. 418 * In rx path we always accept everything userspace gives us.
417 */ 419 */
418 return;
419} 420}
420 421
421#define MIN_MTU 68 422#define MIN_MTU 68
@@ -498,7 +499,7 @@ static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
498 499
499 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name); 500 DBG(KERN_INFO "%s: tun_chr_poll\n", tun->dev->name);
500 501
501 poll_wait(file, &tun->socket.wait, wait); 502 poll_wait(file, &tun->wq.wait, wait);
502 503
503 if (!skb_queue_empty(&sk->sk_receive_queue)) 504 if (!skb_queue_empty(&sk->sk_receive_queue))
504 mask |= POLLIN | POLLRDNORM; 505 mask |= POLLIN | POLLRDNORM;
@@ -563,7 +564,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
563 } 564 }
564 565
565 if (tun->flags & TUN_VNET_HDR) { 566 if (tun->flags & TUN_VNET_HDR) {
566 if ((len -= sizeof(gso)) > count) 567 if ((len -= tun->vnet_hdr_sz) > count)
567 return -EINVAL; 568 return -EINVAL;
568 569
569 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) 570 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
@@ -575,7 +576,7 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun,
575 576
576 if (gso.hdr_len > len) 577 if (gso.hdr_len > len)
577 return -EINVAL; 578 return -EINVAL;
578 offset += sizeof(gso); 579 offset += tun->vnet_hdr_sz;
579 } 580 }
580 581
581 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { 582 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
@@ -718,7 +719,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
718 719
719 if (tun->flags & TUN_VNET_HDR) { 720 if (tun->flags & TUN_VNET_HDR) {
720 struct virtio_net_hdr gso = { 0 }; /* no info leak */ 721 struct virtio_net_hdr gso = { 0 }; /* no info leak */
721 if ((len -= sizeof(gso)) < 0) 722 if ((len -= tun->vnet_hdr_sz) < 0)
722 return -EINVAL; 723 return -EINVAL;
723 724
724 if (skb_is_gso(skb)) { 725 if (skb_is_gso(skb)) {
@@ -749,7 +750,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
749 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total, 750 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
750 sizeof(gso)))) 751 sizeof(gso))))
751 return -EFAULT; 752 return -EFAULT;
752 total += sizeof(gso); 753 total += tun->vnet_hdr_sz;
753 } 754 }
754 755
755 len = min_t(int, skb->len, len); 756 len = min_t(int, skb->len, len);
@@ -773,7 +774,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
773 774
774 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name); 775 DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
775 776
776 add_wait_queue(&tun->socket.wait, &wait); 777 add_wait_queue(&tun->wq.wait, &wait);
777 while (len) { 778 while (len) {
778 current->state = TASK_INTERRUPTIBLE; 779 current->state = TASK_INTERRUPTIBLE;
779 780
@@ -804,7 +805,7 @@ static ssize_t tun_do_read(struct tun_struct *tun,
804 } 805 }
805 806
806 current->state = TASK_RUNNING; 807 current->state = TASK_RUNNING;
807 remove_wait_queue(&tun->socket.wait, &wait); 808 remove_wait_queue(&tun->wq.wait, &wait);
808 809
809 return ret; 810 return ret;
810} 811}
@@ -861,6 +862,7 @@ static struct rtnl_link_ops tun_link_ops __read_mostly = {
861static void tun_sock_write_space(struct sock *sk) 862static void tun_sock_write_space(struct sock *sk)
862{ 863{
863 struct tun_struct *tun; 864 struct tun_struct *tun;
865 wait_queue_head_t *wqueue;
864 866
865 if (!sock_writeable(sk)) 867 if (!sock_writeable(sk))
866 return; 868 return;
@@ -868,8 +870,9 @@ static void tun_sock_write_space(struct sock *sk)
868 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 870 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
869 return; 871 return;
870 872
871 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 873 wqueue = sk_sleep(sk);
872 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT | 874 if (wqueue && waitqueue_active(wqueue))
875 wake_up_interruptible_sync_poll(wqueue, POLLOUT |
873 POLLWRNORM | POLLWRBAND); 876 POLLWRNORM | POLLWRBAND);
874 877
875 tun = tun_sk(sk)->tun; 878 tun = tun_sk(sk)->tun;
@@ -1033,13 +1036,15 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1033 tun->dev = dev; 1036 tun->dev = dev;
1034 tun->flags = flags; 1037 tun->flags = flags;
1035 tun->txflt.count = 0; 1038 tun->txflt.count = 0;
1039 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1036 1040
1037 err = -ENOMEM; 1041 err = -ENOMEM;
1038 sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto); 1042 sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
1039 if (!sk) 1043 if (!sk)
1040 goto err_free_dev; 1044 goto err_free_dev;
1041 1045
1042 init_waitqueue_head(&tun->socket.wait); 1046 tun->socket.wq = &tun->wq;
1047 init_waitqueue_head(&tun->wq.wait);
1043 tun->socket.ops = &tun_socket_ops; 1048 tun->socket.ops = &tun_socket_ops;
1044 sock_init_data(&tun->socket, sk); 1049 sock_init_data(&tun->socket, sk);
1045 sk->sk_write_space = tun_sock_write_space; 1050 sk->sk_write_space = tun_sock_write_space;
@@ -1174,6 +1179,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1174 struct sock_fprog fprog; 1179 struct sock_fprog fprog;
1175 struct ifreq ifr; 1180 struct ifreq ifr;
1176 int sndbuf; 1181 int sndbuf;
1182 int vnet_hdr_sz;
1177 int ret; 1183 int ret;
1178 1184
1179 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) 1185 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
@@ -1319,6 +1325,25 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1319 tun->socket.sk->sk_sndbuf = sndbuf; 1325 tun->socket.sk->sk_sndbuf = sndbuf;
1320 break; 1326 break;
1321 1327
1328 case TUNGETVNETHDRSZ:
1329 vnet_hdr_sz = tun->vnet_hdr_sz;
1330 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
1331 ret = -EFAULT;
1332 break;
1333
1334 case TUNSETVNETHDRSZ:
1335 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
1336 ret = -EFAULT;
1337 break;
1338 }
1339 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
1340 ret = -EINVAL;
1341 break;
1342 }
1343
1344 tun->vnet_hdr_sz = vnet_hdr_sz;
1345 break;
1346
1322 case TUNATTACHFILTER: 1347 case TUNATTACHFILTER:
1323 /* Can be set only for TAPs */ 1348 /* Can be set only for TAPs */
1324 ret = -EINVAL; 1349 ret = -EINVAL;
@@ -1342,7 +1367,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1342 default: 1367 default:
1343 ret = -EINVAL; 1368 ret = -EINVAL;
1344 break; 1369 break;
1345 }; 1370 }
1346 1371
1347unlock: 1372unlock:
1348 rtnl_unlock(); 1373 rtnl_unlock();
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 98d818daa77e..22bde49262c0 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -881,8 +881,6 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
881 wmb(); 881 wmb();
882 iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister); 882 iowrite32(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);
883 883
884 dev->trans_start = jiffies;
885
886 /* If we don't have room to put the worst case packet on the 884 /* If we don't have room to put the worst case packet on the
887 * queue, then we must stop the queue. We need 2 extra 885 * queue, then we must stop the queue. We need 2 extra
888 * descriptors -- one to prevent ring wrap, and one for the 886 * descriptors -- one to prevent ring wrap, and one for the
@@ -920,11 +918,11 @@ typhoon_set_rx_mode(struct net_device *dev)
920 /* Too many to match, or accept all multicasts. */ 918 /* Too many to match, or accept all multicasts. */
921 filter |= TYPHOON_RX_FILTER_ALL_MCAST; 919 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
922 } else if (!netdev_mc_empty(dev)) { 920 } else if (!netdev_mc_empty(dev)) {
923 struct dev_mc_list *mclist; 921 struct netdev_hw_addr *ha;
924 922
925 memset(mc_filter, 0, sizeof(mc_filter)); 923 memset(mc_filter, 0, sizeof(mc_filter));
926 netdev_for_each_mc_addr(mclist, dev) { 924 netdev_for_each_mc_addr(ha, dev) {
927 int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f; 925 int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
928 mc_filter[bit >> 5] |= 1 << (bit & 0x1f); 926 mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
929 } 927 }
930 928
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 0ab51037bf88..4a34833b85dd 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1999,7 +1999,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
1999static void ucc_geth_set_multi(struct net_device *dev) 1999static void ucc_geth_set_multi(struct net_device *dev)
2000{ 2000{
2001 struct ucc_geth_private *ugeth; 2001 struct ucc_geth_private *ugeth;
2002 struct dev_mc_list *dmi; 2002 struct netdev_hw_addr *ha;
2003 struct ucc_fast __iomem *uf_regs; 2003 struct ucc_fast __iomem *uf_regs;
2004 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 2004 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
2005 2005
@@ -2028,16 +2028,16 @@ static void ucc_geth_set_multi(struct net_device *dev)
2028 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); 2028 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2029 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); 2029 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2030 2030
2031 netdev_for_each_mc_addr(dmi, dev) { 2031 netdev_for_each_mc_addr(ha, dev) {
2032 /* Only support group multicast for now. 2032 /* Only support group multicast for now.
2033 */ 2033 */
2034 if (!(dmi->dmi_addr[0] & 1)) 2034 if (!(ha->addr[0] & 1))
2035 continue; 2035 continue;
2036 2036
2037 /* Ask CPM to run CRC and set bit in 2037 /* Ask CPM to run CRC and set bit in
2038 * filter mask. 2038 * filter mask.
2039 */ 2039 */
2040 hw_add_addr_in_hash(ugeth, dmi->dmi_addr); 2040 hw_add_addr_in_hash(ugeth, ha->addr);
2041 } 2041 }
2042 } 2042 }
2043 } 2043 }
@@ -3148,8 +3148,6 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
3148 /* set bd status and length */ 3148 /* set bd status and length */
3149 out_be32((u32 __iomem *)bd, bd_status); 3149 out_be32((u32 __iomem *)bd, bd_status);
3150 3150
3151 dev->trans_start = jiffies;
3152
3153 /* Move to next BD in the ring */ 3151 /* Move to next BD in the ring */
3154 if (!(bd_status & T_W)) 3152 if (!(bd_status & T_W))
3155 bd += sizeof(struct qe_bd); 3153 bd += sizeof(struct qe_bd);
@@ -3883,7 +3881,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3883 } 3881 }
3884 3882
3885 if (netif_msg_probe(&debug)) 3883 if (netif_msg_probe(&debug))
3886 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", 3884 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
3887 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, 3885 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
3888 ug_info->uf_info.irq); 3886 ug_info->uf_info.irq);
3889 3887
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index ba56ce4382d9..d7b7018a1de1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -385,4 +385,25 @@ config USB_CDC_PHONET
385 cellular modem, as found on most Nokia handsets with the 385 cellular modem, as found on most Nokia handsets with the
386 "PC suite" USB profile. 386 "PC suite" USB profile.
387 387
388config USB_IPHETH
389 tristate "Apple iPhone USB Ethernet driver"
390 default n
391 ---help---
392 Module used to share Internet connection (tethering) from your
393 iPhone (Original, 3G and 3GS) to your system.
394 Note that you need userspace libraries and programs that are needed
395 to pair your device with your system and that understand the iPhone
396 protocol.
397
398 For more information: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
399
400config USB_SIERRA_NET
401 tristate "USB-to-WWAN Driver for Sierra Wireless modems"
402 depends on USB_USBNET
403 help
404 Choose this option if you have a Sierra Wireless USB-to-WWAN device.
405
406 To compile this driver as a module, choose M here: the
407 module will be called sierra_net.
408
388endmenu 409endmenu
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index 82ea62955b56..b13a279663ba 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,4 +23,6 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
23obj-$(CONFIG_USB_USBNET) += usbnet.o 23obj-$(CONFIG_USB_USBNET) += usbnet.o
24obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o 24obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o 25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
26obj-$(CONFIG_USB_IPHETH) += ipheth.o
27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
26 28
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 35f56fc82803..31b73310ec77 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -224,10 +224,9 @@ static int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
224 cmd, value, index, size); 224 cmd, value, index, size);
225 225
226 if (data) { 226 if (data) {
227 buf = kmalloc(size, GFP_KERNEL); 227 buf = kmemdup(data, size, GFP_KERNEL);
228 if (!buf) 228 if (!buf)
229 goto out; 229 goto out;
230 memcpy(buf, data, size);
231 } 230 }
232 231
233 err = usb_control_msg( 232 err = usb_control_msg(
@@ -322,8 +321,29 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
322 /* get the packet length */ 321 /* get the packet length */
323 size = (u16) (header & 0x0000ffff); 322 size = (u16) (header & 0x0000ffff);
324 323
325 if ((skb->len) - ((size + 1) & 0xfffe) == 0) 324 if ((skb->len) - ((size + 1) & 0xfffe) == 0) {
325 u8 alignment = (u32)skb->data & 0x3;
326 if (alignment != 0x2) {
327 /*
328 * not 16bit aligned so use the room provided by
329 * the 32 bit header to align the data
330 *
331 * note we want 16bit alignment as MAC header is
332 * 14bytes thus ip header will be aligned on
333 * 32bit boundary so accessing ipheader elements
334 * using a cast to struct ip header wont cause
335 * an unaligned accesses.
336 */
337 u8 realignment = (alignment + 2) & 0x3;
338 memmove(skb->data - realignment,
339 skb->data,
340 size);
341 skb->data -= realignment;
342 skb_set_tail_pointer(skb, size);
343 }
326 return 2; 344 return 2;
345 }
346
327 if (size > ETH_FRAME_LEN) { 347 if (size > ETH_FRAME_LEN) {
328 netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n", 348 netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
329 size); 349 size);
@@ -331,7 +351,18 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
331 } 351 }
332 ax_skb = skb_clone(skb, GFP_ATOMIC); 352 ax_skb = skb_clone(skb, GFP_ATOMIC);
333 if (ax_skb) { 353 if (ax_skb) {
354 u8 alignment = (u32)packet & 0x3;
334 ax_skb->len = size; 355 ax_skb->len = size;
356
357 if (alignment != 0x2) {
358 /*
359 * not 16bit aligned use the room provided by
360 * the 32 bit header to align the data
361 */
362 u8 realignment = (alignment + 2) & 0x3;
363 memmove(packet - realignment, packet, size);
364 packet -= realignment;
365 }
335 ax_skb->data = packet; 366 ax_skb->data = packet;
336 skb_set_tail_pointer(ax_skb, size); 367 skb_set_tail_pointer(ax_skb, size);
337 usbnet_skb_return(dev, ax_skb); 368 usbnet_skb_return(dev, ax_skb);
@@ -558,16 +589,14 @@ static void asix_set_multicast(struct net_device *net)
558 * for our 8 byte filter buffer 589 * for our 8 byte filter buffer
559 * to avoid allocating memory that 590 * to avoid allocating memory that
560 * is tricky to free later */ 591 * is tricky to free later */
561 struct dev_mc_list *mc_list; 592 struct netdev_hw_addr *ha;
562 u32 crc_bits; 593 u32 crc_bits;
563 594
564 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE); 595 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
565 596
566 /* Build the multicast hash filter. */ 597 /* Build the multicast hash filter. */
567 netdev_for_each_mc_addr(mc_list, net) { 598 netdev_for_each_mc_addr(ha, net) {
568 crc_bits = 599 crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
569 ether_crc(ETH_ALEN,
570 mc_list->dmi_addr) >> 26;
571 data->multi_filter[crc_bits >> 3] |= 600 data->multi_filter[crc_bits >> 3] |=
572 1 << (crc_bits & 7); 601 1 << (crc_bits & 7);
573 } 602 }
@@ -794,16 +823,14 @@ static void ax88172_set_multicast(struct net_device *net)
794 * for our 8 byte filter buffer 823 * for our 8 byte filter buffer
795 * to avoid allocating memory that 824 * to avoid allocating memory that
796 * is tricky to free later */ 825 * is tricky to free later */
797 struct dev_mc_list *mc_list; 826 struct netdev_hw_addr *ha;
798 u32 crc_bits; 827 u32 crc_bits;
799 828
800 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE); 829 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
801 830
802 /* Build the multicast hash filter. */ 831 /* Build the multicast hash filter. */
803 netdev_for_each_mc_addr(mc_list, net) { 832 netdev_for_each_mc_addr(ha, net) {
804 crc_bits = 833 crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
805 ether_crc(ETH_ALEN,
806 mc_list->dmi_addr) >> 26;
807 data->multi_filter[crc_bits >> 3] |= 834 data->multi_filter[crc_bits >> 3] |=
808 1 << (crc_bits & 7); 835 1 << (crc_bits & 7);
809 } 836 }
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 602e123b2741..97687d335903 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -629,7 +629,7 @@ static void catc_multicast(unsigned char *addr, u8 *multicast)
629static void catc_set_multicast_list(struct net_device *netdev) 629static void catc_set_multicast_list(struct net_device *netdev)
630{ 630{
631 struct catc *catc = netdev_priv(netdev); 631 struct catc *catc = netdev_priv(netdev);
632 struct dev_mc_list *mc; 632 struct netdev_hw_addr *ha;
633 u8 broadcast[6]; 633 u8 broadcast[6];
634 u8 rx = RxEnable | RxPolarity | RxMultiCast; 634 u8 rx = RxEnable | RxPolarity | RxMultiCast;
635 635
@@ -647,8 +647,8 @@ static void catc_set_multicast_list(struct net_device *netdev)
647 if (netdev->flags & IFF_ALLMULTI) { 647 if (netdev->flags & IFF_ALLMULTI) {
648 memset(catc->multicast, 0xff, 64); 648 memset(catc->multicast, 0xff, 64);
649 } else { 649 } else {
650 netdev_for_each_mc_addr(mc, netdev) { 650 netdev_for_each_mc_addr(ha, netdev) {
651 u32 crc = ether_crc_le(6, mc->dmi_addr); 651 u32 crc = ether_crc_le(6, ha->addr);
652 if (!catc->is_f5u011) { 652 if (!catc->is_f5u011) {
653 catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); 653 catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
654 } else { 654 } else {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c8cdb7f30adc..b3fe0de40469 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -64,6 +64,11 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc)
64 64
65#endif 65#endif
66 66
67static const u8 mbm_guid[16] = {
68 0xa3, 0x17, 0xa8, 0x8b, 0x04, 0x5e, 0x4f, 0x01,
69 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
70};
71
67/* 72/*
68 * probes control interface, claims data interface, collects the bulk 73 * probes control interface, claims data interface, collects the bulk
69 * endpoints, activates data interface (if needed), maybe sets MTU. 74 * endpoints, activates data interface (if needed), maybe sets MTU.
@@ -79,6 +84,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
79 int status; 84 int status;
80 int rndis; 85 int rndis;
81 struct usb_driver *driver = driver_of(intf); 86 struct usb_driver *driver = driver_of(intf);
87 struct usb_cdc_mdlm_desc *desc = NULL;
88 struct usb_cdc_mdlm_detail_desc *detail = NULL;
82 89
83 if (sizeof dev->data < sizeof *info) 90 if (sizeof dev->data < sizeof *info)
84 return -EDOM; 91 return -EDOM;
@@ -229,6 +236,34 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
229 * side link address we were given. 236 * side link address we were given.
230 */ 237 */
231 break; 238 break;
239 case USB_CDC_MDLM_TYPE:
240 if (desc) {
241 dev_dbg(&intf->dev, "extra MDLM descriptor\n");
242 goto bad_desc;
243 }
244
245 desc = (void *)buf;
246
247 if (desc->bLength != sizeof(*desc))
248 goto bad_desc;
249
250 if (memcmp(&desc->bGUID, mbm_guid, 16))
251 goto bad_desc;
252 break;
253 case USB_CDC_MDLM_DETAIL_TYPE:
254 if (detail) {
255 dev_dbg(&intf->dev, "extra MDLM detail descriptor\n");
256 goto bad_desc;
257 }
258
259 detail = (void *)buf;
260
261 if (detail->bGuidDescriptorType == 0) {
262 if (detail->bLength < (sizeof(*detail) + 1))
263 goto bad_desc;
264 } else
265 goto bad_desc;
266 break;
232 } 267 }
233next_desc: 268next_desc:
234 len -= buf [0]; /* bLength */ 269 len -= buf [0]; /* bLength */
@@ -431,6 +466,7 @@ static const struct driver_info mbm_info = {
431 .bind = cdc_bind, 466 .bind = cdc_bind,
432 .unbind = usbnet_cdc_unbind, 467 .unbind = usbnet_cdc_unbind,
433 .status = cdc_status, 468 .status = cdc_status,
469 .manage_power = cdc_manage_power,
434}; 470};
435 471
436/*-------------------------------------------------------------------------*/ 472/*-------------------------------------------------------------------------*/
@@ -542,80 +578,10 @@ static const struct usb_device_id products [] = {
542 USB_CDC_PROTO_NONE), 578 USB_CDC_PROTO_NONE),
543 .driver_info = (unsigned long) &cdc_info, 579 .driver_info = (unsigned long) &cdc_info,
544}, { 580}, {
545 /* Ericsson F3507g */ 581 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MDLM,
546 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM, 582 USB_CDC_PROTO_NONE),
547 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 583 .driver_info = (unsigned long)&mbm_info,
548 .driver_info = (unsigned long) &mbm_info, 584
549}, {
550 /* Ericsson F3507g ver. 2 */
551 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
552 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
553 .driver_info = (unsigned long) &mbm_info,
554}, {
555 /* Ericsson F3607gw */
556 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
557 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
558 .driver_info = (unsigned long) &mbm_info,
559}, {
560 /* Ericsson F3607gw ver 2 */
561 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
562 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
563 .driver_info = (unsigned long) &mbm_info,
564}, {
565 /* Ericsson F3607gw ver 3 */
566 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
567 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
568 .driver_info = (unsigned long) &mbm_info,
569}, {
570 /* Ericsson F3307 */
571 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
572 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
573 .driver_info = (unsigned long) &mbm_info,
574}, {
575 /* Ericsson F3307 ver 2 */
576 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
577 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
578 .driver_info = (unsigned long) &mbm_info,
579}, {
580 /* Ericsson C3607w */
581 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
582 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
583 .driver_info = (unsigned long) &mbm_info,
584}, {
585 /* Ericsson C3607w ver 2 */
586 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190b, USB_CLASS_COMM,
587 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
588 .driver_info = (unsigned long) &mbm_info,
589}, {
590 /* Toshiba F3507g */
591 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
592 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
593 .driver_info = (unsigned long) &mbm_info,
594}, {
595 /* Toshiba F3607gw */
596 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
597 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
598 .driver_info = (unsigned long) &mbm_info,
599}, {
600 /* Toshiba F3607gw ver 2 */
601 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
602 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
603 .driver_info = (unsigned long) &mbm_info,
604}, {
605 /* Dell F3507g */
606 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
607 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
608 .driver_info = (unsigned long) &mbm_info,
609}, {
610 /* Dell F3607gw */
611 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
612 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
613 .driver_info = (unsigned long) &mbm_info,
614}, {
615 /* Dell F3607gw ver 2 */
616 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
617 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
618 .driver_info = (unsigned long) &mbm_info,
619}, 585},
620 { }, // END 586 { }, // END
621}; 587};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 04b281002a76..02b622e3b9fb 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -93,10 +93,9 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
93 netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length); 93 netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length);
94 94
95 if (data) { 95 if (data) {
96 buf = kmalloc(length, GFP_KERNEL); 96 buf = kmemdup(data, length, GFP_KERNEL);
97 if (!buf) 97 if (!buf)
98 goto out; 98 goto out;
99 memcpy(buf, data, length);
100 } 99 }
101 100
102 err = usb_control_msg(dev->udev, 101 err = usb_control_msg(dev->udev,
@@ -240,7 +239,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
240 goto out; 239 goto out;
241 240
242 dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg); 241 dm_write_reg(dev, DM_SHARED_ADDR, phy ? (reg | 0x40) : reg);
243 dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1c : 0x14); 242 dm_write_reg(dev, DM_SHARED_CTRL, phy ? 0x1a : 0x12);
244 243
245 for (i = 0; i < DM_TIMEOUT; i++) { 244 for (i = 0; i < DM_TIMEOUT; i++) {
246 u8 tmp; 245 u8 tmp;
@@ -387,10 +386,10 @@ static void dm9601_set_multicast(struct net_device *net)
387 netdev_mc_count(net) > DM_MAX_MCAST) { 386 netdev_mc_count(net) > DM_MAX_MCAST) {
388 rx_ctl |= 0x04; 387 rx_ctl |= 0x04;
389 } else if (!netdev_mc_empty(net)) { 388 } else if (!netdev_mc_empty(net)) {
390 struct dev_mc_list *mc_list; 389 struct netdev_hw_addr *ha;
391 390
392 netdev_for_each_mc_addr(mc_list, net) { 391 netdev_for_each_mc_addr(ha, net) {
393 u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26; 392 u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
394 hashes[crc >> 3] |= 1 << (crc & 0x7); 393 hashes[crc >> 3] |= 1 << (crc & 0x7);
395 } 394 }
396 } 395 }
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index be0cc99e881a..9964df199511 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -834,8 +834,6 @@ static netdev_tx_t hso_net_start_xmit(struct sk_buff *skb,
834 } else { 834 } else {
835 net->stats.tx_packets++; 835 net->stats.tx_packets++;
836 net->stats.tx_bytes += skb->len; 836 net->stats.tx_bytes += skb->len;
837 /* And tell the kernel when the last transmit started. */
838 net->trans_start = jiffies;
839 } 837 }
840 dev_kfree_skb(skb); 838 dev_kfree_skb(skb);
841 /* we're done */ 839 /* we're done */
@@ -1474,7 +1472,6 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
1474 spin_unlock_irqrestore(&serial->serial_lock, flags); 1472 spin_unlock_irqrestore(&serial->serial_lock, flags);
1475 1473
1476 /* done */ 1474 /* done */
1477 return;
1478} 1475}
1479 1476
1480/* how many characters in the buffer */ 1477/* how many characters in the buffer */
@@ -1994,7 +1991,6 @@ static void hso_std_serial_write_bulk_callback(struct urb *urb)
1994 hso_kick_transmit(serial); 1991 hso_kick_transmit(serial);
1995 1992
1996 D1(" "); 1993 D1(" ");
1997 return;
1998} 1994}
1999 1995
2000/* called for writing diag or CS serial port */ 1996/* called for writing diag or CS serial port */
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
new file mode 100644
index 000000000000..197c352c47fb
--- /dev/null
+++ b/drivers/net/usb/ipheth.c
@@ -0,0 +1,565 @@
1/*
2 * ipheth.c - Apple iPhone USB Ethernet driver
3 *
4 * Copyright (c) 2009 Diego Giagio <diego@giagio.com>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of GIAGIO.COM nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * Alternatively, provided that this notice is retained in full, this
20 * software may be distributed under the terms of the GNU General
21 * Public License ("GPL") version 2, in which case the provisions of the
22 * GPL apply INSTEAD OF those given above.
23 *
24 * The provided data structures and external interfaces from this code
25 * are not restricted to be used by modules with a GPL compatible license.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 *
40 *
41 * Attention: iPhone device must be paired, otherwise it won't respond to our
42 * driver. For more info: http://giagio.com/wiki/moin.cgi/iPhoneEthernetDriver
43 *
44 */
45
46#include <linux/kernel.h>
47#include <linux/errno.h>
48#include <linux/init.h>
49#include <linux/slab.h>
50#include <linux/module.h>
51#include <linux/netdevice.h>
52#include <linux/etherdevice.h>
53#include <linux/ethtool.h>
54#include <linux/usb.h>
55#include <linux/workqueue.h>
56
57#define USB_VENDOR_APPLE 0x05ac
58#define USB_PRODUCT_IPHONE 0x1290
59#define USB_PRODUCT_IPHONE_3G 0x1292
60#define USB_PRODUCT_IPHONE_3GS 0x1294
61
62#define IPHETH_USBINTF_CLASS 255
63#define IPHETH_USBINTF_SUBCLASS 253
64#define IPHETH_USBINTF_PROTO 1
65
66#define IPHETH_BUF_SIZE 1516
67#define IPHETH_TX_TIMEOUT (5 * HZ)
68
69#define IPHETH_INTFNUM 2
70#define IPHETH_ALT_INTFNUM 1
71
72#define IPHETH_CTRL_ENDP 0x00
73#define IPHETH_CTRL_BUF_SIZE 0x40
74#define IPHETH_CTRL_TIMEOUT (5 * HZ)
75
76#define IPHETH_CMD_GET_MACADDR 0x00
77#define IPHETH_CMD_CARRIER_CHECK 0x45
78
79#define IPHETH_CARRIER_CHECK_TIMEOUT round_jiffies_relative(1 * HZ)
80#define IPHETH_CARRIER_ON 0x04
81
82static struct usb_device_id ipheth_table[] = {
83 { USB_DEVICE_AND_INTERFACE_INFO(
84 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE,
85 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
86 IPHETH_USBINTF_PROTO) },
87 { USB_DEVICE_AND_INTERFACE_INFO(
88 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3G,
89 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
90 IPHETH_USBINTF_PROTO) },
91 { USB_DEVICE_AND_INTERFACE_INFO(
92 USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_3GS,
93 IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
94 IPHETH_USBINTF_PROTO) },
95 { }
96};
97MODULE_DEVICE_TABLE(usb, ipheth_table);
98
99struct ipheth_device {
100 struct usb_device *udev;
101 struct usb_interface *intf;
102 struct net_device *net;
103 struct sk_buff *tx_skb;
104 struct urb *tx_urb;
105 struct urb *rx_urb;
106 unsigned char *tx_buf;
107 unsigned char *rx_buf;
108 unsigned char *ctrl_buf;
109 u8 bulk_in;
110 u8 bulk_out;
111 struct delayed_work carrier_work;
112};
113
114static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags);
115
116static int ipheth_alloc_urbs(struct ipheth_device *iphone)
117{
118 struct urb *tx_urb = NULL;
119 struct urb *rx_urb = NULL;
120 u8 *tx_buf = NULL;
121 u8 *rx_buf = NULL;
122
123 tx_urb = usb_alloc_urb(0, GFP_KERNEL);
124 if (tx_urb == NULL)
125 goto error_nomem;
126
127 rx_urb = usb_alloc_urb(0, GFP_KERNEL);
128 if (rx_urb == NULL)
129 goto free_tx_urb;
130
131 tx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
132 GFP_KERNEL, &tx_urb->transfer_dma);
133 if (tx_buf == NULL)
134 goto free_rx_urb;
135
136 rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
137 GFP_KERNEL, &rx_urb->transfer_dma);
138 if (rx_buf == NULL)
139 goto free_tx_buf;
140
141
142 iphone->tx_urb = tx_urb;
143 iphone->rx_urb = rx_urb;
144 iphone->tx_buf = tx_buf;
145 iphone->rx_buf = rx_buf;
146 return 0;
147
148free_tx_buf:
149 usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, tx_buf,
150 tx_urb->transfer_dma);
151free_rx_urb:
152 usb_free_urb(rx_urb);
153free_tx_urb:
154 usb_free_urb(tx_urb);
155error_nomem:
156 return -ENOMEM;
157}
158
159static void ipheth_free_urbs(struct ipheth_device *iphone)
160{
161 usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
162 iphone->rx_urb->transfer_dma);
163 usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
164 iphone->tx_urb->transfer_dma);
165 usb_free_urb(iphone->rx_urb);
166 usb_free_urb(iphone->tx_urb);
167}
168
169static void ipheth_kill_urbs(struct ipheth_device *dev)
170{
171 usb_kill_urb(dev->tx_urb);
172 usb_kill_urb(dev->rx_urb);
173}
174
175static void ipheth_rcvbulk_callback(struct urb *urb)
176{
177 struct ipheth_device *dev;
178 struct sk_buff *skb;
179 int status;
180 char *buf;
181 int len;
182
183 dev = urb->context;
184 if (dev == NULL)
185 return;
186
187 status = urb->status;
188 switch (status) {
189 case -ENOENT:
190 case -ECONNRESET:
191 case -ESHUTDOWN:
192 return;
193 case 0:
194 break;
195 default:
196 err("%s: urb status: %d", __func__, urb->status);
197 return;
198 }
199
200 len = urb->actual_length;
201 buf = urb->transfer_buffer;
202
203 skb = dev_alloc_skb(NET_IP_ALIGN + len);
204 if (!skb) {
205 err("%s: dev_alloc_skb: -ENOMEM", __func__);
206 dev->net->stats.rx_dropped++;
207 return;
208 }
209
210 skb_reserve(skb, NET_IP_ALIGN);
211 memcpy(skb_put(skb, len), buf + NET_IP_ALIGN, len - NET_IP_ALIGN);
212 skb->dev = dev->net;
213 skb->protocol = eth_type_trans(skb, dev->net);
214
215 dev->net->stats.rx_packets++;
216 dev->net->stats.rx_bytes += len;
217
218 netif_rx(skb);
219 ipheth_rx_submit(dev, GFP_ATOMIC);
220}
221
222static void ipheth_sndbulk_callback(struct urb *urb)
223{
224 struct ipheth_device *dev;
225
226 dev = urb->context;
227 if (dev == NULL)
228 return;
229
230 if (urb->status != 0 &&
231 urb->status != -ENOENT &&
232 urb->status != -ECONNRESET &&
233 urb->status != -ESHUTDOWN)
234 err("%s: urb status: %d", __func__, urb->status);
235
236 dev_kfree_skb_irq(dev->tx_skb);
237 netif_wake_queue(dev->net);
238}
239
240static int ipheth_carrier_set(struct ipheth_device *dev)
241{
242 struct usb_device *udev = dev->udev;
243 int retval;
244
245 retval = usb_control_msg(udev,
246 usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
247 IPHETH_CMD_CARRIER_CHECK, /* request */
248 0xc0, /* request type */
249 0x00, /* value */
250 0x02, /* index */
251 dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
252 IPHETH_CTRL_TIMEOUT);
253 if (retval < 0) {
254 err("%s: usb_control_msg: %d", __func__, retval);
255 return retval;
256 }
257
258 if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON)
259 netif_carrier_on(dev->net);
260 else
261 netif_carrier_off(dev->net);
262
263 return 0;
264}
265
266static void ipheth_carrier_check_work(struct work_struct *work)
267{
268 struct ipheth_device *dev = container_of(work, struct ipheth_device,
269 carrier_work.work);
270
271 ipheth_carrier_set(dev);
272 schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
273}
274
275static int ipheth_get_macaddr(struct ipheth_device *dev)
276{
277 struct usb_device *udev = dev->udev;
278 struct net_device *net = dev->net;
279 int retval;
280
281 retval = usb_control_msg(udev,
282 usb_rcvctrlpipe(udev, IPHETH_CTRL_ENDP),
283 IPHETH_CMD_GET_MACADDR, /* request */
284 0xc0, /* request type */
285 0x00, /* value */
286 0x02, /* index */
287 dev->ctrl_buf,
288 IPHETH_CTRL_BUF_SIZE,
289 IPHETH_CTRL_TIMEOUT);
290 if (retval < 0) {
291 err("%s: usb_control_msg: %d", __func__, retval);
292 } else if (retval < ETH_ALEN) {
293 err("%s: usb_control_msg: short packet: %d bytes",
294 __func__, retval);
295 retval = -EINVAL;
296 } else {
297 memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN);
298 retval = 0;
299 }
300
301 return retval;
302}
303
304static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
305{
306 struct usb_device *udev = dev->udev;
307 int retval;
308
309 usb_fill_bulk_urb(dev->rx_urb, udev,
310 usb_rcvbulkpipe(udev, dev->bulk_in),
311 dev->rx_buf, IPHETH_BUF_SIZE,
312 ipheth_rcvbulk_callback,
313 dev);
314 dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
315
316 retval = usb_submit_urb(dev->rx_urb, mem_flags);
317 if (retval)
318 err("%s: usb_submit_urb: %d", __func__, retval);
319 return retval;
320}
321
322static int ipheth_open(struct net_device *net)
323{
324 struct ipheth_device *dev = netdev_priv(net);
325 struct usb_device *udev = dev->udev;
326 int retval = 0;
327
328 usb_set_interface(udev, IPHETH_INTFNUM, IPHETH_ALT_INTFNUM);
329
330 retval = ipheth_carrier_set(dev);
331 if (retval)
332 return retval;
333
334 retval = ipheth_rx_submit(dev, GFP_KERNEL);
335 if (retval)
336 return retval;
337
338 schedule_delayed_work(&dev->carrier_work, IPHETH_CARRIER_CHECK_TIMEOUT);
339 netif_start_queue(net);
340 return retval;
341}
342
343static int ipheth_close(struct net_device *net)
344{
345 struct ipheth_device *dev = netdev_priv(net);
346
347 cancel_delayed_work_sync(&dev->carrier_work);
348 netif_stop_queue(net);
349 return 0;
350}
351
352static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
353{
354 struct ipheth_device *dev = netdev_priv(net);
355 struct usb_device *udev = dev->udev;
356 int retval;
357
358 /* Paranoid */
359 if (skb->len > IPHETH_BUF_SIZE) {
360 WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
361 dev->net->stats.tx_dropped++;
362 dev_kfree_skb_irq(skb);
363 return NETDEV_TX_OK;
364 }
365
366 memcpy(dev->tx_buf, skb->data, skb->len);
367 if (skb->len < IPHETH_BUF_SIZE)
368 memset(dev->tx_buf + skb->len, 0, IPHETH_BUF_SIZE - skb->len);
369
370 usb_fill_bulk_urb(dev->tx_urb, udev,
371 usb_sndbulkpipe(udev, dev->bulk_out),
372 dev->tx_buf, IPHETH_BUF_SIZE,
373 ipheth_sndbulk_callback,
374 dev);
375 dev->tx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
376
377 retval = usb_submit_urb(dev->tx_urb, GFP_ATOMIC);
378 if (retval) {
379 err("%s: usb_submit_urb: %d", __func__, retval);
380 dev->net->stats.tx_errors++;
381 dev_kfree_skb_irq(skb);
382 } else {
383 dev->tx_skb = skb;
384
385 dev->net->stats.tx_packets++;
386 dev->net->stats.tx_bytes += skb->len;
387 netif_stop_queue(net);
388 }
389
390 return NETDEV_TX_OK;
391}
392
393static void ipheth_tx_timeout(struct net_device *net)
394{
395 struct ipheth_device *dev = netdev_priv(net);
396
397 err("%s: TX timeout", __func__);
398 dev->net->stats.tx_errors++;
399 usb_unlink_urb(dev->tx_urb);
400}
401
402static struct net_device_stats *ipheth_stats(struct net_device *net)
403{
404 struct ipheth_device *dev = netdev_priv(net);
405 return &dev->net->stats;
406}
407
408static u32 ipheth_ethtool_op_get_link(struct net_device *net)
409{
410 struct ipheth_device *dev = netdev_priv(net);
411 return netif_carrier_ok(dev->net);
412}
413
414static struct ethtool_ops ops = {
415 .get_link = ipheth_ethtool_op_get_link
416};
417
418static const struct net_device_ops ipheth_netdev_ops = {
419 .ndo_open = &ipheth_open,
420 .ndo_stop = &ipheth_close,
421 .ndo_start_xmit = &ipheth_tx,
422 .ndo_tx_timeout = &ipheth_tx_timeout,
423 .ndo_get_stats = &ipheth_stats,
424};
425
426static struct device_type ipheth_type = {
427 .name = "wwan",
428};
429
430static int ipheth_probe(struct usb_interface *intf,
431 const struct usb_device_id *id)
432{
433 struct usb_device *udev = interface_to_usbdev(intf);
434 struct usb_host_interface *hintf;
435 struct usb_endpoint_descriptor *endp;
436 struct ipheth_device *dev;
437 struct net_device *netdev;
438 int i;
439 int retval;
440
441 netdev = alloc_etherdev(sizeof(struct ipheth_device));
442 if (!netdev)
443 return -ENOMEM;
444
445 netdev->netdev_ops = &ipheth_netdev_ops;
446 netdev->watchdog_timeo = IPHETH_TX_TIMEOUT;
447 strcpy(netdev->name, "wwan%d");
448
449 dev = netdev_priv(netdev);
450 dev->udev = udev;
451 dev->net = netdev;
452 dev->intf = intf;
453
454 /* Set up endpoints */
455 hintf = usb_altnum_to_altsetting(intf, IPHETH_ALT_INTFNUM);
456 if (hintf == NULL) {
457 retval = -ENODEV;
458 err("Unable to find alternate settings interface");
459 goto err_endpoints;
460 }
461
462 for (i = 0; i < hintf->desc.bNumEndpoints; i++) {
463 endp = &hintf->endpoint[i].desc;
464 if (usb_endpoint_is_bulk_in(endp))
465 dev->bulk_in = endp->bEndpointAddress;
466 else if (usb_endpoint_is_bulk_out(endp))
467 dev->bulk_out = endp->bEndpointAddress;
468 }
469 if (!(dev->bulk_in && dev->bulk_out)) {
470 retval = -ENODEV;
471 err("Unable to find endpoints");
472 goto err_endpoints;
473 }
474
475 dev->ctrl_buf = kmalloc(IPHETH_CTRL_BUF_SIZE, GFP_KERNEL);
476 if (dev->ctrl_buf == NULL) {
477 retval = -ENOMEM;
478 goto err_alloc_ctrl_buf;
479 }
480
481 retval = ipheth_get_macaddr(dev);
482 if (retval)
483 goto err_get_macaddr;
484
485 INIT_DELAYED_WORK(&dev->carrier_work, ipheth_carrier_check_work);
486
487 retval = ipheth_alloc_urbs(dev);
488 if (retval) {
489 err("error allocating urbs: %d", retval);
490 goto err_alloc_urbs;
491 }
492
493 usb_set_intfdata(intf, dev);
494
495 SET_NETDEV_DEV(netdev, &intf->dev);
496 SET_ETHTOOL_OPS(netdev, &ops);
497 SET_NETDEV_DEVTYPE(netdev, &ipheth_type);
498
499 retval = register_netdev(netdev);
500 if (retval) {
501 err("error registering netdev: %d", retval);
502 retval = -EIO;
503 goto err_register_netdev;
504 }
505
506 dev_info(&intf->dev, "Apple iPhone USB Ethernet device attached\n");
507 return 0;
508
509err_register_netdev:
510 ipheth_free_urbs(dev);
511err_alloc_urbs:
512err_get_macaddr:
513err_alloc_ctrl_buf:
514 kfree(dev->ctrl_buf);
515err_endpoints:
516 free_netdev(netdev);
517 return retval;
518}
519
520static void ipheth_disconnect(struct usb_interface *intf)
521{
522 struct ipheth_device *dev;
523
524 dev = usb_get_intfdata(intf);
525 if (dev != NULL) {
526 unregister_netdev(dev->net);
527 ipheth_kill_urbs(dev);
528 ipheth_free_urbs(dev);
529 kfree(dev->ctrl_buf);
530 free_netdev(dev->net);
531 }
532 usb_set_intfdata(intf, NULL);
533 dev_info(&intf->dev, "Apple iPhone USB Ethernet now disconnected\n");
534}
535
536static struct usb_driver ipheth_driver = {
537 .name = "ipheth",
538 .probe = ipheth_probe,
539 .disconnect = ipheth_disconnect,
540 .id_table = ipheth_table,
541};
542
543static int __init ipheth_init(void)
544{
545 int retval;
546
547 retval = usb_register(&ipheth_driver);
548 if (retval) {
549 err("usb_register failed: %d", retval);
550 return retval;
551 }
552 return 0;
553}
554
555static void __exit ipheth_exit(void)
556{
557 usb_deregister(&ipheth_driver);
558}
559
560module_init(ipheth_init);
561module_exit(ipheth_exit);
562
563MODULE_AUTHOR("Diego Giagio <diego@giagio.com>");
564MODULE_DESCRIPTION("Apple iPhone USB Ethernet driver");
565MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 52671ea043a7..d6078b8c4273 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -145,6 +145,7 @@ static struct usb_device_id usb_klsi_table[] = {
145 { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */ 145 { USB_DEVICE(0x0707, 0x0100) }, /* SMC 2202USB */
146 { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */ 146 { USB_DEVICE(0x07aa, 0x0001) }, /* Correga K.K. */
147 { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */ 147 { USB_DEVICE(0x07b8, 0x4000) }, /* D-Link DU-E10 */
148 { USB_DEVICE(0x07c9, 0xb010) }, /* Allied Telesyn AT-USB10 USB Ethernet Adapter */
148 { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */ 149 { USB_DEVICE(0x0846, 0x1001) }, /* NetGear EA-101 */
149 { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */ 150 { USB_DEVICE(0x0846, 0x1002) }, /* NetGear EA-101 */
150 { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */ 151 { USB_DEVICE(0x085a, 0x0008) }, /* PortGear Ethernet Adapter */
@@ -855,7 +856,6 @@ skip:
855 { 856 {
856 kaweth->stats.tx_packets++; 857 kaweth->stats.tx_packets++;
857 kaweth->stats.tx_bytes += skb->len; 858 kaweth->stats.tx_bytes += skb->len;
858 net->trans_start = jiffies;
859 } 859 }
860 860
861 spin_unlock_irq(&kaweth->device_lock); 861 spin_unlock_irq(&kaweth->device_lock);
@@ -1155,13 +1155,13 @@ err_fw:
1155 if (!kaweth->irq_urb) 1155 if (!kaweth->irq_urb)
1156 goto err_tx_and_rx; 1156 goto err_tx_and_rx;
1157 1157
1158 kaweth->intbuffer = usb_buffer_alloc( kaweth->dev, 1158 kaweth->intbuffer = usb_alloc_coherent( kaweth->dev,
1159 INTBUFFERSIZE, 1159 INTBUFFERSIZE,
1160 GFP_KERNEL, 1160 GFP_KERNEL,
1161 &kaweth->intbufferhandle); 1161 &kaweth->intbufferhandle);
1162 if (!kaweth->intbuffer) 1162 if (!kaweth->intbuffer)
1163 goto err_tx_and_rx_and_irq; 1163 goto err_tx_and_rx_and_irq;
1164 kaweth->rx_buf = usb_buffer_alloc( kaweth->dev, 1164 kaweth->rx_buf = usb_alloc_coherent( kaweth->dev,
1165 KAWETH_BUF_SIZE, 1165 KAWETH_BUF_SIZE,
1166 GFP_KERNEL, 1166 GFP_KERNEL,
1167 &kaweth->rxbufferhandle); 1167 &kaweth->rxbufferhandle);
@@ -1202,9 +1202,9 @@ err_fw:
1202 1202
1203err_intfdata: 1203err_intfdata:
1204 usb_set_intfdata(intf, NULL); 1204 usb_set_intfdata(intf, NULL);
1205 usb_buffer_free(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle); 1205 usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
1206err_all_but_rxbuf: 1206err_all_but_rxbuf:
1207 usb_buffer_free(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle); 1207 usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
1208err_tx_and_rx_and_irq: 1208err_tx_and_rx_and_irq:
1209 usb_free_urb(kaweth->irq_urb); 1209 usb_free_urb(kaweth->irq_urb);
1210err_tx_and_rx: 1210err_tx_and_rx:
@@ -1241,8 +1241,8 @@ static void kaweth_disconnect(struct usb_interface *intf)
1241 usb_free_urb(kaweth->tx_urb); 1241 usb_free_urb(kaweth->tx_urb);
1242 usb_free_urb(kaweth->irq_urb); 1242 usb_free_urb(kaweth->irq_urb);
1243 1243
1244 usb_buffer_free(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle); 1244 usb_free_coherent(kaweth->dev, KAWETH_BUF_SIZE, (void *)kaweth->rx_buf, kaweth->rxbufferhandle);
1245 usb_buffer_free(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle); 1245 usb_free_coherent(kaweth->dev, INTBUFFERSIZE, (void *)kaweth->intbuffer, kaweth->intbufferhandle);
1246 1246
1247 free_netdev(netdev); 1247 free_netdev(netdev);
1248} 1248}
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 9f24e3f871e1..a6281e3987b5 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -142,12 +142,10 @@ static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *
142 int ret; 142 int ret;
143 void *buffer; 143 void *buffer;
144 144
145 buffer = kmalloc(size, GFP_NOIO); 145 buffer = kmemdup(data, size, GFP_NOIO);
146 if (buffer == NULL) 146 if (buffer == NULL)
147 return -ENOMEM; 147 return -ENOMEM;
148 148
149 memcpy(buffer, data, size);
150
151 ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ, 149 ret = usb_control_msg(xdev, usb_sndctrlpipe(xdev, 0), MCS7830_WR_BREQ,
152 MCS7830_WR_BMREQ, 0x0000, index, buffer, 150 MCS7830_WR_BMREQ, 0x0000, index, buffer,
153 size, MCS7830_CTRL_TIMEOUT); 151 size, MCS7830_CTRL_TIMEOUT);
@@ -453,12 +451,12 @@ static void mcs7830_data_set_multicast(struct net_device *net)
453 * for our 8 byte filter buffer 451 * for our 8 byte filter buffer
454 * to avoid allocating memory that 452 * to avoid allocating memory that
455 * is tricky to free later */ 453 * is tricky to free later */
456 struct dev_mc_list *mc_list; 454 struct netdev_hw_addr *ha;
457 u32 crc_bits; 455 u32 crc_bits;
458 456
459 /* Build the multicast hash filter. */ 457 /* Build the multicast hash filter. */
460 netdev_for_each_mc_addr(mc_list, net) { 458 netdev_for_each_mc_addr(ha, net) {
461 crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26; 459 crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
462 data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); 460 data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
463 } 461 }
464 } 462 }
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 41838773b568..974d17f0263e 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -203,13 +203,12 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
203 char *buffer; 203 char *buffer;
204 DECLARE_WAITQUEUE(wait, current); 204 DECLARE_WAITQUEUE(wait, current);
205 205
206 buffer = kmalloc(size, GFP_KERNEL); 206 buffer = kmemdup(data, size, GFP_KERNEL);
207 if (!buffer) { 207 if (!buffer) {
208 netif_warn(pegasus, drv, pegasus->net, 208 netif_warn(pegasus, drv, pegasus->net,
209 "out of memory in %s\n", __func__); 209 "out of memory in %s\n", __func__);
210 return -ENOMEM; 210 return -ENOMEM;
211 } 211 }
212 memcpy(buffer, data, size);
213 212
214 add_wait_queue(&pegasus->ctrl_wait, &wait); 213 add_wait_queue(&pegasus->ctrl_wait, &wait);
215 set_current_state(TASK_UNINTERRUPTIBLE); 214 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -255,13 +254,12 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
255 char *tmp; 254 char *tmp;
256 DECLARE_WAITQUEUE(wait, current); 255 DECLARE_WAITQUEUE(wait, current);
257 256
258 tmp = kmalloc(1, GFP_KERNEL); 257 tmp = kmemdup(&data, 1, GFP_KERNEL);
259 if (!tmp) { 258 if (!tmp) {
260 netif_warn(pegasus, drv, pegasus->net, 259 netif_warn(pegasus, drv, pegasus->net,
261 "out of memory in %s\n", __func__); 260 "out of memory in %s\n", __func__);
262 return -ENOMEM; 261 return -ENOMEM;
263 } 262 }
264 memcpy(tmp, &data, 1);
265 add_wait_queue(&pegasus->ctrl_wait, &wait); 263 add_wait_queue(&pegasus->ctrl_wait, &wait);
266 set_current_state(TASK_UNINTERRUPTIBLE); 264 set_current_state(TASK_UNINTERRUPTIBLE);
267 while (pegasus->flags & ETH_REGS_CHANGED) 265 while (pegasus->flags & ETH_REGS_CHANGED)
@@ -808,7 +806,7 @@ static void write_bulk_callback(struct urb *urb)
808 break; 806 break;
809 } 807 }
810 808
811 net->trans_start = jiffies; 809 net->trans_start = jiffies; /* prevent tx timeout */
812 netif_wake_queue(net); 810 netif_wake_queue(net);
813} 811}
814 812
@@ -909,7 +907,6 @@ static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
909 } else { 907 } else {
910 pegasus->stats.tx_packets++; 908 pegasus->stats.tx_packets++;
911 pegasus->stats.tx_bytes += skb->len; 909 pegasus->stats.tx_bytes += skb->len;
912 net->trans_start = jiffies;
913 } 910 }
914 dev_kfree_skb(skb); 911 dev_kfree_skb(skb);
915 912
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index b90d8766ab74..29f5211e645b 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -256,7 +256,7 @@ PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
256 DEFAULT_GPIO_RESET ) 256 DEFAULT_GPIO_RESET )
257PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913, 257PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
258 DEFAULT_GPIO_RESET | PEGASUS_II ) 258 DEFAULT_GPIO_RESET | PEGASUS_II )
259PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a, 259PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x093a,
260 DEFAULT_GPIO_RESET | PEGASUS_II ) 260 DEFAULT_GPIO_RESET | PEGASUS_II )
261PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a, 261PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
262 DEFAULT_GPIO_RESET) 262 DEFAULT_GPIO_RESET)
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index dd8a4adf48ca..28d3ee175e7b 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -104,8 +104,10 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg,
104int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) 104int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
105{ 105{
106 struct cdc_state *info = (void *) &dev->data; 106 struct cdc_state *info = (void *) &dev->data;
107 struct usb_cdc_notification notification;
107 int master_ifnum; 108 int master_ifnum;
108 int retval; 109 int retval;
110 int partial;
109 unsigned count; 111 unsigned count;
110 __le32 rsp; 112 __le32 rsp;
111 u32 xid = 0, msg_len, request_id; 113 u32 xid = 0, msg_len, request_id;
@@ -133,13 +135,17 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
133 if (unlikely(retval < 0 || xid == 0)) 135 if (unlikely(retval < 0 || xid == 0))
134 return retval; 136 return retval;
135 137
136 // FIXME Seems like some devices discard responses when 138 /* Some devices don't respond on the control channel until
137 // we time out and cancel our "get response" requests... 139 * polled on the status channel, so do that first. */
138 // so, this is fragile. Probably need to poll for status. 140 retval = usb_interrupt_msg(
141 dev->udev,
142 usb_rcvintpipe(dev->udev, dev->status->desc.bEndpointAddress),
143 &notification, sizeof(notification), &partial,
144 RNDIS_CONTROL_TIMEOUT_MS);
145 if (unlikely(retval < 0))
146 return retval;
139 147
140 /* ignore status endpoint, just poll the control channel; 148 /* Poll the control channel; the request probably completed immediately */
141 * the request probably completed immediately
142 */
143 rsp = buf->msg_type | RNDIS_MSG_COMPLETION; 149 rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
144 for (count = 0; count < 10; count++) { 150 for (count = 0; count < 10; count++) {
145 memset(buf, 0, CONTROL_BUFFER_SIZE); 151 memset(buf, 0, CONTROL_BUFFER_SIZE);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
new file mode 100644
index 000000000000..f1942d69a0d5
--- /dev/null
+++ b/drivers/net/usb/sierra_net.c
@@ -0,0 +1,1004 @@
1/*
2 * USB-to-WWAN Driver for Sierra Wireless modems
3 *
4 * Copyright (C) 2008, 2009, 2010 Paxton Smith, Matthew Safar, Rory Filer
5 * <linux@sierrawireless.com>
6 *
7 * Portions of this based on the cdc_ether driver by David Brownell (2003-2005)
8 * and Ole Andre Vadla Ravnas (ActiveSync) (2006).
9 *
10 * IMPORTANT DISCLAIMER: This driver is not commercially supported by
11 * Sierra Wireless. Use at your own risk.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#define DRIVER_VERSION "v.2.0"
29#define DRIVER_AUTHOR "Paxton Smith, Matthew Safar, Rory Filer"
30#define DRIVER_DESC "USB-to-WWAN Driver for Sierra Wireless modems"
31static const char driver_name[] = "sierra_net";
32
33/* if defined debug messages enabled */
34/*#define DEBUG*/
35
36#include <linux/module.h>
37#include <linux/etherdevice.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
40#include <linux/sched.h>
41#include <linux/timer.h>
42#include <linux/usb.h>
43#include <linux/usb/cdc.h>
44#include <net/ip.h>
45#include <net/udp.h>
46#include <asm/unaligned.h>
47#include <linux/usb/usbnet.h>
48
49#define SWI_USB_REQUEST_GET_FW_ATTR 0x06
50#define SWI_GET_FW_ATTR_MASK 0x08
51
52/* atomic counter partially included in MAC address to make sure 2 devices
53 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
54 */
55static atomic_t iface_counter = ATOMIC_INIT(0);
56
57/*
58 * SYNC Timer Delay definition used to set the expiry time
59 */
60#define SIERRA_NET_SYNCDELAY (2*HZ)
61
62/* Max. MTU supported. The modem buffers are limited to 1500 */
63#define SIERRA_NET_MAX_SUPPORTED_MTU 1500
64
65/* The SIERRA_NET_USBCTL_BUF_LEN defines a buffer size allocated for control
66 * message reception ... and thus the max. received packet.
67 * (May be the cause for parse_hip returning -EINVAL)
68 */
69#define SIERRA_NET_USBCTL_BUF_LEN 1024
70
71/* list of interface numbers - used for constructing interface lists */
72struct sierra_net_iface_info {
73 const u32 infolen; /* number of interface numbers on list */
74 const u8 *ifaceinfo; /* pointer to the array holding the numbers */
75};
76
77struct sierra_net_info_data {
78 u16 rx_urb_size;
79 struct sierra_net_iface_info whitelist;
80};
81
82/* Private data structure */
83struct sierra_net_data {
84
85 u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
86
87 u16 link_up; /* air link up or down */
88 u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
89
90 u8 sync_msg[4]; /* SYNC message */
91 u8 shdwn_msg[4]; /* Shutdown message */
92
93 /* Backpointer to the container */
94 struct usbnet *usbnet;
95
96 u8 ifnum; /* interface number */
97
98/* Bit masks, must be a power of 2 */
99#define SIERRA_NET_EVENT_RESP_AVAIL 0x01
100#define SIERRA_NET_TIMER_EXPIRY 0x02
101 unsigned long kevent_flags;
102 struct work_struct sierra_net_kevent;
103 struct timer_list sync_timer; /* For retrying SYNC sequence */
104};
105
106struct param {
107 int is_present;
108 union {
109 void *ptr;
110 u32 dword;
111 u16 word;
112 u8 byte;
113 };
114};
115
116/* HIP message type */
117#define SIERRA_NET_HIP_EXTENDEDID 0x7F
118#define SIERRA_NET_HIP_HSYNC_ID 0x60 /* Modem -> host */
119#define SIERRA_NET_HIP_RESTART_ID 0x62 /* Modem -> host */
120#define SIERRA_NET_HIP_MSYNC_ID 0x20 /* Host -> modem */
121#define SIERRA_NET_HIP_SHUTD_ID 0x26 /* Host -> modem */
122
123#define SIERRA_NET_HIP_EXT_IP_IN_ID 0x0202
124#define SIERRA_NET_HIP_EXT_IP_OUT_ID 0x0002
125
126/* 3G UMTS Link Sense Indication definitions */
127#define SIERRA_NET_HIP_LSI_UMTSID 0x78
128
129/* Reverse Channel Grant Indication HIP message */
130#define SIERRA_NET_HIP_RCGI 0x64
131
132/* LSI Protocol types */
133#define SIERRA_NET_PROTOCOL_UMTS 0x01
134/* LSI Coverage */
135#define SIERRA_NET_COVERAGE_NONE 0x00
136#define SIERRA_NET_COVERAGE_NOPACKET 0x01
137
138/* LSI Session */
139#define SIERRA_NET_SESSION_IDLE 0x00
140/* LSI Link types */
141#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00
142
143struct lsi_umts {
144 u8 protocol;
145 u8 unused1;
146 __be16 length;
147 /* eventually use a union for the rest - assume umts for now */
148 u8 coverage;
149 u8 unused2[41];
150 u8 session_state;
151 u8 unused3[33];
152 u8 link_type;
153 u8 pdp_addr_len; /* NW-supplied PDP address len */
154 u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */
155 u8 unused4[23];
156 u8 dns1_addr_len; /* NW-supplied 1st DNS address len (bigendian) */
157 u8 dns1_addr[16]; /* NW-supplied 1st DNS address */
158 u8 dns2_addr_len; /* NW-supplied 2nd DNS address len */
159 u8 dns2_addr[16]; /* NW-supplied 2nd DNS address (bigendian)*/
160 u8 wins1_addr_len; /* NW-supplied 1st Wins address len */
161 u8 wins1_addr[16]; /* NW-supplied 1st Wins address (bigendian)*/
162 u8 wins2_addr_len; /* NW-supplied 2nd Wins address len */
163 u8 wins2_addr[16]; /* NW-supplied 2nd Wins address (bigendian) */
164 u8 unused5[4];
165 u8 gw_addr_len; /* NW-supplied GW address len */
166 u8 gw_addr[16]; /* NW-supplied GW address (bigendian) */
167 u8 reserved[8];
168} __attribute__ ((packed));
169
170#define SIERRA_NET_LSI_COMMON_LEN 4
171#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
172#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
173 (SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
174
175/* Forward definitions */
176static void sierra_sync_timer(unsigned long syncdata);
177static int sierra_net_change_mtu(struct net_device *net, int new_mtu);
178
179/* Our own net device operations structure */
180static const struct net_device_ops sierra_net_device_ops = {
181 .ndo_open = usbnet_open,
182 .ndo_stop = usbnet_stop,
183 .ndo_start_xmit = usbnet_start_xmit,
184 .ndo_tx_timeout = usbnet_tx_timeout,
185 .ndo_change_mtu = sierra_net_change_mtu,
186 .ndo_set_mac_address = eth_mac_addr,
187 .ndo_validate_addr = eth_validate_addr,
188};
189
190/* get private data associated with passed in usbnet device */
191static inline struct sierra_net_data *sierra_net_get_private(struct usbnet *dev)
192{
193 return (struct sierra_net_data *)dev->data[0];
194}
195
196/* set private data associated with passed in usbnet device */
197static inline void sierra_net_set_private(struct usbnet *dev,
198 struct sierra_net_data *priv)
199{
200 dev->data[0] = (unsigned long)priv;
201}
202
203/* is packet IPv4 */
204static inline int is_ip(struct sk_buff *skb)
205{
206 return (skb->protocol == cpu_to_be16(ETH_P_IP));
207}
208
209/*
210 * check passed in packet and make sure that:
211 * - it is linear (no scatter/gather)
212 * - it is ethernet (mac_header properly set)
213 */
214static int check_ethip_packet(struct sk_buff *skb, struct usbnet *dev)
215{
216 skb_reset_mac_header(skb); /* ethernet header */
217
218 if (skb_is_nonlinear(skb)) {
219 netdev_err(dev->net, "Non linear buffer-dropping\n");
220 return 0;
221 }
222
223 if (!pskb_may_pull(skb, ETH_HLEN))
224 return 0;
225 skb->protocol = eth_hdr(skb)->h_proto;
226
227 return 1;
228}
229
230static const u8 *save16bit(struct param *p, const u8 *datap)
231{
232 p->is_present = 1;
233 p->word = get_unaligned_be16(datap);
234 return datap + sizeof(p->word);
235}
236
237static const u8 *save8bit(struct param *p, const u8 *datap)
238{
239 p->is_present = 1;
240 p->byte = *datap;
241 return datap + sizeof(p->byte);
242}
243
244/*----------------------------------------------------------------------------*
245 * BEGIN HIP *
246 *----------------------------------------------------------------------------*/
247/* HIP header */
248#define SIERRA_NET_HIP_HDR_LEN 4
249/* Extended HIP header */
250#define SIERRA_NET_HIP_EXT_HDR_LEN 6
251
252struct hip_hdr {
253 int hdrlen;
254 struct param payload_len;
255 struct param msgid;
256 struct param msgspecific;
257 struct param extmsgid;
258};
259
260static int parse_hip(const u8 *buf, const u32 buflen, struct hip_hdr *hh)
261{
262 const u8 *curp = buf;
263 int padded;
264
265 if (buflen < SIERRA_NET_HIP_HDR_LEN)
266 return -EPROTO;
267
268 curp = save16bit(&hh->payload_len, curp);
269 curp = save8bit(&hh->msgid, curp);
270 curp = save8bit(&hh->msgspecific, curp);
271
272 padded = hh->msgid.byte & 0x80;
273 hh->msgid.byte &= 0x7F; /* 7 bits */
274
275 hh->extmsgid.is_present = (hh->msgid.byte == SIERRA_NET_HIP_EXTENDEDID);
276 if (hh->extmsgid.is_present) {
277 if (buflen < SIERRA_NET_HIP_EXT_HDR_LEN)
278 return -EPROTO;
279
280 hh->payload_len.word &= 0x3FFF; /* 14 bits */
281
282 curp = save16bit(&hh->extmsgid, curp);
283 hh->extmsgid.word &= 0x03FF; /* 10 bits */
284
285 hh->hdrlen = SIERRA_NET_HIP_EXT_HDR_LEN;
286 } else {
287 hh->payload_len.word &= 0x07FF; /* 11 bits */
288 hh->hdrlen = SIERRA_NET_HIP_HDR_LEN;
289 }
290
291 if (padded) {
292 hh->hdrlen++;
293 hh->payload_len.word--;
294 }
295
296 /* if real packet shorter than the claimed length */
297 if (buflen < (hh->hdrlen + hh->payload_len.word))
298 return -EINVAL;
299
300 return 0;
301}
302
303static void build_hip(u8 *buf, const u16 payloadlen,
304 struct sierra_net_data *priv)
305{
306 /* the following doesn't have the full functionality. We
307 * currently build only one kind of header, so it is faster this way
308 */
309 put_unaligned_be16(payloadlen, buf);
310 memcpy(buf+2, priv->tx_hdr_template, sizeof(priv->tx_hdr_template));
311}
312/*----------------------------------------------------------------------------*
313 * END HIP *
314 *----------------------------------------------------------------------------*/
315
316static int sierra_net_send_cmd(struct usbnet *dev,
317 u8 *cmd, int cmdlen, const char * cmd_name)
318{
319 struct sierra_net_data *priv = sierra_net_get_private(dev);
320 int status;
321
322 status = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
323 USB_CDC_SEND_ENCAPSULATED_COMMAND,
324 USB_DIR_OUT|USB_TYPE_CLASS|USB_RECIP_INTERFACE, 0,
325 priv->ifnum, cmd, cmdlen, USB_CTRL_SET_TIMEOUT);
326
327 if (status != cmdlen && status != -ENODEV)
328 netdev_err(dev->net, "Submit %s failed %d\n", cmd_name, status);
329
330 return status;
331}
332
333static int sierra_net_send_sync(struct usbnet *dev)
334{
335 int status;
336 struct sierra_net_data *priv = sierra_net_get_private(dev);
337
338 dev_dbg(&dev->udev->dev, "%s", __func__);
339
340 status = sierra_net_send_cmd(dev, priv->sync_msg,
341 sizeof(priv->sync_msg), "SYNC");
342
343 return status;
344}
345
346static void sierra_net_set_ctx_index(struct sierra_net_data *priv, u8 ctx_ix)
347{
348 dev_dbg(&(priv->usbnet->udev->dev), "%s %d", __func__, ctx_ix);
349 priv->tx_hdr_template[0] = 0x3F;
350 priv->tx_hdr_template[1] = ctx_ix;
351 *((u16 *)&priv->tx_hdr_template[2]) =
352 cpu_to_be16(SIERRA_NET_HIP_EXT_IP_OUT_ID);
353}
354
355static inline int sierra_net_is_valid_addrlen(u8 len)
356{
357 return (len == sizeof(struct in_addr));
358}
359
360static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
361{
362 struct lsi_umts *lsi = (struct lsi_umts *)data;
363
364 if (datalen < sizeof(struct lsi_umts)) {
365 netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
366 __func__, datalen,
367 sizeof(struct lsi_umts));
368 return -1;
369 }
370
371 if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
372 netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
373 __func__, be16_to_cpu(lsi->length),
374 (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
375 return -1;
376 }
377
378 /* Validate the protocol - only support UMTS for now */
379 if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
380 netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
381 lsi->protocol);
382 return -1;
383 }
384
385 /* Validate the link type */
386 if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
387 netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
388 lsi->link_type);
389 return -1;
390 }
391
392 /* Validate the coverage */
393 if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
394 || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
395 netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
396 return 0;
397 }
398
399 /* Validate the session state */
400 if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
401 netdev_err(dev->net, "Session idle, 0x%02x\n",
402 lsi->session_state);
403 return 0;
404 }
405
406 /* Set link_sense true */
407 return 1;
408}
409
410static void sierra_net_handle_lsi(struct usbnet *dev, char *data,
411 struct hip_hdr *hh)
412{
413 struct sierra_net_data *priv = sierra_net_get_private(dev);
414 int link_up;
415
416 link_up = sierra_net_parse_lsi(dev, data + hh->hdrlen,
417 hh->payload_len.word);
418 if (link_up < 0) {
419 netdev_err(dev->net, "Invalid LSI\n");
420 return;
421 }
422 if (link_up) {
423 sierra_net_set_ctx_index(priv, hh->msgspecific.byte);
424 priv->link_up = 1;
425 netif_carrier_on(dev->net);
426 } else {
427 priv->link_up = 0;
428 netif_carrier_off(dev->net);
429 }
430}
431
432static void sierra_net_dosync(struct usbnet *dev)
433{
434 int status;
435 struct sierra_net_data *priv = sierra_net_get_private(dev);
436
437 dev_dbg(&dev->udev->dev, "%s", __func__);
438
439 /* tell modem we are ready */
440 status = sierra_net_send_sync(dev);
441 if (status < 0)
442 netdev_err(dev->net,
443 "Send SYNC failed, status %d\n", status);
444 status = sierra_net_send_sync(dev);
445 if (status < 0)
446 netdev_err(dev->net,
447 "Send SYNC failed, status %d\n", status);
448
449 /* Now, start a timer and make sure we get the Restart Indication */
450 priv->sync_timer.function = sierra_sync_timer;
451 priv->sync_timer.data = (unsigned long) dev;
452 priv->sync_timer.expires = jiffies + SIERRA_NET_SYNCDELAY;
453 add_timer(&priv->sync_timer);
454}
455
456static void sierra_net_kevent(struct work_struct *work)
457{
458 struct sierra_net_data *priv =
459 container_of(work, struct sierra_net_data, sierra_net_kevent);
460 struct usbnet *dev = priv->usbnet;
461 int len;
462 int err;
463 u8 *buf;
464 u8 ifnum;
465
466 if (test_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags)) {
467 clear_bit(SIERRA_NET_EVENT_RESP_AVAIL, &priv->kevent_flags);
468
469 /* Query the modem for the LSI message */
470 buf = kzalloc(SIERRA_NET_USBCTL_BUF_LEN, GFP_KERNEL);
471 if (!buf) {
472 netdev_err(dev->net,
473 "failed to allocate buf for LS msg\n");
474 return;
475 }
476 ifnum = priv->ifnum;
477 len = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
478 USB_CDC_GET_ENCAPSULATED_RESPONSE,
479 USB_DIR_IN|USB_TYPE_CLASS|USB_RECIP_INTERFACE,
480 0, ifnum, buf, SIERRA_NET_USBCTL_BUF_LEN,
481 USB_CTRL_SET_TIMEOUT);
482
483 if (len < 0) {
484 netdev_err(dev->net,
485 "usb_control_msg failed, status %d\n", len);
486 } else {
487 struct hip_hdr hh;
488
489 dev_dbg(&dev->udev->dev, "%s: Received status message,"
490 " %04x bytes", __func__, len);
491
492 err = parse_hip(buf, len, &hh);
493 if (err) {
494 netdev_err(dev->net, "%s: Bad packet,"
495 " parse result %d\n", __func__, err);
496 kfree(buf);
497 return;
498 }
499
500 /* Validate packet length */
501 if (len != hh.hdrlen + hh.payload_len.word) {
502 netdev_err(dev->net, "%s: Bad packet, received"
503 " %d, expected %d\n", __func__, len,
504 hh.hdrlen + hh.payload_len.word);
505 kfree(buf);
506 return;
507 }
508
509 /* Switch on received message types */
510 switch (hh.msgid.byte) {
511 case SIERRA_NET_HIP_LSI_UMTSID:
512 dev_dbg(&dev->udev->dev, "LSI for ctx:%d",
513 hh.msgspecific.byte);
514 sierra_net_handle_lsi(dev, buf, &hh);
515 break;
516 case SIERRA_NET_HIP_RESTART_ID:
517 dev_dbg(&dev->udev->dev, "Restart reported: %d,"
518 " stopping sync timer",
519 hh.msgspecific.byte);
520 /* Got sync resp - stop timer & clear mask */
521 del_timer_sync(&priv->sync_timer);
522 clear_bit(SIERRA_NET_TIMER_EXPIRY,
523 &priv->kevent_flags);
524 break;
525 case SIERRA_NET_HIP_HSYNC_ID:
526 dev_dbg(&dev->udev->dev, "SYNC received");
527 err = sierra_net_send_sync(dev);
528 if (err < 0)
529 netdev_err(dev->net,
530 "Send SYNC failed %d\n", err);
531 break;
532 case SIERRA_NET_HIP_EXTENDEDID:
533 netdev_err(dev->net, "Unrecognized HIP msg, "
534 "extmsgid 0x%04x\n", hh.extmsgid.word);
535 break;
536 case SIERRA_NET_HIP_RCGI:
537 /* Ignored */
538 break;
539 default:
540 netdev_err(dev->net, "Unrecognized HIP msg, "
541 "msgid 0x%02x\n", hh.msgid.byte);
542 break;
543 }
544 }
545 kfree(buf);
546 }
547 /* The sync timer bit might be set */
548 if (test_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags)) {
549 clear_bit(SIERRA_NET_TIMER_EXPIRY, &priv->kevent_flags);
550 dev_dbg(&dev->udev->dev, "Deferred sync timer expiry");
551 sierra_net_dosync(priv->usbnet);
552 }
553
554 if (priv->kevent_flags)
555 dev_dbg(&dev->udev->dev, "sierra_net_kevent done, "
556 "kevent_flags = 0x%lx", priv->kevent_flags);
557}
558
559static void sierra_net_defer_kevent(struct usbnet *dev, int work)
560{
561 struct sierra_net_data *priv = sierra_net_get_private(dev);
562
563 set_bit(work, &priv->kevent_flags);
564 schedule_work(&priv->sierra_net_kevent);
565}
566
567/*
568 * Sync Retransmit Timer Handler. On expiry, kick the work queue
569 */
570void sierra_sync_timer(unsigned long syncdata)
571{
572 struct usbnet *dev = (struct usbnet *)syncdata;
573
574 dev_dbg(&dev->udev->dev, "%s", __func__);
575 /* Kick the tasklet */
576 sierra_net_defer_kevent(dev, SIERRA_NET_TIMER_EXPIRY);
577}
578
579static void sierra_net_status(struct usbnet *dev, struct urb *urb)
580{
581 struct usb_cdc_notification *event;
582
583 dev_dbg(&dev->udev->dev, "%s", __func__);
584
585 if (urb->actual_length < sizeof *event)
586 return;
587
588 /* Add cases to handle other standard notifications. */
589 event = urb->transfer_buffer;
590 switch (event->bNotificationType) {
591 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
592 case USB_CDC_NOTIFY_SPEED_CHANGE:
593 /* USB 305 sends those */
594 break;
595 case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
596 sierra_net_defer_kevent(dev, SIERRA_NET_EVENT_RESP_AVAIL);
597 break;
598 default:
599 netdev_err(dev->net, ": unexpected notification %02x!\n",
600 event->bNotificationType);
601 break;
602 }
603}
604
605static void sierra_net_get_drvinfo(struct net_device *net,
606 struct ethtool_drvinfo *info)
607{
608 /* Inherit standard device info */
609 usbnet_get_drvinfo(net, info);
610 strncpy(info->driver, driver_name, sizeof info->driver);
611 strncpy(info->version, DRIVER_VERSION, sizeof info->version);
612}
613
614static u32 sierra_net_get_link(struct net_device *net)
615{
616 struct usbnet *dev = netdev_priv(net);
617 /* Report link is down whenever the interface is down */
618 return sierra_net_get_private(dev)->link_up && netif_running(net);
619}
620
621static struct ethtool_ops sierra_net_ethtool_ops = {
622 .get_drvinfo = sierra_net_get_drvinfo,
623 .get_link = sierra_net_get_link,
624 .get_msglevel = usbnet_get_msglevel,
625 .set_msglevel = usbnet_set_msglevel,
626 .get_settings = usbnet_get_settings,
627 .set_settings = usbnet_set_settings,
628 .nway_reset = usbnet_nway_reset,
629};
630
631/* MTU can not be more than 1500 bytes, enforce it. */
632static int sierra_net_change_mtu(struct net_device *net, int new_mtu)
633{
634 if (new_mtu > SIERRA_NET_MAX_SUPPORTED_MTU)
635 return -EINVAL;
636
637 return usbnet_change_mtu(net, new_mtu);
638}
639
640static int is_whitelisted(const u8 ifnum,
641 const struct sierra_net_iface_info *whitelist)
642{
643 if (whitelist) {
644 const u8 *list = whitelist->ifaceinfo;
645 int i;
646
647 for (i = 0; i < whitelist->infolen; i++) {
648 if (list[i] == ifnum)
649 return 1;
650 }
651 }
652 return 0;
653}
654
655static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
656{
657 int result = 0;
658 u16 *attrdata;
659
660 attrdata = kmalloc(sizeof(*attrdata), GFP_KERNEL);
661 if (!attrdata)
662 return -ENOMEM;
663
664 result = usb_control_msg(
665 dev->udev,
666 usb_rcvctrlpipe(dev->udev, 0),
667 /* _u8 vendor specific request */
668 SWI_USB_REQUEST_GET_FW_ATTR,
669 USB_DIR_IN | USB_TYPE_VENDOR, /* __u8 request type */
670 0x0000, /* __u16 value not used */
671 0x0000, /* __u16 index not used */
672 attrdata, /* char *data */
673 sizeof(*attrdata), /* __u16 size */
674 USB_CTRL_SET_TIMEOUT); /* int timeout */
675
676 if (result < 0) {
677 kfree(attrdata);
678 return -EIO;
679 }
680
681 *datap = *attrdata;
682
683 kfree(attrdata);
684 return result;
685}
686
687/*
688 * collects the bulk endpoints, the status endpoint.
689 */
690static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
691{
692 u8 ifacenum;
693 u8 numendpoints;
694 u16 fwattr = 0;
695 int status;
696 struct ethhdr *eth;
697 struct sierra_net_data *priv;
698 static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
699 0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
700 static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
701 0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
702
703 struct sierra_net_info_data *data =
704 (struct sierra_net_info_data *)dev->driver_info->data;
705
706 dev_dbg(&dev->udev->dev, "%s", __func__);
707
708 ifacenum = intf->cur_altsetting->desc.bInterfaceNumber;
709 /* We only accept certain interfaces */
710 if (!is_whitelisted(ifacenum, &data->whitelist)) {
711 dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum);
712 return -ENODEV;
713 }
714 numendpoints = intf->cur_altsetting->desc.bNumEndpoints;
715 /* We have three endpoints, bulk in and out, and a status */
716 if (numendpoints != 3) {
717 dev_err(&dev->udev->dev, "Expected 3 endpoints, found: %d",
718 numendpoints);
719 return -ENODEV;
720 }
721 /* Status endpoint set in usbnet_get_endpoints() */
722 dev->status = NULL;
723 status = usbnet_get_endpoints(dev, intf);
724 if (status < 0) {
725 dev_err(&dev->udev->dev, "Error in usbnet_get_endpoints (%d)",
726 status);
727 return -ENODEV;
728 }
729 /* Initialize sierra private data */
730 priv = kzalloc(sizeof *priv, GFP_KERNEL);
731 if (!priv) {
732 dev_err(&dev->udev->dev, "No memory");
733 return -ENOMEM;
734 }
735
736 priv->usbnet = dev;
737 priv->ifnum = ifacenum;
738 dev->net->netdev_ops = &sierra_net_device_ops;
739
740 /* change MAC addr to include, ifacenum, and to be unique */
741 dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
742 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
743
744 /* we will have to manufacture ethernet headers, prepare template */
745 eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
746 memcpy(&eth->h_dest, dev->net->dev_addr, ETH_ALEN);
747 eth->h_proto = cpu_to_be16(ETH_P_IP);
748
749 /* prepare shutdown message template */
750 memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
751 /* set context index initially to 0 - prepares tx hdr template */
752 sierra_net_set_ctx_index(priv, 0);
753
754 /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */
755 dev->rx_urb_size = data->rx_urb_size;
756 if (dev->udev->speed != USB_SPEED_HIGH)
757 dev->rx_urb_size = min_t(size_t, 4096, data->rx_urb_size);
758
759 dev->net->hard_header_len += SIERRA_NET_HIP_EXT_HDR_LEN;
760 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
761
762 /* Set up the netdev */
763 dev->net->flags |= IFF_NOARP;
764 dev->net->ethtool_ops = &sierra_net_ethtool_ops;
765 netif_carrier_off(dev->net);
766
767 sierra_net_set_private(dev, priv);
768
769 priv->kevent_flags = 0;
770
771 /* Use the shared workqueue */
772 INIT_WORK(&priv->sierra_net_kevent, sierra_net_kevent);
773
774 /* Only need to do this once */
775 init_timer(&priv->sync_timer);
776
777 /* verify fw attributes */
778 status = sierra_net_get_fw_attr(dev, &fwattr);
779 dev_dbg(&dev->udev->dev, "Fw attr: %x\n", fwattr);
780
781 /* test whether firmware supports DHCP */
782 if (!(status == sizeof(fwattr) && (fwattr & SWI_GET_FW_ATTR_MASK))) {
783 /* found incompatible firmware version */
784 dev_err(&dev->udev->dev, "Incompatible driver and firmware"
785 " versions\n");
786 kfree(priv);
787 return -ENODEV;
788 }
789 /* prepare sync message from template */
790 memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg));
791
792 /* initiate the sync sequence */
793 sierra_net_dosync(dev);
794
795 return 0;
796}
797
/*
 * usbnet .unbind callback: tear down the driver state created by bind.
 *
 * Ordering matters here: the sync timer is stopped first so it cannot
 * re-queue work, the workqueue is then flushed so no kevent handler is
 * still running, and only after that is the private data freed.
 */
static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf)
{
	int status;
	struct sierra_net_data *priv = sierra_net_get_private(dev);

	dev_dbg(&dev->udev->dev, "%s", __func__);

	/* Kill the timer then flush the work queue */
	del_timer_sync(&priv->sync_timer);

	/* NOTE(review): flush_scheduled_work() flushes the entire shared
	 * workqueue, not just priv->sierra_net_kevent;
	 * cancel_work_sync(&priv->sierra_net_kevent) would be more targeted
	 * and avoids the known deadlock hazards of flushing the global
	 * queue — confirm before changing. */
	flush_scheduled_work();

	/* tell modem we are going away */
	status = sierra_net_send_cmd(dev, priv->shdwn_msg,
			sizeof(priv->shdwn_msg), "Shutdown");
	if (status < 0)
		netdev_err(dev->net,
			"usb_control_msg failed, status %d\n", status);

	/* detach priv before freeing so no stale pointer remains on dev */
	sierra_net_set_private(dev, NULL);

	kfree(priv);
}
821
822static struct sk_buff *sierra_net_skb_clone(struct usbnet *dev,
823 struct sk_buff *skb, int len)
824{
825 struct sk_buff *new_skb;
826
827 /* clone skb */
828 new_skb = skb_clone(skb, GFP_ATOMIC);
829
830 /* remove len bytes from original */
831 skb_pull(skb, len);
832
833 /* trim next packet to it's length */
834 if (new_skb) {
835 skb_trim(new_skb, len);
836 } else {
837 if (netif_msg_rx_err(dev))
838 netdev_err(dev->net, "failed to get skb\n");
839 dev->net->stats.rx_dropped++;
840 }
841
842 return new_skb;
843}
844
/* ---------------------------- Receive data path ----------------------*/
/*
 * usbnet .rx_fixup callback: split one received URB buffer into the IP
 * packets it carries.
 *
 * Each packet is prefixed with a HIP header holding an extended message
 * id and a payload length.  For every valid packet the HIP header is
 * stripped and a canned Ethernet header is written in front of the
 * payload, so the stack sees ordinary Ethernet frames.
 *
 * Returns 1 to let usbnet pass the packet remaining in @skb up the
 * stack, 0 to have usbnet drop the buffer (usbnet itself bumps
 * rx_errors on a 0 return).
 */
static int sierra_net_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	int err;
	struct hip_hdr hh;
	struct sk_buff *new_skb;

	dev_dbg(&dev->udev->dev, "%s", __func__);

	/* could contain multiple packets */
	while (likely(skb->len)) {
		err = parse_hip(skb->data, skb->len, &hh);
		if (err) {
			if (netif_msg_rx_err(dev))
				netdev_err(dev->net, "Invalid HIP header %d\n",
					err);
			/* dev->net->stats.rx_errors incremented by caller */
			dev->net->stats.rx_length_errors++;
			return 0;
		}

		/* Validate Extended HIP header */
		if (!hh.extmsgid.is_present
		   || hh.extmsgid.word != SIERRA_NET_HIP_EXT_IP_IN_ID) {
			if (netif_msg_rx_err(dev))
				netdev_err(dev->net, "HIP/ETH: Invalid pkt\n");

			dev->net->stats.rx_frame_errors++;
			/* dev->net->stats.rx_errors incremented by caller */;
			return 0;
		}

		/* drop the HIP header from the front of the buffer */
		skb_pull(skb, hh.hdrlen);

		/* We are going to accept this packet, prepare it */
		/* NOTE(review): this overwrites the first ETH_HLEN bytes at
		 * the new head with the Ethernet template built in bind();
		 * it presumes the HIP framing leaves room for that header
		 * in front of the IP payload — confirm against parse_hip(). */
		memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
			ETH_HLEN);

		/* Last packet in batch handled by usbnet */
		if (hh.payload_len.word == skb->len)
			return 1;

		/* more packets follow: clone this one out and loop */
		new_skb = sierra_net_skb_clone(dev, skb, hh.payload_len.word);
		if (new_skb)
			usbnet_skb_return(dev, new_skb);

	} /* while */

	return 0;
}
895
896/* ---------------------------- Transmit data path ----------------------*/
897struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
898 gfp_t flags)
899{
900 struct sierra_net_data *priv = sierra_net_get_private(dev);
901 u16 len;
902 bool need_tail;
903
904 dev_dbg(&dev->udev->dev, "%s", __func__);
905 if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) {
906 /* enough head room as is? */
907 if (SIERRA_NET_HIP_EXT_HDR_LEN <= skb_headroom(skb)) {
908 /* Save the Eth/IP length and set up HIP hdr */
909 len = skb->len;
910 skb_push(skb, SIERRA_NET_HIP_EXT_HDR_LEN);
911 /* Handle ZLP issue */
912 need_tail = ((len + SIERRA_NET_HIP_EXT_HDR_LEN)
913 % dev->maxpacket == 0);
914 if (need_tail) {
915 if (unlikely(skb_tailroom(skb) == 0)) {
916 netdev_err(dev->net, "tx_fixup:"
917 "no room for packet\n");
918 dev_kfree_skb_any(skb);
919 return NULL;
920 } else {
921 skb->data[skb->len] = 0;
922 __skb_put(skb, 1);
923 len = len + 1;
924 }
925 }
926 build_hip(skb->data, len, priv);
927 return skb;
928 } else {
929 /*
930 * compensate in the future if necessary
931 */
932 netdev_err(dev->net, "tx_fixup: no room for HIP\n");
933 } /* headroom */
934 }
935
936 if (!priv->link_up)
937 dev->net->stats.tx_carrier_errors++;
938
939 /* tx_dropped incremented by usbnet */
940
941 /* filter the packet out, release it */
942 dev_kfree_skb_any(skb);
943 return NULL;
944}
945
/* composite-device interface numbers that carry WWAN data */
static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };

/* per-product tuning for the 0x68A3 modem: rx URB size plus the
 * whitelist of data interfaces above */
static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
	.rx_urb_size = 8 * 1024,
	.whitelist = {
		.infolen = ARRAY_SIZE(sierra_net_ifnum_list),
		.ifaceinfo = sierra_net_ifnum_list
	}
};
954
/* usbnet driver_info for the 0x68A3 device: wires this driver's
 * bind/unbind, status and rx/tx fixup hooks into the usbnet core */
static const struct driver_info sierra_net_info_68A3 = {
	.description = "Sierra Wireless USB-to-WWAN Modem",
	.flags = FLAG_WWAN | FLAG_SEND_ZLP,
	.bind = sierra_net_bind,
	.unbind = sierra_net_unbind,
	.status = sierra_net_status,
	.rx_fixup = sierra_net_rx_fixup,
	.tx_fixup = sierra_net_tx_fixup,
	/* per-product data retrieved by bind via driver_info->data */
	.data = (unsigned long)&sierra_net_info_data_68A3,
};
965
/* USB ids this driver binds to; driver_info carries the per-product
 * configuration above */
static const struct usb_device_id products[] = {
	{USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
	 .driver_info = (unsigned long) &sierra_net_info_68A3},

	{}, /* last item */
};
MODULE_DEVICE_TABLE(usb, products);
973
/* We are based on usbnet, so let it handle the USB driver specifics */
static struct usb_driver sierra_net_driver = {
	.name = "sierra_net",
	.id_table = products,
	.probe = usbnet_probe,
	.disconnect = usbnet_disconnect,
	.suspend = usbnet_suspend,
	.resume = usbnet_resume,
	/* ids are fixed at compile time; no sysfs new_id support */
	.no_dynamic_id = 1,
};
984
/* module entry point: register the usb_driver with the USB core */
static int __init sierra_net_init(void)
{
	/* compile-time guard that usbnet's driver-private 'data' area is
	 * large enough.
	 * NOTE(review): the bound used is sizeof(struct cdc_state), copied
	 * from the CDC drivers, rather than this driver's own private
	 * storage needs — confirm the intended bound. */
	BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
				< sizeof(struct cdc_state));

	return usb_register(&sierra_net_driver);
}
992
/* module exit point: unregister from the USB core */
static void __exit sierra_net_exit(void)
{
	usb_deregister(&sierra_net_driver);
}
997
/* exit listed before init here; invocation order of these macros is
 * irrelevant to the kernel */
module_exit(sierra_net_exit);
module_init(sierra_net_init);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 35b98b1b79e4..753ee6eb7edd 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -445,14 +445,14 @@ static void smsc75xx_set_multicast(struct net_device *netdev)
445 netif_dbg(dev, drv, dev->net, "receive all multicast enabled"); 445 netif_dbg(dev, drv, dev->net, "receive all multicast enabled");
446 pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF; 446 pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF;
447 } else if (!netdev_mc_empty(dev->net)) { 447 } else if (!netdev_mc_empty(dev->net)) {
448 struct dev_mc_list *mc_list; 448 struct netdev_hw_addr *ha;
449 449
450 netif_dbg(dev, drv, dev->net, "receive multicast hash filter"); 450 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
451 451
452 pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF; 452 pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF;
453 453
454 netdev_for_each_mc_addr(mc_list, netdev) { 454 netdev_for_each_mc_addr(ha, netdev) {
455 u32 bitnum = smsc75xx_hash(mc_list->dmi_addr); 455 u32 bitnum = smsc75xx_hash(ha->addr);
456 pdata->multicast_hash_table[bitnum / 32] |= 456 pdata->multicast_hash_table[bitnum / 32] |=
457 (1 << (bitnum % 32)); 457 (1 << (bitnum % 32));
458 } 458 }
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 3135af63d378..12a3c88c5282 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -385,13 +385,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
385 pdata->mac_cr |= MAC_CR_MCPAS_; 385 pdata->mac_cr |= MAC_CR_MCPAS_;
386 pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_); 386 pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
387 } else if (!netdev_mc_empty(dev->net)) { 387 } else if (!netdev_mc_empty(dev->net)) {
388 struct dev_mc_list *mc_list; 388 struct netdev_hw_addr *ha;
389 389
390 pdata->mac_cr |= MAC_CR_HPFILT_; 390 pdata->mac_cr |= MAC_CR_HPFILT_;
391 pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); 391 pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
392 392
393 netdev_for_each_mc_addr(mc_list, netdev) { 393 netdev_for_each_mc_addr(ha, netdev) {
394 u32 bitnum = smsc95xx_hash(mc_list->dmi_addr); 394 u32 bitnum = smsc95xx_hash(ha->addr);
395 u32 mask = 0x01 << (bitnum & 0x1F); 395 u32 mask = 0x01 << (bitnum & 0x1F);
396 if (bitnum & 0x20) 396 if (bitnum & 0x20)
397 hash_hi |= mask; 397 hash_hi |= mask;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7177abc78dc6..a95c73de5824 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1069,12 +1069,15 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1069 * NOTE: strictly conforming cdc-ether devices should expect 1069 * NOTE: strictly conforming cdc-ether devices should expect
1070 * the ZLP here, but ignore the one-byte packet. 1070 * the ZLP here, but ignore the one-byte packet.
1071 */ 1071 */
1072 if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) { 1072 if (length % dev->maxpacket == 0) {
1073 urb->transfer_buffer_length++; 1073 if (!(info->flags & FLAG_SEND_ZLP)) {
1074 if (skb_tailroom(skb)) { 1074 urb->transfer_buffer_length++;
1075 skb->data[skb->len] = 0; 1075 if (skb_tailroom(skb)) {
1076 __skb_put(skb, 1); 1076 skb->data[skb->len] = 0;
1077 } 1077 __skb_put(skb, 1);
1078 }
1079 } else
1080 urb->transfer_flags |= URB_ZERO_PACKET;
1078 } 1081 }
1079 1082
1080 spin_lock_irqsave(&dev->txq.lock, flags); 1083 spin_lock_irqsave(&dev->txq.lock, flags);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index f9f0730b53d5..5ec542dd5b50 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -187,7 +187,6 @@ tx_drop:
187 return NETDEV_TX_OK; 187 return NETDEV_TX_OK;
188 188
189rx_drop: 189rx_drop:
190 kfree_skb(skb);
191 rcv_stats->rx_dropped++; 190 rcv_stats->rx_dropped++;
192 return NETDEV_TX_OK; 191 return NETDEV_TX_OK;
193} 192}
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 388751aa66e0..4930f9dbc493 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1209,7 +1209,7 @@ static void rhine_reset_task(struct work_struct *work)
1209 spin_unlock_bh(&rp->lock); 1209 spin_unlock_bh(&rp->lock);
1210 enable_irq(rp->pdev->irq); 1210 enable_irq(rp->pdev->irq);
1211 1211
1212 dev->trans_start = jiffies; 1212 dev->trans_start = jiffies; /* prevent tx timeout */
1213 dev->stats.tx_errors++; 1213 dev->stats.tx_errors++;
1214 netif_wake_queue(dev); 1214 netif_wake_queue(dev);
1215} 1215}
@@ -1294,8 +1294,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1294 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN) 1294 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1295 netif_stop_queue(dev); 1295 netif_stop_queue(dev);
1296 1296
1297 dev->trans_start = jiffies;
1298
1299 spin_unlock_irqrestore(&rp->lock, flags); 1297 spin_unlock_irqrestore(&rp->lock, flags);
1300 1298
1301 if (debug > 4) { 1299 if (debug > 4) {
@@ -1703,11 +1701,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
1703 iowrite32(0xffffffff, ioaddr + MulticastFilter1); 1701 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1704 rx_mode = 0x0C; 1702 rx_mode = 0x0C;
1705 } else { 1703 } else {
1706 struct dev_mc_list *mclist; 1704 struct netdev_hw_addr *ha;
1707 1705
1708 memset(mc_filter, 0, sizeof(mc_filter)); 1706 memset(mc_filter, 0, sizeof(mc_filter));
1709 netdev_for_each_mc_addr(mclist, dev) { 1707 netdev_for_each_mc_addr(ha, dev) {
1710 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 1708 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1711 1709
1712 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 1710 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1713 } 1711 }
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index bc278d4ee89d..42dffd3e5795 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -719,30 +719,30 @@ static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
719 u32 status = 0; 719 u32 status = 0;
720 u16 ANAR; 720 u16 ANAR;
721 721
722 if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs)) 722 if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
723 status |= VELOCITY_LINK_FAIL; 723 status |= VELOCITY_LINK_FAIL;
724 724
725 if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs)) 725 if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
726 status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; 726 status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
727 else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs)) 727 else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
728 status |= (VELOCITY_SPEED_1000); 728 status |= (VELOCITY_SPEED_1000);
729 else { 729 else {
730 velocity_mii_read(regs, MII_REG_ANAR, &ANAR); 730 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
731 if (ANAR & ANAR_TXFD) 731 if (ANAR & ADVERTISE_100FULL)
732 status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL); 732 status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
733 else if (ANAR & ANAR_TX) 733 else if (ANAR & ADVERTISE_100HALF)
734 status |= VELOCITY_SPEED_100; 734 status |= VELOCITY_SPEED_100;
735 else if (ANAR & ANAR_10FD) 735 else if (ANAR & ADVERTISE_10FULL)
736 status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL); 736 status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
737 else 737 else
738 status |= (VELOCITY_SPEED_10); 738 status |= (VELOCITY_SPEED_10);
739 } 739 }
740 740
741 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { 741 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
742 velocity_mii_read(regs, MII_REG_ANAR, &ANAR); 742 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
743 if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) 743 if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
744 == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { 744 == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
745 if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) 745 if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
746 status |= VELOCITY_AUTONEG_ENABLE; 746 status |= VELOCITY_AUTONEG_ENABLE;
747 } 747 }
748 } 748 }
@@ -801,23 +801,23 @@ static void set_mii_flow_control(struct velocity_info *vptr)
801 /*Enable or Disable PAUSE in ANAR */ 801 /*Enable or Disable PAUSE in ANAR */
802 switch (vptr->options.flow_cntl) { 802 switch (vptr->options.flow_cntl) {
803 case FLOW_CNTL_TX: 803 case FLOW_CNTL_TX:
804 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); 804 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
805 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); 805 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
806 break; 806 break;
807 807
808 case FLOW_CNTL_RX: 808 case FLOW_CNTL_RX:
809 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); 809 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
810 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); 810 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
811 break; 811 break;
812 812
813 case FLOW_CNTL_TX_RX: 813 case FLOW_CNTL_TX_RX:
814 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); 814 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
815 MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); 815 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
816 break; 816 break;
817 817
818 case FLOW_CNTL_DISABLE: 818 case FLOW_CNTL_DISABLE:
819 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); 819 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
820 MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); 820 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
821 break; 821 break;
822 default: 822 default:
823 break; 823 break;
@@ -832,10 +832,10 @@ static void set_mii_flow_control(struct velocity_info *vptr)
832 */ 832 */
833static void mii_set_auto_on(struct velocity_info *vptr) 833static void mii_set_auto_on(struct velocity_info *vptr)
834{ 834{
835 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs)) 835 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
836 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); 836 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
837 else 837 else
838 MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); 838 MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
839} 839}
840 840
841static u32 check_connection_type(struct mac_regs __iomem *regs) 841static u32 check_connection_type(struct mac_regs __iomem *regs)
@@ -860,11 +860,11 @@ static u32 check_connection_type(struct mac_regs __iomem *regs)
860 else 860 else
861 status |= VELOCITY_SPEED_100; 861 status |= VELOCITY_SPEED_100;
862 862
863 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { 863 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
864 velocity_mii_read(regs, MII_REG_ANAR, &ANAR); 864 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
865 if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) 865 if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
866 == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { 866 == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
867 if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) 867 if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
868 status |= VELOCITY_AUTONEG_ENABLE; 868 status |= VELOCITY_AUTONEG_ENABLE;
869 } 869 }
870 } 870 }
@@ -905,7 +905,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
905 */ 905 */
906 906
907 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) 907 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
908 MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); 908 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
909 909
910 /* 910 /*
911 * If connection type is AUTO 911 * If connection type is AUTO
@@ -915,9 +915,9 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
915 /* clear force MAC mode bit */ 915 /* clear force MAC mode bit */
916 BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR); 916 BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
917 /* set duplex mode of MAC according to duplex mode of MII */ 917 /* set duplex mode of MAC according to duplex mode of MII */
918 MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs); 918 MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
919 MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); 919 MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
920 MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); 920 MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
921 921
922 /* enable AUTO-NEGO mode */ 922 /* enable AUTO-NEGO mode */
923 mii_set_auto_on(vptr); 923 mii_set_auto_on(vptr);
@@ -952,31 +952,31 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
952 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR); 952 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
953 } 953 }
954 954
955 MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); 955 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
956 956
957 if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) 957 if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
958 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG); 958 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
959 else 959 else
960 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG); 960 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
961 961
962 /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */ 962 /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
963 velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR); 963 velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
964 ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)); 964 ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
965 if (mii_status & VELOCITY_SPEED_100) { 965 if (mii_status & VELOCITY_SPEED_100) {
966 if (mii_status & VELOCITY_DUPLEX_FULL) 966 if (mii_status & VELOCITY_DUPLEX_FULL)
967 ANAR |= ANAR_TXFD; 967 ANAR |= ADVERTISE_100FULL;
968 else 968 else
969 ANAR |= ANAR_TX; 969 ANAR |= ADVERTISE_100HALF;
970 } else { 970 } else {
971 if (mii_status & VELOCITY_DUPLEX_FULL) 971 if (mii_status & VELOCITY_DUPLEX_FULL)
972 ANAR |= ANAR_10FD; 972 ANAR |= ADVERTISE_10FULL;
973 else 973 else
974 ANAR |= ANAR_10; 974 ANAR |= ADVERTISE_10HALF;
975 } 975 }
976 velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR); 976 velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
977 /* enable AUTO-NEGO mode */ 977 /* enable AUTO-NEGO mode */
978 mii_set_auto_on(vptr); 978 mii_set_auto_on(vptr);
979 /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */ 979 /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
980 } 980 }
981 /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */ 981 /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
982 /* vptr->mii_status=check_connection_type(vptr->mac_regs); */ 982 /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
@@ -1126,7 +1126,7 @@ static void velocity_set_multi(struct net_device *dev)
1126 struct mac_regs __iomem *regs = vptr->mac_regs; 1126 struct mac_regs __iomem *regs = vptr->mac_regs;
1127 u8 rx_mode; 1127 u8 rx_mode;
1128 int i; 1128 int i;
1129 struct dev_mc_list *mclist; 1129 struct netdev_hw_addr *ha;
1130 1130
1131 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1131 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1132 writel(0xffffffff, &regs->MARCAM[0]); 1132 writel(0xffffffff, &regs->MARCAM[0]);
@@ -1142,8 +1142,8 @@ static void velocity_set_multi(struct net_device *dev)
1142 mac_get_cam_mask(regs, vptr->mCAMmask); 1142 mac_get_cam_mask(regs, vptr->mCAMmask);
1143 1143
1144 i = 0; 1144 i = 0;
1145 netdev_for_each_mc_addr(mclist, dev) { 1145 netdev_for_each_mc_addr(ha, dev) {
1146 mac_set_cam(regs, i + offset, mclist->dmi_addr); 1146 mac_set_cam(regs, i + offset, ha->addr);
1147 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); 1147 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1148 i++; 1148 i++;
1149 } 1149 }
@@ -1178,36 +1178,36 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
1178 /* 1178 /*
1179 * Reset to hardware default 1179 * Reset to hardware default
1180 */ 1180 */
1181 MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); 1181 MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1182 /* 1182 /*
1183 * Turn on ECHODIS bit in NWay-forced full mode and turn it 1183 * Turn on ECHODIS bit in NWay-forced full mode and turn it
1184 * off it in NWay-forced half mode for NWay-forced v.s. 1184 * off it in NWay-forced half mode for NWay-forced v.s.
1185 * legacy-forced issue. 1185 * legacy-forced issue.
1186 */ 1186 */
1187 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) 1187 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1188 MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); 1188 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1189 else 1189 else
1190 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); 1190 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1191 /* 1191 /*
1192 * Turn on Link/Activity LED enable bit for CIS8201 1192 * Turn on Link/Activity LED enable bit for CIS8201
1193 */ 1193 */
1194 MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs); 1194 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1195 break; 1195 break;
1196 case PHYID_VT3216_32BIT: 1196 case PHYID_VT3216_32BIT:
1197 case PHYID_VT3216_64BIT: 1197 case PHYID_VT3216_64BIT:
1198 /* 1198 /*
1199 * Reset to hardware default 1199 * Reset to hardware default
1200 */ 1200 */
1201 MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); 1201 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1202 /* 1202 /*
1203 * Turn on ECHODIS bit in NWay-forced full mode and turn it 1203 * Turn on ECHODIS bit in NWay-forced full mode and turn it
1204 * off it in NWay-forced half mode for NWay-forced v.s. 1204 * off it in NWay-forced half mode for NWay-forced v.s.
1205 * legacy-forced issue 1205 * legacy-forced issue
1206 */ 1206 */
1207 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) 1207 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1208 MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); 1208 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1209 else 1209 else
1210 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); 1210 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1211 break; 1211 break;
1212 1212
1213 case PHYID_MARVELL_1000: 1213 case PHYID_MARVELL_1000:
@@ -1219,15 +1219,15 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
1219 /* 1219 /*
1220 * Reset to hardware default 1220 * Reset to hardware default
1221 */ 1221 */
1222 MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); 1222 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1223 break; 1223 break;
1224 default: 1224 default:
1225 ; 1225 ;
1226 } 1226 }
1227 velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR); 1227 velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1228 if (BMCR & BMCR_ISO) { 1228 if (BMCR & BMCR_ISOLATE) {
1229 BMCR &= ~BMCR_ISO; 1229 BMCR &= ~BMCR_ISOLATE;
1230 velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR); 1230 velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1231 } 1231 }
1232} 1232}
1233 1233
@@ -2606,7 +2606,6 @@ static netdev_tx_t velocity_xmit(struct sk_buff *skb,
2606 td_ptr->td_buf[0].size |= TD_QUEUE; 2606 td_ptr->td_buf[0].size |= TD_QUEUE;
2607 mac_tx_queue_wake(vptr->mac_regs, qnum); 2607 mac_tx_queue_wake(vptr->mac_regs, qnum);
2608 2608
2609 dev->trans_start = jiffies;
2610 spin_unlock_irqrestore(&vptr->lock, flags); 2609 spin_unlock_irqrestore(&vptr->lock, flags);
2611out: 2610out:
2612 return NETDEV_TX_OK; 2611 return NETDEV_TX_OK;
@@ -2953,13 +2952,13 @@ static int velocity_set_wol(struct velocity_info *vptr)
2953 2952
2954 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { 2953 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2955 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) 2954 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2956 MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); 2955 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
2957 2956
2958 MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); 2957 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
2959 } 2958 }
2960 2959
2961 if (vptr->mii_status & VELOCITY_SPEED_1000) 2960 if (vptr->mii_status & VELOCITY_SPEED_1000)
2962 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); 2961 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2963 2962
2964 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR); 2963 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2965 2964
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index ef4a0f64ba16..c38191179fae 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1240,86 +1240,16 @@ struct velocity_context {
1240 u32 pattern[8]; 1240 u32 pattern[8];
1241}; 1241};
1242 1242
1243
1244/*
1245 * MII registers.
1246 */
1247
1248
1249/* 1243/*
1250 * Registers in the MII (offset unit is WORD) 1244 * Registers in the MII (offset unit is WORD)
1251 */ 1245 */
1252 1246
1253#define MII_REG_BMCR 0x00 // physical address
1254#define MII_REG_BMSR 0x01 //
1255#define MII_REG_PHYID1 0x02 // OUI
1256#define MII_REG_PHYID2 0x03 // OUI + Module ID + REV ID
1257#define MII_REG_ANAR 0x04 //
1258#define MII_REG_ANLPAR 0x05 //
1259#define MII_REG_G1000CR 0x09 //
1260#define MII_REG_G1000SR 0x0A //
1261#define MII_REG_MODCFG 0x10 //
1262#define MII_REG_TCSR 0x16 //
1263#define MII_REG_PLED 0x1B //
1264// NS, MYSON only
1265#define MII_REG_PCR 0x17 //
1266// ESI only
1267#define MII_REG_PCSR 0x17 //
1268#define MII_REG_AUXCR 0x1C //
1269
1270// Marvell 88E1000/88E1000S 1247// Marvell 88E1000/88E1000S
1271#define MII_REG_PSCR 0x10 // PHY specific control register 1248#define MII_REG_PSCR 0x10 // PHY specific control register
1272 1249
1273// 1250//
1274// Bits in the BMCR register 1251// Bits in the Silicon revision register
1275//
1276#define BMCR_RESET 0x8000 //
1277#define BMCR_LBK 0x4000 //
1278#define BMCR_SPEED100 0x2000 //
1279#define BMCR_AUTO 0x1000 //
1280#define BMCR_PD 0x0800 //
1281#define BMCR_ISO 0x0400 //
1282#define BMCR_REAUTO 0x0200 //
1283#define BMCR_FDX 0x0100 //
1284#define BMCR_SPEED1G 0x0040 //
1285//
1286// Bits in the BMSR register
1287//
1288#define BMSR_AUTOCM 0x0020 //
1289#define BMSR_LNK 0x0004 //
1290
1291//
1292// Bits in the ANAR register
1293//
1294#define ANAR_ASMDIR 0x0800 // Asymmetric PAUSE support
1295#define ANAR_PAUSE 0x0400 // Symmetric PAUSE Support
1296#define ANAR_T4 0x0200 //
1297#define ANAR_TXFD 0x0100 //
1298#define ANAR_TX 0x0080 //
1299#define ANAR_10FD 0x0040 //
1300#define ANAR_10 0x0020 //
1301//
1302// Bits in the ANLPAR register
1303//
1304#define ANLPAR_ASMDIR 0x0800 // Asymmetric PAUSE support
1305#define ANLPAR_PAUSE 0x0400 // Symmetric PAUSE Support
1306#define ANLPAR_T4 0x0200 //
1307#define ANLPAR_TXFD 0x0100 //
1308#define ANLPAR_TX 0x0080 //
1309#define ANLPAR_10FD 0x0040 //
1310#define ANLPAR_10 0x0020 //
1311
1312//
1313// Bits in the G1000CR register
1314//
1315#define G1000CR_1000FD 0x0200 // PHY is 1000-T Full-duplex capable
1316#define G1000CR_1000 0x0100 // PHY is 1000-T Half-duplex capable
1317
1318//
1319// Bits in the G1000SR register
1320// 1252//
1321#define G1000SR_1000FD 0x0800 // LP PHY is 1000-T Full-duplex capable
1322#define G1000SR_1000 0x0400 // LP PHY is 1000-T Half-duplex capable
1323 1253
1324#define TCSR_ECHODIS 0x2000 // 1254#define TCSR_ECHODIS 0x2000 //
1325#define AUXCR_MDPPS 0x0004 // 1255#define AUXCR_MDPPS 0x0004 //
@@ -1338,7 +1268,6 @@ struct velocity_context {
1338 1268
1339#define PHYID_REV_ID_MASK 0x0000000FUL 1269#define PHYID_REV_ID_MASK 0x0000000FUL
1340 1270
1341#define PHYID_GET_PHY_REV_ID(i) ((i) & PHYID_REV_ID_MASK)
1342#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK) 1271#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK)
1343 1272
1344#define MII_REG_BITS_ON(x,i,p) do {\ 1273#define MII_REG_BITS_ON(x,i,p) do {\
@@ -1362,8 +1291,8 @@ struct velocity_context {
1362 1291
1363#define MII_GET_PHY_ID(p) ({\ 1292#define MII_GET_PHY_ID(p) ({\
1364 u32 id;\ 1293 u32 id;\
1365 velocity_mii_read((p),MII_REG_PHYID2,(u16 *) &id);\ 1294 velocity_mii_read((p),MII_PHYSID2,(u16 *) &id);\
1366 velocity_mii_read((p),MII_REG_PHYID1,((u16 *) &id)+1);\ 1295 velocity_mii_read((p),MII_PHYSID1,((u16 *) &id)+1);\
1367 (id);}) 1296 (id);})
1368 1297
1369/* 1298/*
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0577dd1a42d..78eb3190b9b1 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -40,8 +40,7 @@ module_param(gso, bool, 0444);
40 40
41#define VIRTNET_SEND_COMMAND_SG_MAX 2 41#define VIRTNET_SEND_COMMAND_SG_MAX 2
42 42
43struct virtnet_info 43struct virtnet_info {
44{
45 struct virtio_device *vdev; 44 struct virtio_device *vdev;
46 struct virtqueue *rvq, *svq, *cvq; 45 struct virtqueue *rvq, *svq, *cvq;
47 struct net_device *dev; 46 struct net_device *dev;
@@ -62,6 +61,10 @@ struct virtnet_info
62 61
63 /* Chain pages by the private ptr. */ 62 /* Chain pages by the private ptr. */
64 struct page *pages; 63 struct page *pages;
64
65 /* fragments + linear part + virtio header */
66 struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
67 struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
65}; 68};
66 69
67struct skb_vnet_hdr { 70struct skb_vnet_hdr {
@@ -119,7 +122,7 @@ static void skb_xmit_done(struct virtqueue *svq)
119 struct virtnet_info *vi = svq->vdev->priv; 122 struct virtnet_info *vi = svq->vdev->priv;
120 123
121 /* Suppress further interrupts. */ 124 /* Suppress further interrupts. */
122 svq->vq_ops->disable_cb(svq); 125 virtqueue_disable_cb(svq);
123 126
124 /* We were probably waiting for more output buffers. */ 127 /* We were probably waiting for more output buffers. */
125 netif_wake_queue(vi->dev); 128 netif_wake_queue(vi->dev);
@@ -207,7 +210,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
207 return -EINVAL; 210 return -EINVAL;
208 } 211 }
209 212
210 page = vi->rvq->vq_ops->get_buf(vi->rvq, &len); 213 page = virtqueue_get_buf(vi->rvq, &len);
211 if (!page) { 214 if (!page) {
212 pr_debug("%s: rx error: %d buffers missing\n", 215 pr_debug("%s: rx error: %d buffers missing\n",
213 skb->dev->name, hdr->mhdr.num_buffers); 216 skb->dev->name, hdr->mhdr.num_buffers);
@@ -324,10 +327,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
324{ 327{
325 struct sk_buff *skb; 328 struct sk_buff *skb;
326 struct skb_vnet_hdr *hdr; 329 struct skb_vnet_hdr *hdr;
327 struct scatterlist sg[2];
328 int err; 330 int err;
329 331
330 sg_init_table(sg, 2);
331 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); 332 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
332 if (unlikely(!skb)) 333 if (unlikely(!skb))
333 return -ENOMEM; 334 return -ENOMEM;
@@ -335,11 +336,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
335 skb_put(skb, MAX_PACKET_LEN); 336 skb_put(skb, MAX_PACKET_LEN);
336 337
337 hdr = skb_vnet_hdr(skb); 338 hdr = skb_vnet_hdr(skb);
338 sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr); 339 sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);
339 340
340 skb_to_sgvec(skb, sg + 1, 0, skb->len); 341 skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
341 342
342 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb); 343 err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
343 if (err < 0) 344 if (err < 0)
344 dev_kfree_skb(skb); 345 dev_kfree_skb(skb);
345 346
@@ -348,13 +349,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
348 349
349static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) 350static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
350{ 351{
351 struct scatterlist sg[MAX_SKB_FRAGS + 2];
352 struct page *first, *list = NULL; 352 struct page *first, *list = NULL;
353 char *p; 353 char *p;
354 int i, err, offset; 354 int i, err, offset;
355 355
356 sg_init_table(sg, MAX_SKB_FRAGS + 2); 356 /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
357 /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
358 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { 357 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
359 first = get_a_page(vi, gfp); 358 first = get_a_page(vi, gfp);
360 if (!first) { 359 if (!first) {
@@ -362,7 +361,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
362 give_pages(vi, list); 361 give_pages(vi, list);
363 return -ENOMEM; 362 return -ENOMEM;
364 } 363 }
365 sg_set_buf(&sg[i], page_address(first), PAGE_SIZE); 364 sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);
366 365
367 /* chain new page in list head to match sg */ 366 /* chain new page in list head to match sg */
368 first->private = (unsigned long)list; 367 first->private = (unsigned long)list;
@@ -376,17 +375,17 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
376 } 375 }
377 p = page_address(first); 376 p = page_address(first);
378 377
379 /* sg[0], sg[1] share the same page */ 378 /* vi->rx_sg[0], vi->rx_sg[1] share the same page */
380 /* a separated sg[0] for virtio_net_hdr only during to QEMU bug*/ 379 /* a separated vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */
381 sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr)); 380 sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));
382 381
383 /* sg[1] for data packet, from offset */ 382 /* vi->rx_sg[1] for data packet, from offset */
384 offset = sizeof(struct padded_vnet_hdr); 383 offset = sizeof(struct padded_vnet_hdr);
385 sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset); 384 sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);
386 385
387 /* chain first in list head */ 386 /* chain first in list head */
388 first->private = (unsigned long)list; 387 first->private = (unsigned long)list;
389 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2, 388 err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
390 first); 389 first);
391 if (err < 0) 390 if (err < 0)
392 give_pages(vi, first); 391 give_pages(vi, first);
@@ -397,16 +396,15 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
397static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) 396static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
398{ 397{
399 struct page *page; 398 struct page *page;
400 struct scatterlist sg;
401 int err; 399 int err;
402 400
403 page = get_a_page(vi, gfp); 401 page = get_a_page(vi, gfp);
404 if (!page) 402 if (!page)
405 return -ENOMEM; 403 return -ENOMEM;
406 404
407 sg_init_one(&sg, page_address(page), PAGE_SIZE); 405 sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
408 406
409 err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page); 407 err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
410 if (err < 0) 408 if (err < 0)
411 give_pages(vi, page); 409 give_pages(vi, page);
412 410
@@ -435,7 +433,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
435 } while (err > 0); 433 } while (err > 0);
436 if (unlikely(vi->num > vi->max)) 434 if (unlikely(vi->num > vi->max))
437 vi->max = vi->num; 435 vi->max = vi->num;
438 vi->rvq->vq_ops->kick(vi->rvq); 436 virtqueue_kick(vi->rvq);
439 return !oom; 437 return !oom;
440} 438}
441 439
@@ -444,7 +442,7 @@ static void skb_recv_done(struct virtqueue *rvq)
444 struct virtnet_info *vi = rvq->vdev->priv; 442 struct virtnet_info *vi = rvq->vdev->priv;
445 /* Schedule NAPI, Suppress further interrupts if successful. */ 443 /* Schedule NAPI, Suppress further interrupts if successful. */
446 if (napi_schedule_prep(&vi->napi)) { 444 if (napi_schedule_prep(&vi->napi)) {
447 rvq->vq_ops->disable_cb(rvq); 445 virtqueue_disable_cb(rvq);
448 __napi_schedule(&vi->napi); 446 __napi_schedule(&vi->napi);
449 } 447 }
450} 448}
@@ -473,7 +471,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
473 471
474again: 472again:
475 while (received < budget && 473 while (received < budget &&
476 (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) { 474 (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
477 receive_buf(vi->dev, buf, len); 475 receive_buf(vi->dev, buf, len);
478 --vi->num; 476 --vi->num;
479 received++; 477 received++;
@@ -487,9 +485,9 @@ again:
487 /* Out of packets? */ 485 /* Out of packets? */
488 if (received < budget) { 486 if (received < budget) {
489 napi_complete(napi); 487 napi_complete(napi);
490 if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) && 488 if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
491 napi_schedule_prep(napi)) { 489 napi_schedule_prep(napi)) {
492 vi->rvq->vq_ops->disable_cb(vi->rvq); 490 virtqueue_disable_cb(vi->rvq);
493 __napi_schedule(napi); 491 __napi_schedule(napi);
494 goto again; 492 goto again;
495 } 493 }
@@ -503,7 +501,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
503 struct sk_buff *skb; 501 struct sk_buff *skb;
504 unsigned int len, tot_sgs = 0; 502 unsigned int len, tot_sgs = 0;
505 503
506 while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) { 504 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
507 pr_debug("Sent skb %p\n", skb); 505 pr_debug("Sent skb %p\n", skb);
508 vi->dev->stats.tx_bytes += skb->len; 506 vi->dev->stats.tx_bytes += skb->len;
509 vi->dev->stats.tx_packets++; 507 vi->dev->stats.tx_packets++;
@@ -515,12 +513,9 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
515 513
516static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) 514static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
517{ 515{
518 struct scatterlist sg[2+MAX_SKB_FRAGS];
519 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); 516 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
520 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 517 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
521 518
522 sg_init_table(sg, 2+MAX_SKB_FRAGS);
523
524 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 519 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
525 520
526 if (skb->ip_summed == CHECKSUM_PARTIAL) { 521 if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -554,12 +549,13 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
554 549
555 /* Encode metadata header at front. */ 550 /* Encode metadata header at front. */
556 if (vi->mergeable_rx_bufs) 551 if (vi->mergeable_rx_bufs)
557 sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr); 552 sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
558 else 553 else
559 sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr); 554 sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
560 555
561 hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; 556 hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
562 return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb); 557 return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
558 0, skb);
563} 559}
564 560
565static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 561static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -578,14 +574,14 @@ again:
578 if (unlikely(capacity < 0)) { 574 if (unlikely(capacity < 0)) {
579 netif_stop_queue(dev); 575 netif_stop_queue(dev);
580 dev_warn(&dev->dev, "Unexpected full queue\n"); 576 dev_warn(&dev->dev, "Unexpected full queue\n");
581 if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { 577 if (unlikely(!virtqueue_enable_cb(vi->svq))) {
582 vi->svq->vq_ops->disable_cb(vi->svq); 578 virtqueue_disable_cb(vi->svq);
583 netif_start_queue(dev); 579 netif_start_queue(dev);
584 goto again; 580 goto again;
585 } 581 }
586 return NETDEV_TX_BUSY; 582 return NETDEV_TX_BUSY;
587 } 583 }
588 vi->svq->vq_ops->kick(vi->svq); 584 virtqueue_kick(vi->svq);
589 585
590 /* Don't wait up for transmitted skbs to be freed. */ 586 /* Don't wait up for transmitted skbs to be freed. */
591 skb_orphan(skb); 587 skb_orphan(skb);
@@ -595,12 +591,12 @@ again:
595 * before it gets out of hand. Naturally, this wastes entries. */ 591 * before it gets out of hand. Naturally, this wastes entries. */
596 if (capacity < 2+MAX_SKB_FRAGS) { 592 if (capacity < 2+MAX_SKB_FRAGS) {
597 netif_stop_queue(dev); 593 netif_stop_queue(dev);
598 if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) { 594 if (unlikely(!virtqueue_enable_cb(vi->svq))) {
599 /* More just got used, free them then recheck. */ 595 /* More just got used, free them then recheck. */
600 capacity += free_old_xmit_skbs(vi); 596 capacity += free_old_xmit_skbs(vi);
601 if (capacity >= 2+MAX_SKB_FRAGS) { 597 if (capacity >= 2+MAX_SKB_FRAGS) {
602 netif_start_queue(dev); 598 netif_start_queue(dev);
603 vi->svq->vq_ops->disable_cb(vi->svq); 599 virtqueue_disable_cb(vi->svq);
604 } 600 }
605 } 601 }
606 } 602 }
@@ -645,7 +641,7 @@ static int virtnet_open(struct net_device *dev)
645 * now. virtnet_poll wants re-enable the queue, so we disable here. 641 * now. virtnet_poll wants re-enable the queue, so we disable here.
646 * We synchronize against interrupts via NAPI_STATE_SCHED */ 642 * We synchronize against interrupts via NAPI_STATE_SCHED */
647 if (napi_schedule_prep(&vi->napi)) { 643 if (napi_schedule_prep(&vi->napi)) {
648 vi->rvq->vq_ops->disable_cb(vi->rvq); 644 virtqueue_disable_cb(vi->rvq);
649 __napi_schedule(&vi->napi); 645 __napi_schedule(&vi->napi);
650 } 646 }
651 return 0; 647 return 0;
@@ -682,15 +678,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
682 sg_set_buf(&sg[i + 1], sg_virt(s), s->length); 678 sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
683 sg_set_buf(&sg[out + in - 1], &status, sizeof(status)); 679 sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
684 680
685 BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0); 681 BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
686 682
687 vi->cvq->vq_ops->kick(vi->cvq); 683 virtqueue_kick(vi->cvq);
688 684
689 /* 685 /*
690 * Spin for a response, the kick causes an ioport write, trapping 686 * Spin for a response, the kick causes an ioport write, trapping
691 * into the hypervisor, so the request should be handled immediately. 687 * into the hypervisor, so the request should be handled immediately.
692 */ 688 */
693 while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp)) 689 while (!virtqueue_get_buf(vi->cvq, &tmp))
694 cpu_relax(); 690 cpu_relax();
695 691
696 return status == VIRTIO_NET_OK; 692 return status == VIRTIO_NET_OK;
@@ -722,7 +718,6 @@ static void virtnet_set_rx_mode(struct net_device *dev)
722 struct scatterlist sg[2]; 718 struct scatterlist sg[2];
723 u8 promisc, allmulti; 719 u8 promisc, allmulti;
724 struct virtio_net_ctrl_mac *mac_data; 720 struct virtio_net_ctrl_mac *mac_data;
725 struct dev_addr_list *addr;
726 struct netdev_hw_addr *ha; 721 struct netdev_hw_addr *ha;
727 int uc_count; 722 int uc_count;
728 int mc_count; 723 int mc_count;
@@ -779,8 +774,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
779 774
780 mac_data->entries = mc_count; 775 mac_data->entries = mc_count;
781 i = 0; 776 i = 0;
782 netdev_for_each_mc_addr(addr, dev) 777 netdev_for_each_mc_addr(ha, dev)
783 memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN); 778 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
784 779
785 sg_set_buf(&sg[1], mac_data, 780 sg_set_buf(&sg[1], mac_data,
786 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 781 sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
@@ -942,6 +937,8 @@ static int virtnet_probe(struct virtio_device *vdev)
942 vdev->priv = vi; 937 vdev->priv = vi;
943 vi->pages = NULL; 938 vi->pages = NULL;
944 INIT_DELAYED_WORK(&vi->refill, refill_work); 939 INIT_DELAYED_WORK(&vi->refill, refill_work);
940 sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
941 sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
945 942
946 /* If we can receive ANY GSO packets, we must allocate large ones. */ 943 /* If we can receive ANY GSO packets, we must allocate large ones. */
947 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 944 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
@@ -1006,13 +1003,13 @@ static void free_unused_bufs(struct virtnet_info *vi)
1006{ 1003{
1007 void *buf; 1004 void *buf;
1008 while (1) { 1005 while (1) {
1009 buf = vi->svq->vq_ops->detach_unused_buf(vi->svq); 1006 buf = virtqueue_detach_unused_buf(vi->svq);
1010 if (!buf) 1007 if (!buf)
1011 break; 1008 break;
1012 dev_kfree_skb(buf); 1009 dev_kfree_skb(buf);
1013 } 1010 }
1014 while (1) { 1011 while (1) {
1015 buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq); 1012 buf = virtqueue_detach_unused_buf(vi->rvq);
1016 if (!buf) 1013 if (!buf)
1017 break; 1014 break;
1018 if (vi->mergeable_rx_bufs || vi->big_packets) 1015 if (vi->mergeable_rx_bufs || vi->big_packets)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index cff3485d9673..989b742551ac 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -992,7 +992,6 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
992 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD, 992 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
993 tq->tx_ring.next2fill); 993 tq->tx_ring.next2fill);
994 } 994 }
995 netdev->trans_start = jiffies;
996 995
997 return NETDEV_TX_OK; 996 return NETDEV_TX_OK;
998 997
@@ -1174,7 +1173,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1174 netif_receive_skb(skb); 1173 netif_receive_skb(skb);
1175 } 1174 }
1176 1175
1177 adapter->netdev->last_rx = jiffies;
1178 ctx->skb = NULL; 1176 ctx->skb = NULL;
1179 } 1177 }
1180 1178
@@ -1371,13 +1369,12 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1371 1369
1372 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size + 1370 sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1373 rq->rx_ring[1].size); 1371 rq->rx_ring[1].size);
1374 bi = kmalloc(sz, GFP_KERNEL); 1372 bi = kzalloc(sz, GFP_KERNEL);
1375 if (!bi) { 1373 if (!bi) {
1376 printk(KERN_ERR "%s: failed to allocate rx bufinfo\n", 1374 printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
1377 adapter->netdev->name); 1375 adapter->netdev->name);
1378 goto err; 1376 goto err;
1379 } 1377 }
1380 memset(bi, 0, sz);
1381 rq->buf_info[0] = bi; 1378 rq->buf_info[0] = bi;
1382 rq->buf_info[1] = bi + rq->rx_ring[0].size; 1379 rq->buf_info[1] = bi + rq->rx_ring[0].size;
1383 1380
@@ -1675,11 +1672,11 @@ vmxnet3_copy_mc(struct net_device *netdev)
1675 /* We may be called with BH disabled */ 1672 /* We may be called with BH disabled */
1676 buf = kmalloc(sz, GFP_ATOMIC); 1673 buf = kmalloc(sz, GFP_ATOMIC);
1677 if (buf) { 1674 if (buf) {
1678 struct dev_mc_list *mc; 1675 struct netdev_hw_addr *ha;
1679 int i = 0; 1676 int i = 0;
1680 1677
1681 netdev_for_each_mc_addr(mc, netdev) 1678 netdev_for_each_mc_addr(ha, netdev)
1682 memcpy(buf + i++ * ETH_ALEN, mc->dmi_addr, 1679 memcpy(buf + i++ * ETH_ALEN, ha->addr,
1683 ETH_ALEN); 1680 ETH_ALEN);
1684 } 1681 }
1685 } 1682 }
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index a21a25d218b6..297f0d202073 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -183,8 +183,6 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
183 pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd); 183 pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
184 184
185 pci_save_state(hldev->pdev); 185 pci_save_state(hldev->pdev);
186
187 return;
188} 186}
189 187
190/* 188/*
@@ -342,8 +340,6 @@ void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
342 340
343 hldev->minor_revision = 341 hldev->minor_revision =
344 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64); 342 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
345
346 return;
347} 343}
348 344
349/* 345/*
@@ -357,8 +353,10 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
357 353
358 switch (host_type) { 354 switch (host_type) {
359 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION: 355 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
360 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | 356 if (func_id == 0) {
361 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; 357 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
358 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
359 }
362 break; 360 break;
363 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION: 361 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
364 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | 362 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
@@ -426,8 +424,6 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
426 hldev->first_vp_id = i; 424 hldev->first_vp_id = i;
427 break; 425 break;
428 } 426 }
429
430 return;
431} 427}
432 428
433/* 429/*
@@ -633,8 +629,10 @@ vxge_hw_device_initialize(
633 __vxge_hw_device_pci_e_init(hldev); 629 __vxge_hw_device_pci_e_init(hldev);
634 630
635 status = __vxge_hw_device_reg_addr_get(hldev); 631 status = __vxge_hw_device_reg_addr_get(hldev);
636 if (status != VXGE_HW_OK) 632 if (status != VXGE_HW_OK) {
633 vfree(hldev);
637 goto exit; 634 goto exit;
635 }
638 __vxge_hw_device_id_get(hldev); 636 __vxge_hw_device_id_get(hldev);
639 637
640 __vxge_hw_device_host_info_get(hldev); 638 __vxge_hw_device_host_info_get(hldev);
@@ -1213,19 +1211,16 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
1213 /* link this RxD block with previous one */ 1211 /* link this RxD block with previous one */
1214 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index); 1212 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
1215 } 1213 }
1216
1217 return;
1218} 1214}
1219 1215
1220/* 1216/*
1221 * __vxge_hw_ring_initial_replenish - Initial replenish of RxDs 1217 * __vxge_hw_ring_replenish - Initial replenish of RxDs
1222 * This function replenishes the RxDs from reserve array to work array 1218 * This function replenishes the RxDs from reserve array to work array
1223 */ 1219 */
1224enum vxge_hw_status 1220enum vxge_hw_status
1225vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag) 1221vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
1226{ 1222{
1227 void *rxd; 1223 void *rxd;
1228 int i = 0;
1229 struct __vxge_hw_channel *channel; 1224 struct __vxge_hw_channel *channel;
1230 enum vxge_hw_status status = VXGE_HW_OK; 1225 enum vxge_hw_status status = VXGE_HW_OK;
1231 1226
@@ -1246,11 +1241,6 @@ vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
1246 } 1241 }
1247 1242
1248 vxge_hw_ring_rxd_post(ring, rxd); 1243 vxge_hw_ring_rxd_post(ring, rxd);
1249 if (min_flag) {
1250 i++;
1251 if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
1252 break;
1253 }
1254 } 1244 }
1255 status = VXGE_HW_OK; 1245 status = VXGE_HW_OK;
1256exit: 1246exit:
@@ -1355,7 +1345,7 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
1355 * Currently we don't have a case when the 1) is done without the 2). 1345 * Currently we don't have a case when the 1) is done without the 2).
1356 */ 1346 */
1357 if (ring->rxd_init) { 1347 if (ring->rxd_init) {
1358 status = vxge_hw_ring_replenish(ring, 1); 1348 status = vxge_hw_ring_replenish(ring);
1359 if (status != VXGE_HW_OK) { 1349 if (status != VXGE_HW_OK) {
1360 __vxge_hw_ring_delete(vp); 1350 __vxge_hw_ring_delete(vp);
1361 goto exit; 1351 goto exit;
@@ -1417,7 +1407,7 @@ enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1417 goto exit; 1407 goto exit;
1418 1408
1419 if (ring->rxd_init) { 1409 if (ring->rxd_init) {
1420 status = vxge_hw_ring_replenish(ring, 1); 1410 status = vxge_hw_ring_replenish(ring);
1421 if (status != VXGE_HW_OK) 1411 if (status != VXGE_HW_OK)
1422 goto exit; 1412 goto exit;
1423 } 1413 }
@@ -2320,8 +2310,6 @@ __vxge_hw_fifo_mempool_item_alloc(
2320 txdl_priv->first_txdp = txdp; 2310 txdl_priv->first_txdp = txdp;
2321 txdl_priv->next_txdl_priv = NULL; 2311 txdl_priv->next_txdl_priv = NULL;
2322 txdl_priv->alloc_frags = 0; 2312 txdl_priv->alloc_frags = 0;
2323
2324 return;
2325} 2313}
2326 2314
2327/* 2315/*
@@ -2578,7 +2566,6 @@ __vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2578 writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0); 2566 writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2579 writeq(0, &vpath_reg->rts_access_steer_data1); 2567 writeq(0, &vpath_reg->rts_access_steer_data1);
2580 wmb(); 2568 wmb();
2581 return;
2582} 2569}
2583 2570
2584 2571
@@ -3486,7 +3473,6 @@ __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3486 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE; 3473 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3487 3474
3488 writeq(val64, &vp_reg->prc_cfg4); 3475 writeq(val64, &vp_reg->prc_cfg4);
3489 return;
3490} 3476}
3491 3477
3492/* 3478/*
@@ -3905,7 +3891,6 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
3905 &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); 3891 &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3906 } 3892 }
3907 } 3893 }
3908 return;
3909} 3894}
3910/* 3895/*
3911 * __vxge_hw_vpath_initialize 3896 * __vxge_hw_vpath_initialize
@@ -5039,8 +5024,6 @@ __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5039 if (status == VXGE_HW_OK) 5024 if (status == VXGE_HW_OK)
5040 __vxge_hw_blockpool_blocks_remove(blockpool); 5025 __vxge_hw_blockpool_blocks_remove(blockpool);
5041 } 5026 }
5042
5043 return;
5044} 5027}
5045 5028
5046/* 5029/*
@@ -5096,6 +5079,4 @@ __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5096 } 5079 }
5097 5080
5098 __vxge_hw_blockpool_blocks_remove(blockpool); 5081 __vxge_hw_blockpool_blocks_remove(blockpool);
5099
5100 return;
5101} 5082}
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 13f5416307f8..4ae2625d4d8f 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -765,10 +765,18 @@ struct vxge_hw_device_hw_info {
765#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6 765#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
766#define VXGE_HW_VH_NORMAL_FUNCTION 7 766#define VXGE_HW_VH_NORMAL_FUNCTION 7
767 u64 function_mode; 767 u64 function_mode;
768#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 0 768#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
769#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 1 769#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
770#define VXGE_HW_FUNCTION_MODE_SRIOV 2 770#define VXGE_HW_FUNCTION_MODE_SRIOV 2
771#define VXGE_HW_FUNCTION_MODE_MRIOV 3 771#define VXGE_HW_FUNCTION_MODE_MRIOV 3
772#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
773#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
774#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
775#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
776#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
777#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
778#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
779
772 u32 func_id; 780 u32 func_id;
773 u64 vpath_mask; 781 u64 vpath_mask;
774 struct vxge_hw_device_version fw_version; 782 struct vxge_hw_device_version fw_version;
@@ -1915,20 +1923,32 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
1915 gfp_t flags; 1923 gfp_t flags;
1916 void *vaddr; 1924 void *vaddr;
1917 unsigned long misaligned = 0; 1925 unsigned long misaligned = 0;
1926 int realloc_flag = 0;
1918 *p_dma_acch = *p_dmah = NULL; 1927 *p_dma_acch = *p_dmah = NULL;
1919 1928
1920 if (in_interrupt()) 1929 if (in_interrupt())
1921 flags = GFP_ATOMIC | GFP_DMA; 1930 flags = GFP_ATOMIC | GFP_DMA;
1922 else 1931 else
1923 flags = GFP_KERNEL | GFP_DMA; 1932 flags = GFP_KERNEL | GFP_DMA;
1924 1933realloc:
1925 size += VXGE_CACHE_LINE_SIZE;
1926
1927 vaddr = kmalloc((size), flags); 1934 vaddr = kmalloc((size), flags);
1928 if (vaddr == NULL) 1935 if (vaddr == NULL)
1929 return vaddr; 1936 return vaddr;
1930 misaligned = (unsigned long)VXGE_ALIGN(*((u64 *)&vaddr), 1937 misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
1931 VXGE_CACHE_LINE_SIZE); 1938 VXGE_CACHE_LINE_SIZE);
1939 if (realloc_flag)
1940 goto out;
1941
1942 if (misaligned) {
1943 /* misaligned, free current one and try allocating
1944 * size + VXGE_CACHE_LINE_SIZE memory
1945 */
1946 kfree((void *) vaddr);
1947 size += VXGE_CACHE_LINE_SIZE;
1948 realloc_flag = 1;
1949 goto realloc;
1950 }
1951out:
1932 *(unsigned long *)p_dma_acch = misaligned; 1952 *(unsigned long *)p_dma_acch = misaligned;
1933 vaddr = (void *)((u8 *)vaddr + misaligned); 1953 vaddr = (void *)((u8 *)vaddr + misaligned);
1934 return vaddr; 1954 return vaddr;
@@ -2254,4 +2274,6 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
2254 struct vxge_hw_rth_hash_types *hash_type, 2274 struct vxge_hw_rth_hash_types *hash_type,
2255 u16 bucket_size); 2275 u16 bucket_size);
2256 2276
2277enum vxge_hw_status
2278__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
2257#endif 2279#endif
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index aaf374cfd322..cadef8549c06 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -109,7 +109,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
109 int index, offset; 109 int index, offset;
110 enum vxge_hw_status status; 110 enum vxge_hw_status status;
111 u64 reg; 111 u64 reg;
112 u8 *reg_space = (u8 *) space; 112 u64 *reg_space = (u64 *) space;
113 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 113 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
114 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 114 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
115 pci_get_drvdata(vdev->pdev); 115 pci_get_drvdata(vdev->pdev);
@@ -129,8 +129,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
129 __func__, __LINE__); 129 __func__, __LINE__);
130 return; 130 return;
131 } 131 }
132 132 *reg_space++ = reg;
133 memcpy((reg_space + offset), &reg, 8);
134 } 133 }
135 } 134 }
136} 135}
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index ba6d0da78c30..b504bd561362 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -445,7 +445,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
445 ring->ndev->name, __func__, __LINE__); 445 ring->ndev->name, __func__, __LINE__);
446 ring->pkts_processed = 0; 446 ring->pkts_processed = 0;
447 447
448 vxge_hw_ring_replenish(ringh, 0); 448 vxge_hw_ring_replenish(ringh);
449 449
450 do { 450 do {
451 prefetch((char *)dtr + L1_CACHE_BYTES); 451 prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1118,7 +1118,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
1118 */ 1118 */
1119static void vxge_set_multicast(struct net_device *dev) 1119static void vxge_set_multicast(struct net_device *dev)
1120{ 1120{
1121 struct dev_mc_list *mclist; 1121 struct netdev_hw_addr *ha;
1122 struct vxgedev *vdev; 1122 struct vxgedev *vdev;
1123 int i, mcast_cnt = 0; 1123 int i, mcast_cnt = 0;
1124 struct __vxge_hw_device *hldev; 1124 struct __vxge_hw_device *hldev;
@@ -1218,8 +1218,8 @@ static void vxge_set_multicast(struct net_device *dev)
1218 } 1218 }
1219 1219
1220 /* Add new ones */ 1220 /* Add new ones */
1221 netdev_for_each_mc_addr(mclist, dev) { 1221 netdev_for_each_mc_addr(ha, dev) {
1222 memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN); 1222 memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
1223 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; 1223 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1224 vpath_idx++) { 1224 vpath_idx++) {
1225 mac_info.vpath_no = vpath_idx; 1225 mac_info.vpath_no = vpath_idx;
@@ -1364,28 +1364,26 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
1364void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) 1364void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1365{ 1365{
1366 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1366 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1367 int msix_id, alarm_msix_id; 1367 int msix_id = 0;
1368 int tim_msix_id[4] = {[0 ...3] = 0}; 1368 int tim_msix_id[4] = {0, 1, 0, 0};
1369 int alarm_msix_id = VXGE_ALARM_MSIX_ID;
1369 1370
1370 vxge_hw_vpath_intr_enable(vpath->handle); 1371 vxge_hw_vpath_intr_enable(vpath->handle);
1371 1372
1372 if (vdev->config.intr_type == INTA) 1373 if (vdev->config.intr_type == INTA)
1373 vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle); 1374 vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
1374 else { 1375 else {
1375 msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1376 alarm_msix_id =
1377 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
1378
1379 tim_msix_id[0] = msix_id;
1380 tim_msix_id[1] = msix_id + 1;
1381 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, 1376 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
1382 alarm_msix_id); 1377 alarm_msix_id);
1383 1378
1379 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1384 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id); 1380 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1385 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1); 1381 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
1386 1382
1387 /* enable the alarm vector */ 1383 /* enable the alarm vector */
1388 vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id); 1384 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1385 VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
1386 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1389 } 1387 }
1390} 1388}
1391 1389
@@ -1406,12 +1404,13 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1406 if (vdev->config.intr_type == INTA) 1404 if (vdev->config.intr_type == INTA)
1407 vxge_hw_vpath_inta_mask_tx_rx(vpath->handle); 1405 vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
1408 else { 1406 else {
1409 msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE; 1407 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1410 vxge_hw_vpath_msix_mask(vpath->handle, msix_id); 1408 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1411 vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1); 1409 vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
1412 1410
1413 /* disable the alarm vector */ 1411 /* disable the alarm vector */
1414 msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; 1412 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1413 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
1415 vxge_hw_vpath_msix_mask(vpath->handle, msix_id); 1414 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1416 } 1415 }
1417} 1416}
@@ -1765,7 +1764,6 @@ static void vxge_netpoll(struct net_device *dev)
1765 1764
1766 vxge_debug_entryexit(VXGE_TRACE, 1765 vxge_debug_entryexit(VXGE_TRACE,
1767 "%s:%d Exiting...", __func__, __LINE__); 1766 "%s:%d Exiting...", __func__, __LINE__);
1768 return;
1769} 1767}
1770#endif 1768#endif
1771 1769
@@ -2224,19 +2222,18 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
2224 enum vxge_hw_status status; 2222 enum vxge_hw_status status;
2225 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id; 2223 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2226 struct vxgedev *vdev = vpath->vdev; 2224 struct vxgedev *vdev = vpath->vdev;
2227 int alarm_msix_id = 2225 int msix_id = (vpath->handle->vpath->vp_id *
2228 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; 2226 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2229 2227
2230 for (i = 0; i < vdev->no_of_vpath; i++) { 2228 for (i = 0; i < vdev->no_of_vpath; i++) {
2231 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, 2229 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2232 alarm_msix_id);
2233 2230
2234 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, 2231 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2235 vdev->exec_mode); 2232 vdev->exec_mode);
2236 if (status == VXGE_HW_OK) { 2233 if (status == VXGE_HW_OK) {
2237 2234
2238 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, 2235 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2239 alarm_msix_id); 2236 msix_id);
2240 continue; 2237 continue;
2241 } 2238 }
2242 vxge_debug_intr(VXGE_ERR, 2239 vxge_debug_intr(VXGE_ERR,
@@ -2249,18 +2246,17 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
2249static int vxge_alloc_msix(struct vxgedev *vdev) 2246static int vxge_alloc_msix(struct vxgedev *vdev)
2250{ 2247{
2251 int j, i, ret = 0; 2248 int j, i, ret = 0;
2252 int intr_cnt = 0; 2249 int msix_intr_vect = 0, temp;
2253 int alarm_msix_id = 0, msix_intr_vect = 0;
2254 vdev->intr_cnt = 0; 2250 vdev->intr_cnt = 0;
2255 2251
2252start:
2256 /* Tx/Rx MSIX Vectors count */ 2253 /* Tx/Rx MSIX Vectors count */
2257 vdev->intr_cnt = vdev->no_of_vpath * 2; 2254 vdev->intr_cnt = vdev->no_of_vpath * 2;
2258 2255
2259 /* Alarm MSIX Vectors count */ 2256 /* Alarm MSIX Vectors count */
2260 vdev->intr_cnt++; 2257 vdev->intr_cnt++;
2261 2258
2262 intr_cnt = (vdev->max_vpath_supported * 2) + 1; 2259 vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
2263 vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
2264 GFP_KERNEL); 2260 GFP_KERNEL);
2265 if (!vdev->entries) { 2261 if (!vdev->entries) {
2266 vxge_debug_init(VXGE_ERR, 2262 vxge_debug_init(VXGE_ERR,
@@ -2269,8 +2265,9 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
2269 return -ENOMEM; 2265 return -ENOMEM;
2270 } 2266 }
2271 2267
2272 vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry), 2268 vdev->vxge_entries =
2273 GFP_KERNEL); 2269 kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
2270 GFP_KERNEL);
2274 if (!vdev->vxge_entries) { 2271 if (!vdev->vxge_entries) {
2275 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", 2272 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2276 VXGE_DRIVER_NAME); 2273 VXGE_DRIVER_NAME);
@@ -2278,9 +2275,7 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
2278 return -ENOMEM; 2275 return -ENOMEM;
2279 } 2276 }
2280 2277
2281 /* Last vector in the list is used for alarm */ 2278 for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
2282 alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2283 for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
2284 2279
2285 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; 2280 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2286 2281
@@ -2298,47 +2293,31 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
2298 } 2293 }
2299 2294
2300 /* Initialize the alarm vector */ 2295 /* Initialize the alarm vector */
2301 vdev->entries[j].entry = alarm_msix_id; 2296 vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
2302 vdev->vxge_entries[j].entry = alarm_msix_id; 2297 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
2303 vdev->vxge_entries[j].in_use = 0; 2298 vdev->vxge_entries[j].in_use = 0;
2304 2299
2305 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt); 2300 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
2306 /* if driver request exceeeds available irq's, request with a small
2307 * number.
2308 */
2309 if (ret > 0) {
2310 vxge_debug_init(VXGE_ERR,
2311 "%s: MSI-X enable failed for %d vectors, available: %d",
2312 VXGE_DRIVER_NAME, intr_cnt, ret);
2313 vdev->max_vpath_supported = vdev->no_of_vpath;
2314 intr_cnt = (vdev->max_vpath_supported * 2) + 1;
2315
2316 /* Reset the alarm vector setting */
2317 vdev->entries[j].entry = 0;
2318 vdev->vxge_entries[j].entry = 0;
2319
2320 /* Initialize the alarm vector with new setting */
2321 vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
2322 vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
2323 vdev->vxge_entries[intr_cnt - 1].in_use = 0;
2324
2325 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
2326 if (!ret)
2327 vxge_debug_init(VXGE_ERR,
2328 "%s: MSI-X enabled for %d vectors",
2329 VXGE_DRIVER_NAME, intr_cnt);
2330 }
2331 2301
2332 if (ret) { 2302 if (ret > 0) {
2333 vxge_debug_init(VXGE_ERR, 2303 vxge_debug_init(VXGE_ERR,
2334 "%s: MSI-X enable failed for %d vectors, ret: %d", 2304 "%s: MSI-X enable failed for %d vectors, ret: %d",
2335 VXGE_DRIVER_NAME, intr_cnt, ret); 2305 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
2336 kfree(vdev->entries); 2306 kfree(vdev->entries);
2337 kfree(vdev->vxge_entries); 2307 kfree(vdev->vxge_entries);
2338 vdev->entries = NULL; 2308 vdev->entries = NULL;
2339 vdev->vxge_entries = NULL; 2309 vdev->vxge_entries = NULL;
2310
2311 if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3))
2312 return -ENODEV;
2313 /* Try with less no of vector by reducing no of vpaths count */
2314 temp = (ret - 1)/2;
2315 vxge_close_vpaths(vdev, temp);
2316 vdev->no_of_vpath = temp;
2317 goto start;
2318 } else if (ret < 0)
2340 return -ENODEV; 2319 return -ENODEV;
2341 } 2320
2342 return 0; 2321 return 0;
2343} 2322}
2344 2323
@@ -2346,43 +2325,26 @@ static int vxge_enable_msix(struct vxgedev *vdev)
2346{ 2325{
2347 2326
2348 int i, ret = 0; 2327 int i, ret = 0;
2349 enum vxge_hw_status status;
2350 /* 0 - Tx, 1 - Rx */ 2328 /* 0 - Tx, 1 - Rx */
2351 int tim_msix_id[4]; 2329 int tim_msix_id[4] = {0, 1, 0, 0};
2352 int alarm_msix_id = 0, msix_intr_vect = 0; 2330
2353 vdev->intr_cnt = 0; 2331 vdev->intr_cnt = 0;
2354 2332
2355 /* allocate msix vectors */ 2333 /* allocate msix vectors */
2356 ret = vxge_alloc_msix(vdev); 2334 ret = vxge_alloc_msix(vdev);
2357 if (!ret) { 2335 if (!ret) {
2358 /* Last vector in the list is used for alarm */
2359 alarm_msix_id =
2360 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2361 for (i = 0; i < vdev->no_of_vpath; i++) { 2336 for (i = 0; i < vdev->no_of_vpath; i++) {
2362 2337
2363 /* If fifo or ring are not enabled 2338 /* If fifo or ring are not enabled
2364 the MSIX vector for that should be set to 0 2339 the MSIX vector for that should be set to 0
2365 Hence initializeing this array to all 0s. 2340 Hence initializeing this array to all 0s.
2366 */ 2341 */
2367 memset(tim_msix_id, 0, sizeof(tim_msix_id)); 2342 vdev->vpaths[i].ring.rx_vector_no =
2368 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; 2343 (vdev->vpaths[i].device_id *
2369 tim_msix_id[0] = msix_intr_vect; 2344 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2370
2371 tim_msix_id[1] = msix_intr_vect + 1;
2372 vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1];
2373 2345
2374 status = vxge_hw_vpath_msix_set( 2346 vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
2375 vdev->vpaths[i].handle, 2347 tim_msix_id, VXGE_ALARM_MSIX_ID);
2376 tim_msix_id, alarm_msix_id);
2377 if (status != VXGE_HW_OK) {
2378 vxge_debug_init(VXGE_ERR,
2379 "vxge_hw_vpath_msix_set "
2380 "failed with status : %x", status);
2381 kfree(vdev->entries);
2382 kfree(vdev->vxge_entries);
2383 pci_disable_msix(vdev->pdev);
2384 return -ENODEV;
2385 }
2386 } 2348 }
2387 } 2349 }
2388 2350
@@ -2393,7 +2355,7 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
2393{ 2355{
2394 int intr_cnt; 2356 int intr_cnt;
2395 2357
2396 for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1); 2358 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
2397 intr_cnt++) { 2359 intr_cnt++) {
2398 if (vdev->vxge_entries[intr_cnt].in_use) { 2360 if (vdev->vxge_entries[intr_cnt].in_use) {
2399 synchronize_irq(vdev->entries[intr_cnt].vector); 2361 synchronize_irq(vdev->entries[intr_cnt].vector);
@@ -2458,9 +2420,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
2458 switch (msix_idx) { 2420 switch (msix_idx) {
2459 case 0: 2421 case 0:
2460 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, 2422 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2461 "%s:vxge fn: %d vpath: %d Tx MSI-X: %d", 2423 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2462 vdev->ndev->name, pci_fun, vp_idx, 2424 vdev->ndev->name,
2463 vdev->entries[intr_cnt].entry); 2425 vdev->entries[intr_cnt].entry,
2426 pci_fun, vp_idx);
2464 ret = request_irq( 2427 ret = request_irq(
2465 vdev->entries[intr_cnt].vector, 2428 vdev->entries[intr_cnt].vector,
2466 vxge_tx_msix_handle, 0, 2429 vxge_tx_msix_handle, 0,
@@ -2472,9 +2435,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
2472 break; 2435 break;
2473 case 1: 2436 case 1:
2474 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, 2437 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2475 "%s:vxge fn: %d vpath: %d Rx MSI-X: %d", 2438 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2476 vdev->ndev->name, pci_fun, vp_idx, 2439 vdev->ndev->name,
2477 vdev->entries[intr_cnt].entry); 2440 vdev->entries[intr_cnt].entry,
2441 pci_fun, vp_idx);
2478 ret = request_irq( 2442 ret = request_irq(
2479 vdev->entries[intr_cnt].vector, 2443 vdev->entries[intr_cnt].vector,
2480 vxge_rx_msix_napi_handle, 2444 vxge_rx_msix_napi_handle,
@@ -2502,9 +2466,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
2502 if (irq_req) { 2466 if (irq_req) {
2503 /* We requested for this msix interrupt */ 2467 /* We requested for this msix interrupt */
2504 vdev->vxge_entries[intr_cnt].in_use = 1; 2468 vdev->vxge_entries[intr_cnt].in_use = 1;
2469 msix_idx += vdev->vpaths[vp_idx].device_id *
2470 VXGE_HW_VPATH_MSIX_ACTIVE;
2505 vxge_hw_vpath_msix_unmask( 2471 vxge_hw_vpath_msix_unmask(
2506 vdev->vpaths[vp_idx].handle, 2472 vdev->vpaths[vp_idx].handle,
2507 intr_idx); 2473 msix_idx);
2508 intr_cnt++; 2474 intr_cnt++;
2509 } 2475 }
2510 2476
@@ -2514,16 +2480,17 @@ static int vxge_add_isr(struct vxgedev *vdev)
2514 vp_idx++; 2480 vp_idx++;
2515 } 2481 }
2516 2482
2517 intr_cnt = vdev->max_vpath_supported * 2; 2483 intr_cnt = vdev->no_of_vpath * 2;
2518 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, 2484 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2519 "%s:vxge Alarm fn: %d MSI-X: %d", 2485 "%s:vxge:MSI-X %d - Alarm - fn:%d",
2520 vdev->ndev->name, pci_fun, 2486 vdev->ndev->name,
2521 vdev->entries[intr_cnt].entry); 2487 vdev->entries[intr_cnt].entry,
2488 pci_fun);
2522 /* For Alarm interrupts */ 2489 /* For Alarm interrupts */
2523 ret = request_irq(vdev->entries[intr_cnt].vector, 2490 ret = request_irq(vdev->entries[intr_cnt].vector,
2524 vxge_alarm_msix_handle, 0, 2491 vxge_alarm_msix_handle, 0,
2525 vdev->desc[intr_cnt], 2492 vdev->desc[intr_cnt],
2526 &vdev->vpaths[vp_idx]); 2493 &vdev->vpaths[0]);
2527 if (ret) { 2494 if (ret) {
2528 vxge_debug_init(VXGE_ERR, 2495 vxge_debug_init(VXGE_ERR,
2529 "%s: MSIX - %d Registration failed", 2496 "%s: MSIX - %d Registration failed",
@@ -2536,16 +2503,19 @@ static int vxge_add_isr(struct vxgedev *vdev)
2536 goto INTA_MODE; 2503 goto INTA_MODE;
2537 } 2504 }
2538 2505
2506 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2507 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2539 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle, 2508 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2540 intr_idx - 2); 2509 msix_idx);
2541 vdev->vxge_entries[intr_cnt].in_use = 1; 2510 vdev->vxge_entries[intr_cnt].in_use = 1;
2542 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx]; 2511 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
2543 } 2512 }
2544INTA_MODE: 2513INTA_MODE:
2545#endif 2514#endif
2546 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
2547 2515
2548 if (vdev->config.intr_type == INTA) { 2516 if (vdev->config.intr_type == INTA) {
2517 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2518 "%s:vxge:INTA", vdev->ndev->name);
2549 vxge_hw_device_set_intr_type(vdev->devh, 2519 vxge_hw_device_set_intr_type(vdev->devh,
2550 VXGE_HW_INTR_MODE_IRQLINE); 2520 VXGE_HW_INTR_MODE_IRQLINE);
2551 vxge_hw_vpath_tti_ci_set(vdev->devh, 2521 vxge_hw_vpath_tti_ci_set(vdev->devh,
@@ -2844,7 +2814,6 @@ static void vxge_napi_del_all(struct vxgedev *vdev)
2844 for (i = 0; i < vdev->no_of_vpath; i++) 2814 for (i = 0; i < vdev->no_of_vpath; i++)
2845 netif_napi_del(&vdev->vpaths[i].ring.napi); 2815 netif_napi_del(&vdev->vpaths[i].ring.napi);
2846 } 2816 }
2847 return;
2848} 2817}
2849 2818
2850int do_vxge_close(struct net_device *dev, int do_io) 2819int do_vxge_close(struct net_device *dev, int do_io)
@@ -3529,8 +3498,6 @@ static void verify_bandwidth(void)
3529 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) 3498 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++)
3530 bw_percentage[i] = bw_percentage[0]; 3499 bw_percentage[i] = bw_percentage[0];
3531 } 3500 }
3532
3533 return;
3534} 3501}
3535 3502
3536/* 3503/*
@@ -3995,6 +3962,36 @@ static void vxge_io_resume(struct pci_dev *pdev)
3995 netif_device_attach(netdev); 3962 netif_device_attach(netdev);
3996} 3963}
3997 3964
3965static inline u32 vxge_get_num_vfs(u64 function_mode)
3966{
3967 u32 num_functions = 0;
3968
3969 switch (function_mode) {
3970 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
3971 case VXGE_HW_FUNCTION_MODE_SRIOV_8:
3972 num_functions = 8;
3973 break;
3974 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
3975 num_functions = 1;
3976 break;
3977 case VXGE_HW_FUNCTION_MODE_SRIOV:
3978 case VXGE_HW_FUNCTION_MODE_MRIOV:
3979 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
3980 num_functions = 17;
3981 break;
3982 case VXGE_HW_FUNCTION_MODE_SRIOV_4:
3983 num_functions = 4;
3984 break;
3985 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
3986 num_functions = 2;
3987 break;
3988 case VXGE_HW_FUNCTION_MODE_MRIOV_8:
3989 num_functions = 8; /* TODO */
3990 break;
3991 }
3992 return num_functions;
3993}
3994
3998/** 3995/**
3999 * vxge_probe 3996 * vxge_probe
4000 * @pdev : structure containing the PCI related information of the device. 3997 * @pdev : structure containing the PCI related information of the device.
@@ -4022,14 +4019,19 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4022 u8 *macaddr; 4019 u8 *macaddr;
4023 struct vxge_mac_addrs *entry; 4020 struct vxge_mac_addrs *entry;
4024 static int bus = -1, device = -1; 4021 static int bus = -1, device = -1;
4022 u32 host_type;
4025 u8 new_device = 0; 4023 u8 new_device = 0;
4024 enum vxge_hw_status is_privileged;
4025 u32 function_mode;
4026 u32 num_vfs = 0;
4026 4027
4027 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 4028 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4028 attr.pdev = pdev; 4029 attr.pdev = pdev;
4029 4030
4030 if (bus != pdev->bus->number) 4031 /* In SRIOV-17 mode, functions of the same adapter
4031 new_device = 1; 4032 * can be deployed on different buses */
4032 if (device != PCI_SLOT(pdev->devfn)) 4033 if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
4034 (device != PCI_SLOT(pdev->devfn))))
4033 new_device = 1; 4035 new_device = 1;
4034 4036
4035 bus = pdev->bus->number; 4037 bus = pdev->bus->number;
@@ -4046,9 +4048,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4046 driver_config->total_dev_cnt); 4048 driver_config->total_dev_cnt);
4047 driver_config->config_dev_cnt = 0; 4049 driver_config->config_dev_cnt = 0;
4048 driver_config->total_dev_cnt = 0; 4050 driver_config->total_dev_cnt = 0;
4049 driver_config->g_no_cpus = 0;
4050 } 4051 }
4051 4052 /* Now making the CPU based no of vpath calculation
4053 * applicable for individual functions as well.
4054 */
4055 driver_config->g_no_cpus = 0;
4052 driver_config->vpath_per_dev = max_config_vpath; 4056 driver_config->vpath_per_dev = max_config_vpath;
4053 4057
4054 driver_config->total_dev_cnt++; 4058 driver_config->total_dev_cnt++;
@@ -4161,6 +4165,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4161 "%s:%d Vpath mask = %llx", __func__, __LINE__, 4165 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4162 (unsigned long long)vpath_mask); 4166 (unsigned long long)vpath_mask);
4163 4167
4168 function_mode = ll_config.device_hw_info.function_mode;
4169 host_type = ll_config.device_hw_info.host_type;
4170 is_privileged = __vxge_hw_device_is_privilaged(host_type,
4171 ll_config.device_hw_info.func_id);
4172
4164 /* Check how many vpaths are available */ 4173 /* Check how many vpaths are available */
4165 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 4174 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4166 if (!((vpath_mask) & vxge_mBIT(i))) 4175 if (!((vpath_mask) & vxge_mBIT(i)))
@@ -4168,14 +4177,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4168 max_vpath_supported++; 4177 max_vpath_supported++;
4169 } 4178 }
4170 4179
4180 if (new_device)
4181 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4182
4171 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ 4183 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4172 if ((VXGE_HW_FUNCTION_MODE_SRIOV == 4184 if (is_sriov(function_mode) && (max_config_dev > 1) &&
4173 ll_config.device_hw_info.function_mode) && 4185 (ll_config.intr_type != INTA) &&
4174 (max_config_dev > 1) && (pdev->is_physfn)) { 4186 (is_privileged == VXGE_HW_OK)) {
4175 ret = pci_enable_sriov(pdev, max_config_dev - 1); 4187 ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
4176 if (ret) 4188 ? (max_config_dev - 1) : num_vfs);
4177 vxge_debug_ll_config(VXGE_ERR, 4189 if (ret)
4178 "Failed to enable SRIOV: %d \n", ret); 4190 vxge_debug_ll_config(VXGE_ERR,
4191 "Failed in enabling SRIOV mode: %d\n", ret);
4179 } 4192 }
4180 4193
4181 /* 4194 /*
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 7c83ba4be9d7..60276b20fa5e 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -31,6 +31,7 @@
31#define PCI_DEVICE_ID_TITAN_UNI 0x5833 31#define PCI_DEVICE_ID_TITAN_UNI 0x5833
32#define VXGE_USE_DEFAULT 0xffffffff 32#define VXGE_USE_DEFAULT 0xffffffff
33#define VXGE_HW_VPATH_MSIX_ACTIVE 4 33#define VXGE_HW_VPATH_MSIX_ACTIVE 4
34#define VXGE_ALARM_MSIX_ID 2
34#define VXGE_HW_RXSYNC_FREQ_CNT 4 35#define VXGE_HW_RXSYNC_FREQ_CNT 4
35#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ) 36#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
36#define VXGE_LL_RX_COPY_THRESHOLD 256 37#define VXGE_LL_RX_COPY_THRESHOLD 256
@@ -89,6 +90,11 @@
89 90
90#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE) 91#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
91 92
93#define is_sriov(function_mode) \
94 ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
95 (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
96 (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
97
92enum vxge_reset_event { 98enum vxge_reset_event {
93 /* reset events */ 99 /* reset events */
94 VXGE_LL_VPATH_RESET = 0, 100 VXGE_LL_VPATH_RESET = 0,
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 2c012f4ce465..6cc1dd79b40b 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -231,11 +231,8 @@ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
231{ 231{
232 232
233 __vxge_hw_pio_mem_write32_upper( 233 __vxge_hw_pio_mem_write32_upper(
234 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)), 234 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
235 0, 32),
236 &channel->common_reg->set_msix_mask_vect[msix_id%4]); 235 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
237
238 return;
239} 236}
240 237
241/** 238/**
@@ -252,11 +249,8 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
252{ 249{
253 250
254 __vxge_hw_pio_mem_write32_upper( 251 __vxge_hw_pio_mem_write32_upper(
255 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)), 252 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
256 0, 32),
257 &channel->common_reg->clear_msix_mask_vect[msix_id%4]); 253 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
258
259 return;
260} 254}
261 255
262/** 256/**
@@ -331,8 +325,6 @@ void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
331 val64 = readq(&hldev->common_reg->titan_general_int_status); 325 val64 = readq(&hldev->common_reg->titan_general_int_status);
332 326
333 vxge_hw_device_unmask_all(hldev); 327 vxge_hw_device_unmask_all(hldev);
334
335 return;
336} 328}
337 329
338/** 330/**
@@ -364,8 +356,6 @@ void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
364 vxge_hw_vpath_intr_disable( 356 vxge_hw_vpath_intr_disable(
365 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i])); 357 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
366 } 358 }
367
368 return;
369} 359}
370 360
371/** 361/**
@@ -385,8 +375,6 @@ void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
385 375
386 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), 376 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
387 &hldev->common_reg->titan_mask_all_int); 377 &hldev->common_reg->titan_mask_all_int);
388
389 return;
390} 378}
391 379
392/** 380/**
@@ -406,8 +394,6 @@ void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
406 394
407 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), 395 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
408 &hldev->common_reg->titan_mask_all_int); 396 &hldev->common_reg->titan_mask_all_int);
409
410 return;
411} 397}
412 398
413/** 399/**
@@ -649,8 +635,6 @@ void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
649 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]), 635 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
650 &hldev->common_reg->tim_int_status1); 636 &hldev->common_reg->tim_int_status1);
651 } 637 }
652
653 return;
654} 638}
655 639
656/* 640/*
@@ -878,7 +862,7 @@ void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
878 862
879 channel = &ring->channel; 863 channel = &ring->channel;
880 864
881 rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; 865 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
882 866
883 if (ring->stats->common_stats.usage_cnt > 0) 867 if (ring->stats->common_stats.usage_cnt > 0)
884 ring->stats->common_stats.usage_cnt--; 868 ring->stats->common_stats.usage_cnt--;
@@ -902,7 +886,7 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
902 channel = &ring->channel; 886 channel = &ring->channel;
903 887
904 wmb(); 888 wmb();
905 rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; 889 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
906 890
907 vxge_hw_channel_dtr_post(channel, rxdh); 891 vxge_hw_channel_dtr_post(channel, rxdh);
908 892
@@ -966,6 +950,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
966 struct __vxge_hw_channel *channel; 950 struct __vxge_hw_channel *channel;
967 struct vxge_hw_ring_rxd_1 *rxdp; 951 struct vxge_hw_ring_rxd_1 *rxdp;
968 enum vxge_hw_status status = VXGE_HW_OK; 952 enum vxge_hw_status status = VXGE_HW_OK;
953 u64 control_0, own;
969 954
970 channel = &ring->channel; 955 channel = &ring->channel;
971 956
@@ -977,8 +962,12 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
977 goto exit; 962 goto exit;
978 } 963 }
979 964
965 control_0 = rxdp->control_0;
966 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
967 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
968
980 /* check whether it is not the end */ 969 /* check whether it is not the end */
981 if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) { 970 if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
982 971
983 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control != 972 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
984 0); 973 0);
@@ -986,8 +975,6 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
986 ++ring->cmpl_cnt; 975 ++ring->cmpl_cnt;
987 vxge_hw_channel_dtr_complete(channel); 976 vxge_hw_channel_dtr_complete(channel);
988 977
989 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
990
991 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED); 978 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
992 979
993 ring->stats->common_stats.usage_cnt++; 980 ring->stats->common_stats.usage_cnt++;
@@ -1035,12 +1022,13 @@ enum vxge_hw_status vxge_hw_ring_handle_tcode(
1035 * such as unknown UPV6 header), Drop it !!! 1022 * such as unknown UPV6 header), Drop it !!!
1036 */ 1023 */
1037 1024
1038 if (t_code == 0 || t_code == 5) { 1025 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1026 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1039 status = VXGE_HW_OK; 1027 status = VXGE_HW_OK;
1040 goto exit; 1028 goto exit;
1041 } 1029 }
1042 1030
1043 if (t_code > 0xF) { 1031 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1044 status = VXGE_HW_ERR_INVALID_TCODE; 1032 status = VXGE_HW_ERR_INVALID_TCODE;
1045 goto exit; 1033 goto exit;
1046 } 1034 }
@@ -2216,29 +2204,24 @@ exit:
2216 * This API will associate a given MSIX vector numbers with the four TIM 2204 * This API will associate a given MSIX vector numbers with the four TIM
2217 * interrupts and alarm interrupt. 2205 * interrupts and alarm interrupt.
2218 */ 2206 */
2219enum vxge_hw_status 2207void
2220vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id, 2208vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2221 int alarm_msix_id) 2209 int alarm_msix_id)
2222{ 2210{
2223 u64 val64; 2211 u64 val64;
2224 struct __vxge_hw_virtualpath *vpath = vp->vpath; 2212 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2225 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; 2213 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2226 u32 first_vp_id = vpath->hldev->first_vp_id; 2214 u32 vp_id = vp->vpath->vp_id;
2227 2215
2228 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI( 2216 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2229 (first_vp_id * 4) + tim_msix_id[0]) | 2217 (vp_id * 4) + tim_msix_id[0]) |
2230 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI( 2218 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2231 (first_vp_id * 4) + tim_msix_id[1]) | 2219 (vp_id * 4) + tim_msix_id[1]);
2232 VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
2233 (first_vp_id * 4) + tim_msix_id[2]);
2234
2235 val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
2236 (first_vp_id * 4) + tim_msix_id[3]);
2237 2220
2238 writeq(val64, &vp_reg->interrupt_cfg0); 2221 writeq(val64, &vp_reg->interrupt_cfg0);
2239 2222
2240 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG( 2223 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2241 (first_vp_id * 4) + alarm_msix_id), 2224 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2242 &vp_reg->interrupt_cfg2); 2225 &vp_reg->interrupt_cfg2);
2243 2226
2244 if (vpath->hldev->config.intr_mode == 2227 if (vpath->hldev->config.intr_mode ==
@@ -2258,8 +2241,6 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2258 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN, 2241 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2259 0, 32), &vp_reg->one_shot_vect3_en); 2242 0, 32), &vp_reg->one_shot_vect3_en);
2260 } 2243 }
2261
2262 return VXGE_HW_OK;
2263} 2244}
2264 2245
2265/** 2246/**
@@ -2279,11 +2260,8 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2279{ 2260{
2280 struct __vxge_hw_device *hldev = vp->vpath->hldev; 2261 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2281 __vxge_hw_pio_mem_write32_upper( 2262 __vxge_hw_pio_mem_write32_upper(
2282 (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id + 2263 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2283 (msix_id / 4)), 0, 32),
2284 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]); 2264 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2285
2286 return;
2287} 2265}
2288 2266
2289/** 2267/**
@@ -2305,19 +2283,15 @@ vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2305 if (hldev->config.intr_mode == 2283 if (hldev->config.intr_mode ==
2306 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { 2284 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2307 __vxge_hw_pio_mem_write32_upper( 2285 __vxge_hw_pio_mem_write32_upper(
2308 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id + 2286 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2309 (msix_id/4)), 0, 32),
2310 &hldev->common_reg-> 2287 &hldev->common_reg->
2311 clr_msix_one_shot_vec[msix_id%4]); 2288 clr_msix_one_shot_vec[msix_id%4]);
2312 } else { 2289 } else {
2313 __vxge_hw_pio_mem_write32_upper( 2290 __vxge_hw_pio_mem_write32_upper(
2314 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id + 2291 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2315 (msix_id/4)), 0, 32),
2316 &hldev->common_reg-> 2292 &hldev->common_reg->
2317 clear_msix_mask_vect[msix_id%4]); 2293 clear_msix_mask_vect[msix_id%4]);
2318 } 2294 }
2319
2320 return;
2321} 2295}
2322 2296
2323/** 2297/**
@@ -2337,11 +2311,8 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2337{ 2311{
2338 struct __vxge_hw_device *hldev = vp->vpath->hldev; 2312 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2339 __vxge_hw_pio_mem_write32_upper( 2313 __vxge_hw_pio_mem_write32_upper(
2340 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id + 2314 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2341 (msix_id/4)), 0, 32),
2342 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]); 2315 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2343
2344 return;
2345} 2316}
2346 2317
2347/** 2318/**
@@ -2358,8 +2329,6 @@ vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
2358 __vxge_hw_pio_mem_write32_upper( 2329 __vxge_hw_pio_mem_write32_upper(
2359 (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32), 2330 (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
2360 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect); 2331 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2361
2362 return;
2363} 2332}
2364 2333
2365/** 2334/**
@@ -2398,8 +2367,6 @@ void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2398 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64), 2367 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2399 &hldev->common_reg->tim_int_mask1); 2368 &hldev->common_reg->tim_int_mask1);
2400 } 2369 }
2401
2402 return;
2403} 2370}
2404 2371
2405/** 2372/**
@@ -2436,8 +2403,6 @@ void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2436 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64, 2403 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2437 &hldev->common_reg->tim_int_mask1); 2404 &hldev->common_reg->tim_int_mask1);
2438 } 2405 }
2439
2440 return;
2441} 2406}
2442 2407
2443/** 2408/**
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 861c853e3e84..c252f3d3f650 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1866,6 +1866,51 @@ struct vxge_hw_ring_rxd_info {
1866 u32 rth_hash_type; 1866 u32 rth_hash_type;
1867 u32 rth_value; 1867 u32 rth_value;
1868}; 1868};
1869/**
1870 * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
1871 * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
1872 * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
1873 * configuration mismatch.
1874 * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
1875 * configuration mismatch.
1876 * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
1877 * presentation configuration mismatch.
1878 * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error unparseable packet,
1879 * such as unknown IPv6 header.
1880 * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error frame integrity
1881 * error, such as FCS or ECC).
1882 * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error the RxD buffer(
1883 * s) were not appropriately sized and data loss occurred.
1884 * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error RxD corrupted.
1885 * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow the contents of
1886 * Segment1 exceeded the capacity of Buffer1 and the remainder
1887 * was placed in Buffer2. Segment2 now starts in Buffer3.
1888 * No data loss or errors occurred.
1889 * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 one of the RxDs
1890 * assigned buffers has a size of 0 bytes.
1891 * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped either due to
1892 * VPath Reset or because of a VPIN mismatch.
1893 * @VXGE_HW_RING_T_CODE_UNUSED: Unused
1894 * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors more than one
1895 * transfer code condition occurred.
1896 *
1897 * Transfer codes returned by adapter.
1898 */
1899enum vxge_hw_ring_tcode {
1900 VXGE_HW_RING_T_CODE_OK = 0x0,
1901 VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
1902 VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
1903 VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
1904 VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
1905 VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
1906 VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
1907 VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
1908 VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
1909 VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
1910 VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
1911 VXGE_HW_RING_T_CODE_UNUSED = 0xE,
1912 VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
1913};
1869 1914
1870/** 1915/**
1871 * enum enum vxge_hw_ring_hash_type - RTH hash types 1916 * enum enum vxge_hw_ring_hash_type - RTH hash types
@@ -1910,7 +1955,7 @@ vxge_hw_ring_rxd_post_post(
1910 void *rxdh); 1955 void *rxdh);
1911 1956
1912enum vxge_hw_status 1957enum vxge_hw_status
1913vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle, u16 min_flag); 1958vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
1914 1959
1915void 1960void
1916vxge_hw_ring_rxd_post_post_wmb( 1961vxge_hw_ring_rxd_post_post_wmb(
@@ -2042,7 +2087,6 @@ void vxge_hw_fifo_txdl_free(
2042 2087
2043#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8) 2088#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
2044#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16) 2089#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
2045#define VXGE_HW_RING_MIN_BUFF_ALLOCATION 64
2046 2090
2047/* 2091/*
2048 * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data. 2092 * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
@@ -2332,7 +2376,7 @@ enum vxge_hw_status vxge_hw_vpath_alarm_process(
2332 struct __vxge_hw_vpath_handle *vpath_handle, 2376 struct __vxge_hw_vpath_handle *vpath_handle,
2333 u32 skip_alarms); 2377 u32 skip_alarms);
2334 2378
2335enum vxge_hw_status 2379void
2336vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle, 2380vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
2337 int *tim_msix_id, int alarm_msix_id); 2381 int *tim_msix_id, int alarm_msix_id);
2338 2382
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 77c2a754b7b8..5da7ab1fd307 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
17 17
18#define VXGE_VERSION_MAJOR "2" 18#define VXGE_VERSION_MAJOR "2"
19#define VXGE_VERSION_MINOR "0" 19#define VXGE_VERSION_MINOR "0"
20#define VXGE_VERSION_FIX "6" 20#define VXGE_VERSION_FIX "8"
21#define VXGE_VERSION_BUILD "18937" 21#define VXGE_VERSION_BUILD "20182"
22#define VXGE_VERSION_FOR "k" 22#define VXGE_VERSION_FOR "k"
23#endif 23#endif
diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
index cd8cb95c5bd7..cf9e15fd8d91 100644
--- a/drivers/net/wan/cycx_x25.c
+++ b/drivers/net/wan/cycx_x25.c
@@ -634,11 +634,12 @@ static netdev_tx_t cycx_netdevice_hard_start_xmit(struct sk_buff *skb,
634 } 634 }
635 } else { /* chan->protocol == ETH_P_X25 */ 635 } else { /* chan->protocol == ETH_P_X25 */
636 switch (skb->data[0]) { 636 switch (skb->data[0]) {
637 case 0: break; 637 case X25_IFACE_DATA:
638 case 1: /* Connect request */ 638 break;
639 case X25_IFACE_CONNECT:
639 cycx_x25_chan_connect(dev); 640 cycx_x25_chan_connect(dev);
640 goto free_packet; 641 goto free_packet;
641 case 2: /* Disconnect request */ 642 case X25_IFACE_DISCONNECT:
642 cycx_x25_chan_disconnect(dev); 643 cycx_x25_chan_disconnect(dev);
643 goto free_packet; 644 goto free_packet;
644 default: 645 default:
@@ -1406,7 +1407,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
1406 reset_timer(dev); 1407 reset_timer(dev);
1407 1408
1408 if (chan->protocol == ETH_P_X25) 1409 if (chan->protocol == ETH_P_X25)
1409 cycx_x25_chan_send_event(dev, 1); 1410 cycx_x25_chan_send_event(dev,
1411 X25_IFACE_CONNECT);
1410 1412
1411 break; 1413 break;
1412 case WAN_CONNECTING: 1414 case WAN_CONNECTING:
@@ -1424,7 +1426,8 @@ static void cycx_x25_set_chan_state(struct net_device *dev, u8 state)
1424 } 1426 }
1425 1427
1426 if (chan->protocol == ETH_P_X25) 1428 if (chan->protocol == ETH_P_X25)
1427 cycx_x25_chan_send_event(dev, 2); 1429 cycx_x25_chan_send_event(dev,
1430 X25_IFACE_DISCONNECT);
1428 1431
1429 netif_wake_queue(dev); 1432 netif_wake_queue(dev);
1430 break; 1433 break;
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index a4859f7a7cc0..d45b08d1dbc9 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1175,8 +1175,6 @@ static netdev_tx_t dscc4_start_xmit(struct sk_buff *skb,
1175 spin_unlock(&dpriv->lock); 1175 spin_unlock(&dpriv->lock);
1176#endif 1176#endif
1177 1177
1178 dev->trans_start = jiffies;
1179
1180 if (debug > 2) 1178 if (debug > 2)
1181 dscc4_tx_print(dev, dpriv, "Xmit"); 1179 dscc4_tx_print(dev, dpriv, "Xmit");
1182 /* To be cleaned(unsigned int)/optimized. Later, ok ? */ 1180 /* To be cleaned(unsigned int)/optimized. Later, ok ? */
diff --git a/drivers/net/wan/hd64570.c b/drivers/net/wan/hd64570.c
index 4dde2ea4a189..a3ea27ce04f2 100644
--- a/drivers/net/wan/hd64570.c
+++ b/drivers/net/wan/hd64570.c
@@ -658,7 +658,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
658#endif 658#endif
659 writew(len, &desc->len); 659 writew(len, &desc->len);
660 writeb(ST_TX_EOM, &desc->stat); 660 writeb(ST_TX_EOM, &desc->stat);
661 dev->trans_start = jiffies;
662 661
663 port->txin = next_desc(port, port->txin, 1); 662 port->txin = next_desc(port, port->txin, 1);
664 sca_outw(desc_offset(port, port->txin, 1), 663 sca_outw(desc_offset(port, port->txin, 1),
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index aad9ed45c254..ea476cbd38b5 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -585,7 +585,6 @@ static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
585 585
586 writew(len, &desc->len); 586 writew(len, &desc->len);
587 writeb(ST_TX_EOM, &desc->stat); 587 writeb(ST_TX_EOM, &desc->stat);
588 dev->trans_start = jiffies;
589 588
590 port->txin = (port->txin + 1) % card->tx_ring_buffers; 589 port->txin = (port->txin + 1) % card->tx_ring_buffers;
591 sca_outl(desc_offset(port, port->txin, 1), 590 sca_outl(desc_offset(port, port->txin, 1),
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index c7adbb79f7cc..70527e5a54a2 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -49,14 +49,14 @@ static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
49 49
50static void x25_connected(struct net_device *dev, int reason) 50static void x25_connected(struct net_device *dev, int reason)
51{ 51{
52 x25_connect_disconnect(dev, reason, 1); 52 x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
53} 53}
54 54
55 55
56 56
57static void x25_disconnected(struct net_device *dev, int reason) 57static void x25_disconnected(struct net_device *dev, int reason)
58{ 58{
59 x25_connect_disconnect(dev, reason, 2); 59 x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
60} 60}
61 61
62 62
@@ -71,7 +71,7 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
71 return NET_RX_DROP; 71 return NET_RX_DROP;
72 72
73 ptr = skb->data; 73 ptr = skb->data;
74 *ptr = 0; 74 *ptr = X25_IFACE_DATA;
75 75
76 skb->protocol = x25_type_trans(skb, dev); 76 skb->protocol = x25_type_trans(skb, dev);
77 return netif_rx(skb); 77 return netif_rx(skb);
@@ -94,13 +94,13 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
94 94
95 /* X.25 to LAPB */ 95 /* X.25 to LAPB */
96 switch (skb->data[0]) { 96 switch (skb->data[0]) {
97 case 0: /* Data to be transmitted */ 97 case X25_IFACE_DATA: /* Data to be transmitted */
98 skb_pull(skb, 1); 98 skb_pull(skb, 1);
99 if ((result = lapb_data_request(dev, skb)) != LAPB_OK) 99 if ((result = lapb_data_request(dev, skb)) != LAPB_OK)
100 dev_kfree_skb(skb); 100 dev_kfree_skb(skb);
101 return NETDEV_TX_OK; 101 return NETDEV_TX_OK;
102 102
103 case 1: 103 case X25_IFACE_CONNECT:
104 if ((result = lapb_connect_request(dev))!= LAPB_OK) { 104 if ((result = lapb_connect_request(dev))!= LAPB_OK) {
105 if (result == LAPB_CONNECTED) 105 if (result == LAPB_CONNECTED)
106 /* Send connect confirm. msg to level 3 */ 106 /* Send connect confirm. msg to level 3 */
@@ -112,7 +112,7 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
112 } 112 }
113 break; 113 break;
114 114
115 case 2: 115 case X25_IFACE_DISCONNECT:
116 if ((result = lapb_disconnect_request(dev)) != LAPB_OK) { 116 if ((result = lapb_disconnect_request(dev)) != LAPB_OK) {
117 if (result == LAPB_NOTCONNECTED) 117 if (result == LAPB_NOTCONNECTED)
118 /* Send disconnect confirm. msg to level 3 */ 118 /* Send disconnect confirm. msg to level 3 */
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index 0c2cdde686a0..88e363033e23 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -891,7 +891,6 @@ static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
891 891
892 wmb(); 892 wmb();
893 queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc); 893 queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc);
894 dev->trans_start = jiffies;
895 894
896 if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */ 895 if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
897#if DEBUG_TX 896#if DEBUG_TX
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 98e2f99903d7..4d4dc38c7290 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -139,7 +139,7 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
139 return NET_RX_DROP; 139 return NET_RX_DROP;
140 140
141 ptr = skb->data; 141 ptr = skb->data;
142 *ptr = 0x00; 142 *ptr = X25_IFACE_DATA;
143 143
144 skb->protocol = x25_type_trans(skb, dev); 144 skb->protocol = x25_type_trans(skb, dev);
145 return netif_rx(skb); 145 return netif_rx(skb);
@@ -161,14 +161,14 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
161 goto drop; 161 goto drop;
162 162
163 switch (skb->data[0]) { 163 switch (skb->data[0]) {
164 case 0x00: 164 case X25_IFACE_DATA:
165 break; 165 break;
166 case 0x01: 166 case X25_IFACE_CONNECT:
167 if ((err = lapb_connect_request(dev)) != LAPB_OK) 167 if ((err = lapb_connect_request(dev)) != LAPB_OK)
168 printk(KERN_ERR "lapbeth: lapb_connect_request " 168 printk(KERN_ERR "lapbeth: lapb_connect_request "
169 "error: %d\n", err); 169 "error: %d\n", err);
170 goto drop; 170 goto drop;
171 case 0x02: 171 case X25_IFACE_DISCONNECT:
172 if ((err = lapb_disconnect_request(dev)) != LAPB_OK) 172 if ((err = lapb_disconnect_request(dev)) != LAPB_OK)
173 printk(KERN_ERR "lapbeth: lapb_disconnect_request " 173 printk(KERN_ERR "lapbeth: lapb_disconnect_request "
174 "err: %d\n", err); 174 "err: %d\n", err);
@@ -225,7 +225,7 @@ static void lapbeth_connected(struct net_device *dev, int reason)
225 } 225 }
226 226
227 ptr = skb_put(skb, 1); 227 ptr = skb_put(skb, 1);
228 *ptr = 0x01; 228 *ptr = X25_IFACE_CONNECT;
229 229
230 skb->protocol = x25_type_trans(skb, dev); 230 skb->protocol = x25_type_trans(skb, dev);
231 netif_rx(skb); 231 netif_rx(skb);
@@ -242,7 +242,7 @@ static void lapbeth_disconnected(struct net_device *dev, int reason)
242 } 242 }
243 243
244 ptr = skb_put(skb, 1); 244 ptr = skb_put(skb, 1);
245 *ptr = 0x02; 245 *ptr = X25_IFACE_DISCONNECT;
246 246
247 skb->protocol = x25_type_trans(skb, dev); 247 skb->protocol = x25_type_trans(skb, dev);
248 netif_rx(skb); 248 netif_rx(skb);
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index b27850377121..e2c6f7f4f51c 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1506,8 +1506,6 @@ static netdev_tx_t lmc_start_xmit(struct sk_buff *skb,
1506 /* send now! */ 1506 /* send now! */
1507 LMC_CSR_WRITE (sc, csr_txpoll, 0); 1507 LMC_CSR_WRITE (sc, csr_txpoll, 0);
1508 1508
1509 dev->trans_start = jiffies;
1510
1511 spin_unlock_irqrestore(&sc->lmc_lock, flags); 1509 spin_unlock_irqrestore(&sc->lmc_lock, flags);
1512 1510
1513 lmc_trace(dev, "lmc_start_xmit_out"); 1511 lmc_trace(dev, "lmc_start_xmit_out");
@@ -2103,7 +2101,7 @@ static void lmc_driver_timeout(struct net_device *dev)
2103 printk("%s: Xmitter busy|\n", dev->name); 2101 printk("%s: Xmitter busy|\n", dev->name);
2104 2102
2105 sc->extra_stats.tx_tbusy_calls++; 2103 sc->extra_stats.tx_tbusy_calls++;
2106 if (jiffies - dev->trans_start < TX_TIMEOUT) 2104 if (jiffies - dev_trans_start(dev) < TX_TIMEOUT)
2107 goto bug_out; 2105 goto bug_out;
2108 2106
2109 /* 2107 /*
@@ -2135,7 +2133,7 @@ static void lmc_driver_timeout(struct net_device *dev)
2135 sc->lmc_device->stats.tx_errors++; 2133 sc->lmc_device->stats.tx_errors++;
2136 sc->extra_stats.tx_ProcTimeout++; /* -baz */ 2134 sc->extra_stats.tx_ProcTimeout++; /* -baz */
2137 2135
2138 dev->trans_start = jiffies; 2136 dev->trans_start = jiffies; /* prevent tx timeout */
2139 2137
2140bug_out: 2138bug_out:
2141 2139
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 3f744c643094..c6aa66e5b52f 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -396,7 +396,7 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
396 u16 next_bd = card->chan[ch].tx_next_bd; 396 u16 next_bd = card->chan[ch].tx_next_bd;
397 u32 scabase = card->hw.scabase; 397 u32 scabase = card->hw.scabase;
398 398
399 printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); 399 printk ("\nnfree_tx_bd = %d\n", card->chan[ch].nfree_tx_bd);
400 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, 400 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
401 first_bd, TX_BD_ADDR(ch, first_bd), 401 first_bd, TX_BD_ADDR(ch, first_bd),
402 next_bd, TX_BD_ADDR(ch, next_bd)); 402 next_bd, TX_BD_ADDR(ch, next_bd));
@@ -1790,7 +1790,7 @@ static void cpc_tx_timeout(struct net_device *dev)
1790 cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) & 1790 cpc_readb(card->hw.falcbase + card->hw.cpld_reg2) &
1791 ~(CPLD_REG2_FALC_LED1 << (2 * ch))); 1791 ~(CPLD_REG2_FALC_LED1 << (2 * ch)));
1792 } 1792 }
1793 dev->trans_start = jiffies; 1793 dev->trans_start = jiffies; /* prevent tx timeout */
1794 CPC_UNLOCK(card, flags); 1794 CPC_UNLOCK(card, flags);
1795 netif_wake_queue(dev); 1795 netif_wake_queue(dev);
1796} 1796}
@@ -1849,7 +1849,6 @@ static int cpc_queue_xmit(struct sk_buff *skb, struct net_device *dev)
1849 if (d->trace_on) { 1849 if (d->trace_on) {
1850 cpc_trace(dev, skb, 'T'); 1850 cpc_trace(dev, skb, 'T');
1851 } 1851 }
1852 dev->trans_start = jiffies;
1853 1852
1854 /* Start transmission */ 1853 /* Start transmission */
1855 CPC_LOCK(card, flags); 1854 CPC_LOCK(card, flags);
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4917a94943bd..4293889e287e 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -366,7 +366,7 @@ static void cpc_tty_close(struct tty_struct *tty, struct file *flip)
366 int res; 366 int res;
367 367
368 if (!tty || !tty->driver_data ) { 368 if (!tty || !tty->driver_data ) {
369 CPC_TTY_DBG("hdlx-tty: no TTY in close \n"); 369 CPC_TTY_DBG("hdlx-tty: no TTY in close\n");
370 return; 370 return;
371 } 371 }
372 372
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 31c41af2246d..43ae6f440bfb 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1352,7 +1352,7 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
1352 return(-EINVAL); 1352 return(-EINVAL);
1353 1353
1354 if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){ 1354 if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
1355 printk(KERN_WARNING "SDLA: io-port 0x%04lx in use \n", dev->base_addr); 1355 printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr);
1356 return(-EINVAL); 1356 return(-EINVAL);
1357 } 1357 }
1358 base = map->base_addr; 1358 base = map->base_addr;
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index 541c700dceef..db73a7be199f 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -298,7 +298,6 @@ static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
298 desc->stat = PACKET_FULL; 298 desc->stat = PACKET_FULL;
299 writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node), 299 writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
300 port->card->plx + PLX_DOORBELL_TO_CARD); 300 port->card->plx + PLX_DOORBELL_TO_CARD);
301 dev->trans_start = jiffies;
302 301
303 port->tx_out = (port->tx_out + 1) % TX_BUFFERS; 302 port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
304 303
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 80d5c5834a0b..166e77dfffda 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -29,12 +29,12 @@
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <linux/skbuff.h> 30#include <linux/skbuff.h>
31#include <linux/if_arp.h> 31#include <linux/if_arp.h>
32#include <linux/x25.h>
33#include <linux/lapb.h> 32#include <linux/lapb.h>
34#include <linux/init.h> 33#include <linux/init.h>
35#include <linux/rtnetlink.h> 34#include <linux/rtnetlink.h>
36#include <linux/compat.h> 35#include <linux/compat.h>
37#include <linux/slab.h> 36#include <linux/slab.h>
37#include <net/x25device.h>
38#include "x25_asy.h" 38#include "x25_asy.h"
39 39
40#include <net/x25device.h> 40#include <net/x25device.h>
@@ -315,15 +315,15 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
315 } 315 }
316 316
317 switch (skb->data[0]) { 317 switch (skb->data[0]) {
318 case 0x00: 318 case X25_IFACE_DATA:
319 break; 319 break;
320 case 0x01: /* Connection request .. do nothing */ 320 case X25_IFACE_CONNECT: /* Connection request .. do nothing */
321 err = lapb_connect_request(dev); 321 err = lapb_connect_request(dev);
322 if (err != LAPB_OK) 322 if (err != LAPB_OK)
323 printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err); 323 printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err);
324 kfree_skb(skb); 324 kfree_skb(skb);
325 return NETDEV_TX_OK; 325 return NETDEV_TX_OK;
326 case 0x02: /* Disconnect request .. do nothing - hang up ?? */ 326 case X25_IFACE_DISCONNECT: /* do nothing - hang up ?? */
327 err = lapb_disconnect_request(dev); 327 err = lapb_disconnect_request(dev);
328 if (err != LAPB_OK) 328 if (err != LAPB_OK)
329 printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err); 329 printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err);
@@ -411,7 +411,7 @@ static void x25_asy_connected(struct net_device *dev, int reason)
411 } 411 }
412 412
413 ptr = skb_put(skb, 1); 413 ptr = skb_put(skb, 1);
414 *ptr = 0x01; 414 *ptr = X25_IFACE_CONNECT;
415 415
416 skb->protocol = x25_type_trans(skb, sl->dev); 416 skb->protocol = x25_type_trans(skb, sl->dev);
417 netif_rx(skb); 417 netif_rx(skb);
@@ -430,7 +430,7 @@ static void x25_asy_disconnected(struct net_device *dev, int reason)
430 } 430 }
431 431
432 ptr = skb_put(skb, 1); 432 ptr = skb_put(skb, 1);
433 *ptr = 0x02; 433 *ptr = X25_IFACE_DISCONNECT;
434 434
435 skb->protocol = x25_type_trans(skb, sl->dev); 435 skb->protocol = x25_type_trans(skb, sl->dev);
436 netif_rx(skb); 436 netif_rx(skb);
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index d8322d2d1e29..746a5ee32f33 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -395,7 +395,6 @@ wd_reset_8390(struct net_device *dev)
395 outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5); 395 outb(NIC16 | ((dev->mem_start>>19) & 0x1f), wd_cmd_port+WD_CMDREG5);
396 396
397 if (ei_debug > 1) printk("reset done\n"); 397 if (ei_debug > 1) printk("reset done\n");
398 return;
399} 398}
400 399
401/* Grab the 8390 specific header. Similar to the block_input routine, but 400/* Grab the 8390 specific header. Similar to the block_input routine, but
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 6180772dcc09..d86e8f31e7fc 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -83,6 +83,21 @@
83#define D_SUBMODULE control 83#define D_SUBMODULE control
84#include "debug-levels.h" 84#include "debug-levels.h"
85 85
86static int i2400m_idle_mode_disabled;/* 0 (idle mode enabled) by default */
87module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
88MODULE_PARM_DESC(idle_mode_disabled,
89 "If true, the device will not enable idle mode negotiation "
90 "with the base station (when connected) to save power.");
91
92/* 0 (power saving enabled) by default */
93static int i2400m_power_save_disabled;
94module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
95MODULE_PARM_DESC(power_save_disabled,
96 "If true, the driver will not tell the device to enter "
97 "power saving mode when it reports it is ready for it. "
98 "False by default (so the device is told to do power "
99 "saving).");
100
86int i2400m_passive_mode; /* 0 (passive mode disabled) by default */ 101int i2400m_passive_mode; /* 0 (passive mode disabled) by default */
87module_param_named(passive_mode, i2400m_passive_mode, int, 0644); 102module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
88MODULE_PARM_DESC(passive_mode, 103MODULE_PARM_DESC(passive_mode,
@@ -346,7 +361,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
346 i2400m_state); 361 i2400m_state);
347 i2400m_reset(i2400m, I2400M_RT_WARM); 362 i2400m_reset(i2400m, I2400M_RT_WARM);
348 break; 363 break;
349 }; 364 }
350 d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n", 365 d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
351 i2400m, ss, i2400m_state); 366 i2400m, ss, i2400m_state);
352} 367}
@@ -395,7 +410,7 @@ void i2400m_report_tlv_media_status(struct i2400m *i2400m,
395 default: 410 default:
396 dev_err(dev, "HW BUG? unknown media status %u\n", 411 dev_err(dev, "HW BUG? unknown media status %u\n",
397 status); 412 status);
398 }; 413 }
399 d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n", 414 d_fnend(3, dev, "(i2400m %p ms %p [%u]) = void\n",
400 i2400m, ms, status); 415 i2400m, ms, status);
401} 416}
@@ -524,7 +539,7 @@ void i2400m_report_hook(struct i2400m *i2400m,
524 } 539 }
525 } 540 }
526 break; 541 break;
527 }; 542 }
528 d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n", 543 d_fnend(3, dev, "(i2400m %p l3l4_hdr %p size %zu) = void\n",
529 i2400m, l3l4_hdr, size); 544 i2400m, l3l4_hdr, size);
530} 545}
@@ -567,8 +582,7 @@ void i2400m_msg_ack_hook(struct i2400m *i2400m,
567 size); 582 size);
568 } 583 }
569 break; 584 break;
570 }; 585 }
571 return;
572} 586}
573 587
574 588
@@ -740,7 +754,7 @@ struct sk_buff *i2400m_msg_to_dev(struct i2400m *i2400m,
740 break; 754 break;
741 default: 755 default:
742 ack_timeout = HZ; 756 ack_timeout = HZ;
743 }; 757 }
744 758
745 if (unlikely(i2400m->trace_msg_from_user)) 759 if (unlikely(i2400m->trace_msg_from_user))
746 wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL); 760 wimax_msg(&i2400m->wimax_dev, "echo", buf, buf_len, GFP_KERNEL);
@@ -1419,5 +1433,4 @@ void i2400m_dev_shutdown(struct i2400m *i2400m)
1419 1433
1420 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 1434 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
1421 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); 1435 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
1422 return;
1423} 1436}
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 94dc83c3969d..9c8b78d4abd2 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -75,25 +75,6 @@
75#include "debug-levels.h" 75#include "debug-levels.h"
76 76
77 77
78int i2400m_idle_mode_disabled; /* 0 (idle mode enabled) by default */
79module_param_named(idle_mode_disabled, i2400m_idle_mode_disabled, int, 0644);
80MODULE_PARM_DESC(idle_mode_disabled,
81 "If true, the device will not enable idle mode negotiation "
82 "with the base station (when connected) to save power.");
83
84int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */
85module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
86MODULE_PARM_DESC(rx_reorder_disabled,
87 "If true, RX reordering will be disabled.");
88
89int i2400m_power_save_disabled; /* 0 (power saving enabled) by default */
90module_param_named(power_save_disabled, i2400m_power_save_disabled, int, 0644);
91MODULE_PARM_DESC(power_save_disabled,
92 "If true, the driver will not tell the device to enter "
93 "power saving mode when it reports it is ready for it. "
94 "False by default (so the device is told to do power "
95 "saving).");
96
97static char i2400m_debug_params[128]; 78static char i2400m_debug_params[128];
98module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params), 79module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params),
99 0644); 80 0644);
@@ -395,6 +376,16 @@ retry:
395 result = i2400m_dev_initialize(i2400m); 376 result = i2400m_dev_initialize(i2400m);
396 if (result < 0) 377 if (result < 0)
397 goto error_dev_initialize; 378 goto error_dev_initialize;
379
380 /* We don't want any additional unwanted error recovery triggered
381 * from any other context so if anything went wrong before we come
382 * here, let's keep i2400m->error_recovery untouched and leave it to
383 * dev_reset_handle(). See dev_reset_handle(). */
384
385 atomic_dec(&i2400m->error_recovery);
386 /* Every thing works so far, ok, now we are ready to
387 * take error recovery if it's required. */
388
398 /* At this point, reports will come for the device and set it 389 /* At this point, reports will come for the device and set it
399 * to the right state if it is different than UNINITIALIZED */ 390 * to the right state if it is different than UNINITIALIZED */
400 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n", 391 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
@@ -403,10 +394,10 @@ retry:
403 394
404error_dev_initialize: 395error_dev_initialize:
405error_check_mac_addr: 396error_check_mac_addr:
397error_fw_check:
406 i2400m->ready = 0; 398 i2400m->ready = 0;
407 wmb(); /* see i2400m->ready's documentation */ 399 wmb(); /* see i2400m->ready's documentation */
408 flush_workqueue(i2400m->work_queue); 400 flush_workqueue(i2400m->work_queue);
409error_fw_check:
410 if (i2400m->bus_dev_stop) 401 if (i2400m->bus_dev_stop)
411 i2400m->bus_dev_stop(i2400m); 402 i2400m->bus_dev_stop(i2400m);
412error_bus_dev_start: 403error_bus_dev_start:
@@ -436,7 +427,8 @@ int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
436 result = __i2400m_dev_start(i2400m, bm_flags); 427 result = __i2400m_dev_start(i2400m, bm_flags);
437 if (result >= 0) { 428 if (result >= 0) {
438 i2400m->updown = 1; 429 i2400m->updown = 1;
439 wmb(); /* see i2400m->updown's documentation */ 430 i2400m->alive = 1;
431 wmb();/* see i2400m->updown and i2400m->alive's doc */
440 } 432 }
441 } 433 }
442 mutex_unlock(&i2400m->init_mutex); 434 mutex_unlock(&i2400m->init_mutex);
@@ -497,7 +489,8 @@ void i2400m_dev_stop(struct i2400m *i2400m)
497 if (i2400m->updown) { 489 if (i2400m->updown) {
498 __i2400m_dev_stop(i2400m); 490 __i2400m_dev_stop(i2400m);
499 i2400m->updown = 0; 491 i2400m->updown = 0;
500 wmb(); /* see i2400m->updown's documentation */ 492 i2400m->alive = 0;
493 wmb(); /* see i2400m->updown and i2400m->alive's doc */
501 } 494 }
502 mutex_unlock(&i2400m->init_mutex); 495 mutex_unlock(&i2400m->init_mutex);
503} 496}
@@ -617,12 +610,12 @@ int i2400m_post_reset(struct i2400m *i2400m)
617error_dev_start: 610error_dev_start:
618 if (i2400m->bus_release) 611 if (i2400m->bus_release)
619 i2400m->bus_release(i2400m); 612 i2400m->bus_release(i2400m);
620error_bus_setup:
621 /* even if the device was up, it could not be recovered, so we 613 /* even if the device was up, it could not be recovered, so we
622 * mark it as down. */ 614 * mark it as down. */
623 i2400m->updown = 0; 615 i2400m->updown = 0;
624 wmb(); /* see i2400m->updown's documentation */ 616 wmb(); /* see i2400m->updown's documentation */
625 mutex_unlock(&i2400m->init_mutex); 617 mutex_unlock(&i2400m->init_mutex);
618error_bus_setup:
626 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); 619 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
627 return result; 620 return result;
628} 621}
@@ -669,6 +662,9 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
669 662
670 d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason); 663 d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
671 664
665 i2400m->boot_mode = 1;
666 wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
667
672 result = 0; 668 result = 0;
673 if (mutex_trylock(&i2400m->init_mutex) == 0) { 669 if (mutex_trylock(&i2400m->init_mutex) == 0) {
674 /* We are still in i2400m_dev_start() [let it fail] or 670 /* We are still in i2400m_dev_start() [let it fail] or
@@ -679,39 +675,68 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
679 complete(&i2400m->msg_completion); 675 complete(&i2400m->msg_completion);
680 goto out; 676 goto out;
681 } 677 }
682 if (i2400m->updown == 0) { 678
683 dev_info(dev, "%s: device is down, doing nothing\n", reason);
684 goto out_unlock;
685 }
686 dev_err(dev, "%s: reinitializing driver\n", reason); 679 dev_err(dev, "%s: reinitializing driver\n", reason);
687 __i2400m_dev_stop(i2400m); 680 rmb();
688 result = __i2400m_dev_start(i2400m, 681 if (i2400m->updown) {
689 I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT); 682 __i2400m_dev_stop(i2400m);
690 if (result < 0) {
691 i2400m->updown = 0; 683 i2400m->updown = 0;
692 wmb(); /* see i2400m->updown's documentation */ 684 wmb(); /* see i2400m->updown's documentation */
693 dev_err(dev, "%s: cannot start the device: %d\n",
694 reason, result);
695 result = -EUCLEAN;
696 } 685 }
697out_unlock: 686
687 if (i2400m->alive) {
688 result = __i2400m_dev_start(i2400m,
689 I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
690 if (result < 0) {
691 dev_err(dev, "%s: cannot start the device: %d\n",
692 reason, result);
693 result = -EUCLEAN;
694 if (atomic_read(&i2400m->bus_reset_retries)
695 >= I2400M_BUS_RESET_RETRIES) {
696 result = -ENODEV;
697 dev_err(dev, "tried too many times to "
698 "reset the device, giving up\n");
699 }
700 }
701 }
702
698 if (i2400m->reset_ctx) { 703 if (i2400m->reset_ctx) {
699 ctx->result = result; 704 ctx->result = result;
700 complete(&ctx->completion); 705 complete(&ctx->completion);
701 } 706 }
702 mutex_unlock(&i2400m->init_mutex); 707 mutex_unlock(&i2400m->init_mutex);
703 if (result == -EUCLEAN) { 708 if (result == -EUCLEAN) {
709 /*
710 * We come here because the reset during operational mode
711 * wasn't successully done and need to proceed to a bus
712 * reset. For the dev_reset_handle() to be able to handle
713 * the reset event later properly, we restore boot_mode back
714 * to the state before previous reset. ie: just like we are
715 * issuing the bus reset for the first time
716 */
717 i2400m->boot_mode = 0;
718 wmb();
719
720 atomic_inc(&i2400m->bus_reset_retries);
704 /* ops, need to clean up [w/ init_mutex not held] */ 721 /* ops, need to clean up [w/ init_mutex not held] */
705 result = i2400m_reset(i2400m, I2400M_RT_BUS); 722 result = i2400m_reset(i2400m, I2400M_RT_BUS);
706 if (result >= 0) 723 if (result >= 0)
707 result = -ENODEV; 724 result = -ENODEV;
725 } else {
726 rmb();
727 if (i2400m->alive) {
728 /* great, we expect the device state up and
729 * dev_start() actually brings the device state up */
730 i2400m->updown = 1;
731 wmb();
732 atomic_set(&i2400m->bus_reset_retries, 0);
733 }
708 } 734 }
709out: 735out:
710 i2400m_put(i2400m); 736 i2400m_put(i2400m);
711 kfree(iw); 737 kfree(iw);
712 d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n", 738 d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
713 ws, i2400m, reason); 739 ws, i2400m, reason);
714 return;
715} 740}
716 741
717 742
@@ -729,14 +754,72 @@ out:
729 */ 754 */
730int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason) 755int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
731{ 756{
732 i2400m->boot_mode = 1;
733 wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
734 return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle, 757 return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
735 GFP_ATOMIC, &reason, sizeof(reason)); 758 GFP_ATOMIC, &reason, sizeof(reason));
736} 759}
737EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle); 760EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
738 761
739 762
763 /*
764 * The actual work of error recovery.
765 *
766 * The current implementation of error recovery is to trigger a bus reset.
767 */
768static
769void __i2400m_error_recovery(struct work_struct *ws)
770{
771 struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
772 struct i2400m *i2400m = iw->i2400m;
773
774 i2400m_reset(i2400m, I2400M_RT_BUS);
775
776 i2400m_put(i2400m);
777 kfree(iw);
778 return;
779}
780
781/*
782 * Schedule a work struct for error recovery.
783 *
784 * The intention of error recovery is to bring back the device to some
785 * known state whenever TX sees -110 (-ETIMEOUT) on copying the data to
786 * the device. The TX failure could mean a device bus stuck, so the current
787 * error recovery implementation is to trigger a bus reset to the device
788 * and hopefully it can bring back the device.
789 *
790 * The actual work of error recovery has to be in a thread context because
791 * it is kicked off in the TX thread (i2400ms->tx_workqueue) which is to be
792 * destroyed by the error recovery mechanism (currently a bus reset).
793 *
794 * Also, there may be already a queue of TX works that all hit
795 * the -ETIMEOUT error condition because the device is stuck already.
796 * Since bus reset is used as the error recovery mechanism and we don't
797 * want consecutive bus resets simply because the multiple TX works
798 * in the queue all hit the same device erratum, the flag "error_recovery"
799 * is introduced for preventing unwanted consecutive bus resets.
800 *
801 * Error recovery shall only be invoked again if previous one was completed.
802 * The flag error_recovery is set when error recovery mechanism is scheduled,
803 * and is checked when we need to schedule another error recovery. If it is
804 * in place already, then we shouldn't schedule another one.
805 */
806void i2400m_error_recovery(struct i2400m *i2400m)
807{
808 struct device *dev = i2400m_dev(i2400m);
809
810 if (atomic_add_return(1, &i2400m->error_recovery) == 1) {
811 if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
812 GFP_ATOMIC, NULL, 0) < 0) {
813 dev_err(dev, "run out of memory for "
814 "scheduling an error recovery ?\n");
815 atomic_dec(&i2400m->error_recovery);
816 }
817 } else
818 atomic_dec(&i2400m->error_recovery);
819 return;
820}
821EXPORT_SYMBOL_GPL(i2400m_error_recovery);
822
740/* 823/*
741 * Alloc the command and ack buffers for boot mode 824 * Alloc the command and ack buffers for boot mode
742 * 825 *
@@ -803,6 +886,13 @@ void i2400m_init(struct i2400m *i2400m)
803 886
804 mutex_init(&i2400m->init_mutex); 887 mutex_init(&i2400m->init_mutex);
805 /* wake_tx_ws is initialized in i2400m_tx_setup() */ 888 /* wake_tx_ws is initialized in i2400m_tx_setup() */
889 atomic_set(&i2400m->bus_reset_retries, 0);
890
891 i2400m->alive = 0;
892
893 /* initialize error_recovery to 1 for denoting we
894 * are not yet ready to take any error recovery */
895 atomic_set(&i2400m->error_recovery, 1);
806} 896}
807EXPORT_SYMBOL_GPL(i2400m_init); 897EXPORT_SYMBOL_GPL(i2400m_init);
808 898
@@ -996,7 +1086,6 @@ void __exit i2400m_driver_exit(void)
996 /* for scheds i2400m_dev_reset_handle() */ 1086 /* for scheds i2400m_dev_reset_handle() */
997 flush_scheduled_work(); 1087 flush_scheduled_work();
998 i2400m_barker_db_exit(); 1088 i2400m_barker_db_exit();
999 return;
1000} 1089}
1001module_exit(i2400m_driver_exit); 1090module_exit(i2400m_driver_exit);
1002 1091
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
index b9c4bed3b457..360d4fb195f4 100644
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -99,7 +99,10 @@ enum {
99 * 99 *
100 * @tx_workqueue: workqeueue used for data TX; we don't use the 100 * @tx_workqueue: workqeueue used for data TX; we don't use the
101 * system's workqueue as that might cause deadlocks with code in 101 * system's workqueue as that might cause deadlocks with code in
102 * the bus-generic driver. 102 * the bus-generic driver. The read/write operation to the queue
103 * is protected with spinlock (tx_lock in struct i2400m) to avoid
104 * the queue being destroyed in the middle of a the queue read/write
105 * operation.
103 * 106 *
104 * @debugfs_dentry: dentry for the SDIO specific debugfs files 107 * @debugfs_dentry: dentry for the SDIO specific debugfs files
105 * 108 *
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 820b128705ec..fa74777fd65f 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -160,6 +160,16 @@
160#include <linux/wimax/i2400m.h> 160#include <linux/wimax/i2400m.h>
161#include <asm/byteorder.h> 161#include <asm/byteorder.h>
162 162
163enum {
164/* netdev interface */
165 /*
166 * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
167 *
168 * The MTU is 1400 or less
169 */
170 I2400M_MAX_MTU = 1400,
171};
172
163/* Misc constants */ 173/* Misc constants */
164enum { 174enum {
165 /* Size of the Boot Mode Command buffer */ 175 /* Size of the Boot Mode Command buffer */
@@ -167,6 +177,11 @@ enum {
167 I2400M_BM_ACK_BUF_SIZE = 256, 177 I2400M_BM_ACK_BUF_SIZE = 256,
168}; 178};
169 179
180enum {
181 /* Maximum number of bus reset can be retried */
182 I2400M_BUS_RESET_RETRIES = 3,
183};
184
170/** 185/**
171 * struct i2400m_poke_table - Hardware poke table for the Intel 2400m 186 * struct i2400m_poke_table - Hardware poke table for the Intel 2400m
172 * 187 *
@@ -227,6 +242,11 @@ struct i2400m_barker_db;
227 * so we have a tx_blk_size variable that the bus layer sets to 242 * so we have a tx_blk_size variable that the bus layer sets to
228 * tell the engine how much of that we need. 243 * tell the engine how much of that we need.
229 * 244 *
245 * @bus_tx_room_min: [fill] Minimum room required while allocating
246 * TX queue's buffer space for message header. SDIO requires
247 * 224 bytes and USB 16 bytes. Refer bus specific driver code
248 * for details.
249 *
230 * @bus_pl_size_max: [fill] Maximum payload size. 250 * @bus_pl_size_max: [fill] Maximum payload size.
231 * 251 *
232 * @bus_setup: [optional fill] Function called by the bus-generic code 252 * @bus_setup: [optional fill] Function called by the bus-generic code
@@ -397,7 +417,7 @@ struct i2400m_barker_db;
397 * 417 *
398 * @tx_size_max: biggest TX message sent. 418 * @tx_size_max: biggest TX message sent.
399 * 419 *
400 * @rx_lock: spinlock to protect RX members 420 * @rx_lock: spinlock to protect RX members and rx_roq_refcount.
401 * 421 *
402 * @rx_pl_num: total number of payloads received 422 * @rx_pl_num: total number of payloads received
403 * 423 *
@@ -421,6 +441,10 @@ struct i2400m_barker_db;
421 * delivered. Then the driver can release them to the host. See 441 * delivered. Then the driver can release them to the host. See
422 * drivers/net/i2400m/rx.c for details. 442 * drivers/net/i2400m/rx.c for details.
423 * 443 *
444 * @rx_roq_refcount: refcount rx_roq. This refcounts any access to
445 * rx_roq thus preventing rx_roq being destroyed when rx_roq
446 * is being accessed. rx_roq_refcount is protected by rx_lock.
447 *
424 * @rx_reports: reports received from the device that couldn't be 448 * @rx_reports: reports received from the device that couldn't be
425 * processed because the driver wasn't still ready; when ready, 449 * processed because the driver wasn't still ready; when ready,
426 * they are pulled from here and chewed. 450 * they are pulled from here and chewed.
@@ -507,6 +531,38 @@ struct i2400m_barker_db;
507 * same. 531 * same.
508 * 532 *
509 * @pm_notifier: used to register for PM events 533 * @pm_notifier: used to register for PM events
534 *
535 * @bus_reset_retries: counter for the number of bus resets attempted for
536 * this boot. It's not for tracking the number of bus resets during
537 * the whole driver life cycle (from insmod to rmmod) but for the
538 * number of dev_start() executed until dev_start() returns a success
539 * (ie: a good boot means a dev_stop() followed by a successful
540 * dev_start()). dev_reset_handler() increments this counter whenever
541 * it is triggering a bus reset. It checks this counter to decide if a
542 * subsequent bus reset should be retried. dev_reset_handler() retries
543 * the bus reset until dev_start() succeeds or the counter reaches
544 * I2400M_BUS_RESET_RETRIES. The counter is cleared to 0 in
545 * dev_reset_handle() when dev_start() returns a success,
546 * ie: a successul boot is completed.
547 *
548 * @alive: flag to denote if the device *should* be alive. This flag is
549 * everything like @updown (see doc for @updown) except reflecting
550 * the device state *we expect* rather than the actual state as denoted
551 * by @updown. It is set 1 whenever @updown is set 1 in dev_start().
552 * Then the device is expected to be alive all the time
553 * (i2400m->alive remains 1) until the driver is removed. Therefore
554 * all the device reboot events detected can be still handled properly
555 * by either dev_reset_handle() or .pre_reset/.post_reset as long as
556 * the driver presents. It is set 0 along with @updown in dev_stop().
557 *
558 * @error_recovery: flag to denote if we are ready to take an error recovery.
559 * 0 for ready to take an error recovery; 1 for not ready. It is
560 * initialized to 1 while probe() since we don't tend to take any error
561 * recovery during probe(). It is decremented by 1 whenever dev_start()
562 * succeeds to indicate we are ready to take error recovery from now on.
563 * It is checked every time we wanna schedule an error recovery. If an
564 * error recovery is already in place (error_recovery was set 1), we
565 * should not schedule another one until the last one is done.
510 */ 566 */
511struct i2400m { 567struct i2400m {
512 struct wimax_dev wimax_dev; /* FIRST! See doc */ 568 struct wimax_dev wimax_dev; /* FIRST! See doc */
@@ -522,6 +578,7 @@ struct i2400m {
522 wait_queue_head_t state_wq; /* Woken up when on state updates */ 578 wait_queue_head_t state_wq; /* Woken up when on state updates */
523 579
524 size_t bus_tx_block_size; 580 size_t bus_tx_block_size;
581 size_t bus_tx_room_min;
525 size_t bus_pl_size_max; 582 size_t bus_pl_size_max;
526 unsigned bus_bm_retries; 583 unsigned bus_bm_retries;
527 584
@@ -550,10 +607,12 @@ struct i2400m {
550 tx_num, tx_size_acc, tx_size_min, tx_size_max; 607 tx_num, tx_size_acc, tx_size_min, tx_size_max;
551 608
552 /* RX stuff */ 609 /* RX stuff */
553 spinlock_t rx_lock; /* protect RX state */ 610 /* protect RX state and rx_roq_refcount */
611 spinlock_t rx_lock;
554 unsigned rx_pl_num, rx_pl_max, rx_pl_min, 612 unsigned rx_pl_num, rx_pl_max, rx_pl_min,
555 rx_num, rx_size_acc, rx_size_min, rx_size_max; 613 rx_num, rx_size_acc, rx_size_min, rx_size_max;
556 struct i2400m_roq *rx_roq; /* not under rx_lock! */ 614 struct i2400m_roq *rx_roq; /* access is refcounted */
615 struct kref rx_roq_refcount; /* refcount access to rx_roq */
557 u8 src_mac_addr[ETH_HLEN]; 616 u8 src_mac_addr[ETH_HLEN];
558 struct list_head rx_reports; /* under rx_lock! */ 617 struct list_head rx_reports; /* under rx_lock! */
559 struct work_struct rx_report_ws; 618 struct work_struct rx_report_ws;
@@ -581,6 +640,16 @@ struct i2400m {
581 struct i2400m_barker_db *barker; 640 struct i2400m_barker_db *barker;
582 641
583 struct notifier_block pm_notifier; 642 struct notifier_block pm_notifier;
643
644 /* counting bus reset retries in this boot */
645 atomic_t bus_reset_retries;
646
647 /* if the device is expected to be alive */
648 unsigned alive;
649
650 /* 0 if we are ready for error recovery; 1 if not ready */
651 atomic_t error_recovery;
652
584}; 653};
585 654
586 655
@@ -803,6 +872,7 @@ void i2400m_put(struct i2400m *i2400m)
803extern int i2400m_dev_reset_handle(struct i2400m *, const char *); 872extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
804extern int i2400m_pre_reset(struct i2400m *); 873extern int i2400m_pre_reset(struct i2400m *);
805extern int i2400m_post_reset(struct i2400m *); 874extern int i2400m_post_reset(struct i2400m *);
875extern void i2400m_error_recovery(struct i2400m *);
806 876
807/* 877/*
808 * _setup()/_release() are called by the probe/disconnect functions of 878 * _setup()/_release() are called by the probe/disconnect functions of
@@ -815,7 +885,6 @@ extern int i2400m_rx(struct i2400m *, struct sk_buff *);
815extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *); 885extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
816extern void i2400m_tx_msg_sent(struct i2400m *); 886extern void i2400m_tx_msg_sent(struct i2400m *);
817 887
818extern int i2400m_power_save_disabled;
819 888
820/* 889/*
821 * Utility functions 890 * Utility functions
@@ -922,10 +991,5 @@ extern int i2400m_barker_db_init(const char *);
922extern void i2400m_barker_db_exit(void); 991extern void i2400m_barker_db_exit(void);
923 992
924 993
925/* Module parameters */
926
927extern int i2400m_idle_mode_disabled;
928extern int i2400m_rx_reorder_disabled;
929
930 994
931#endif /* #ifndef __I2400M_H__ */ 995#endif /* #ifndef __I2400M_H__ */
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index b811c2f1f5e9..94742e1eafe0 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -84,17 +84,15 @@
84 84
85enum { 85enum {
86/* netdev interface */ 86/* netdev interface */
87 /*
88 * Out of NWG spec (R1_v1.2.2), 3.3.3 ASN Bearer Plane MTU Size
89 *
90 * The MTU is 1400 or less
91 */
92 I2400M_MAX_MTU = 1400,
93 /* 20 secs? yep, this is the maximum timeout that the device 87 /* 20 secs? yep, this is the maximum timeout that the device
94 * might take to get out of IDLE / negotiate it with the base 88 * might take to get out of IDLE / negotiate it with the base
95 * station. We add 1sec for good measure. */ 89 * station. We add 1sec for good measure. */
96 I2400M_TX_TIMEOUT = 21 * HZ, 90 I2400M_TX_TIMEOUT = 21 * HZ,
97 I2400M_TX_QLEN = 5, 91 /*
92 * Experimentation has determined that, 20 to be a good value
93 * for minimizing the jitter in the throughput.
94 */
95 I2400M_TX_QLEN = 20,
98}; 96};
99 97
100 98
@@ -255,7 +253,6 @@ void i2400m_net_wake_stop(struct i2400m *i2400m)
255 kfree_skb(wake_tx_skb); 253 kfree_skb(wake_tx_skb);
256 } 254 }
257 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); 255 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
258 return;
259} 256}
260 257
261 258
@@ -434,7 +431,6 @@ void i2400m_tx_timeout(struct net_device *net_dev)
434 * this, there might be data pending to be sent or not... 431 * this, there might be data pending to be sent or not...
435 */ 432 */
436 net_dev->stats.tx_errors++; 433 net_dev->stats.tx_errors++;
437 return;
438} 434}
439 435
440 436
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index fa2e11e5b4b9..6537593fae66 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -155,6 +155,11 @@
155#define D_SUBMODULE rx 155#define D_SUBMODULE rx
156#include "debug-levels.h" 156#include "debug-levels.h"
157 157
158static int i2400m_rx_reorder_disabled; /* 0 (rx reorder enabled) by default */
159module_param_named(rx_reorder_disabled, i2400m_rx_reorder_disabled, int, 0644);
160MODULE_PARM_DESC(rx_reorder_disabled,
161 "If true, RX reordering will be disabled.");
162
158struct i2400m_report_hook_args { 163struct i2400m_report_hook_args {
159 struct sk_buff *skb_rx; 164 struct sk_buff *skb_rx;
160 const struct i2400m_l3l4_hdr *l3l4_hdr; 165 const struct i2400m_l3l4_hdr *l3l4_hdr;
@@ -300,20 +305,18 @@ void i2400m_rx_ctl_ack(struct i2400m *i2400m,
300 d_printf(1, dev, "Huh? waiter for command reply cancelled\n"); 305 d_printf(1, dev, "Huh? waiter for command reply cancelled\n");
301 goto error_waiter_cancelled; 306 goto error_waiter_cancelled;
302 } 307 }
303 if (ack_skb == NULL) { 308 if (IS_ERR(ack_skb))
304 dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n"); 309 dev_err(dev, "CMD/GET/SET ack: cannot allocate SKB\n");
305 i2400m->ack_skb = ERR_PTR(-ENOMEM); 310 i2400m->ack_skb = ack_skb;
306 } else
307 i2400m->ack_skb = ack_skb;
308 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 311 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
309 complete(&i2400m->msg_completion); 312 complete(&i2400m->msg_completion);
310 return; 313 return;
311 314
312error_waiter_cancelled: 315error_waiter_cancelled:
313 kfree_skb(ack_skb); 316 if (!IS_ERR(ack_skb))
317 kfree_skb(ack_skb);
314error_no_waiter: 318error_no_waiter:
315 spin_unlock_irqrestore(&i2400m->rx_lock, flags); 319 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
316 return;
317} 320}
318 321
319 322
@@ -718,7 +721,6 @@ void __i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
718out: 721out:
719 d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n", 722 d_fnend(4, dev, "(i2400m %p roq %p skb %p sn %u nsn %d) = void\n",
720 i2400m, roq, skb, sn, nsn); 723 i2400m, roq, skb, sn, nsn);
721 return;
722} 724}
723 725
724 726
@@ -743,12 +745,12 @@ unsigned __i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
743 unsigned new_nws, nsn_itr; 745 unsigned new_nws, nsn_itr;
744 746
745 new_nws = __i2400m_roq_nsn(roq, sn); 747 new_nws = __i2400m_roq_nsn(roq, sn);
746 if (unlikely(new_nws >= 1024) && d_test(1)) { 748 /*
747 dev_err(dev, "SW BUG? __update_ws new_nws %u (sn %u ws %u)\n", 749 * For type 2(update_window_start) rx messages, there is no
748 new_nws, sn, roq->ws); 750 * need to check if the normalized sequence number is greater 1023.
749 WARN_ON(1); 751 * Simply insert and deliver all packets to the host up to the
750 i2400m_roq_log_dump(i2400m, roq); 752 * window start.
751 } 753 */
752 skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) { 754 skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) {
753 roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb; 755 roq_data_itr = (struct i2400m_roq_data *) &skb_itr->cb;
754 nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn); 756 nsn_itr = __i2400m_roq_nsn(roq, roq_data_itr->sn);
@@ -798,7 +800,6 @@ void i2400m_roq_reset(struct i2400m *i2400m, struct i2400m_roq *roq)
798 } 800 }
799 roq->ws = 0; 801 roq->ws = 0;
800 d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq); 802 d_fnend(2, dev, "(i2400m %p roq %p) = void\n", i2400m, roq);
801 return;
802} 803}
803 804
804 805
@@ -837,7 +838,6 @@ void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
837 } 838 }
838 d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n", 839 d_fnend(2, dev, "(i2400m %p roq %p skb %p lbn %u) = void\n",
839 i2400m, roq, skb, lbn); 840 i2400m, roq, skb, lbn);
840 return;
841} 841}
842 842
843 843
@@ -863,7 +863,6 @@ void i2400m_roq_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
863 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS, 863 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_WS,
864 old_ws, len, sn, nsn, roq->ws); 864 old_ws, len, sn, nsn, roq->ws);
865 d_fnstart(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn); 865 d_fnstart(2, dev, "(i2400m %p roq %p sn %u) = void\n", i2400m, roq, sn);
866 return;
867} 866}
868 867
869 868
@@ -890,33 +889,52 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
890 i2400m, roq, skb, sn); 889 i2400m, roq, skb, sn);
891 len = skb_queue_len(&roq->queue); 890 len = skb_queue_len(&roq->queue);
892 nsn = __i2400m_roq_nsn(roq, sn); 891 nsn = __i2400m_roq_nsn(roq, sn);
892 /*
893 * For type 3(queue_update_window_start) rx messages, there is no
894 * need to check if the normalized sequence number is greater 1023.
895 * Simply insert and deliver all packets to the host up to the
896 * window start.
897 */
893 old_ws = roq->ws; 898 old_ws = roq->ws;
894 if (unlikely(nsn >= 1024)) { 899 /* If the queue is empty, don't bother as we'd queue
895 dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n", 900 * it and immediately unqueue it -- just deliver it.
896 nsn, sn, roq->ws); 901 */
897 i2400m_roq_log_dump(i2400m, roq); 902 if (len == 0) {
898 i2400m_reset(i2400m, I2400M_RT_WARM); 903 struct i2400m_roq_data *roq_data;
899 } else { 904 roq_data = (struct i2400m_roq_data *) &skb->cb;
900 /* if the queue is empty, don't bother as we'd queue 905 i2400m_net_erx(i2400m, skb, roq_data->cs);
901 * it and inmediately unqueue it -- just deliver it */ 906 } else
902 if (len == 0) { 907 __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
903 struct i2400m_roq_data *roq_data; 908
904 roq_data = (struct i2400m_roq_data *) &skb->cb; 909 __i2400m_roq_update_ws(i2400m, roq, sn + 1);
905 i2400m_net_erx(i2400m, skb, roq_data->cs); 910 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
906 } 911 old_ws, len, sn, nsn, roq->ws);
907 else 912
908 __i2400m_roq_queue(i2400m, roq, skb, sn, nsn);
909 __i2400m_roq_update_ws(i2400m, roq, sn + 1);
910 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET_WS,
911 old_ws, len, sn, nsn, roq->ws);
912 }
913 d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n", 913 d_fnend(2, dev, "(i2400m %p roq %p skb %p sn %u) = void\n",
914 i2400m, roq, skb, sn); 914 i2400m, roq, skb, sn);
915 return;
916} 915}
917 916
918 917
919/* 918/*
919 * This routine destroys the memory allocated for rx_roq, when no
920 * other thread is accessing it. Access to rx_roq is refcounted by
921 * rx_roq_refcount, hence memory allocated must be destroyed when
922 * rx_roq_refcount becomes zero. This routine gets executed when
923 * rx_roq_refcount becomes zero.
924 */
925void i2400m_rx_roq_destroy(struct kref *ref)
926{
927 unsigned itr;
928 struct i2400m *i2400m
929 = container_of(ref, struct i2400m, rx_roq_refcount);
930 for (itr = 0; itr < I2400M_RO_CIN + 1; itr++)
931 __skb_queue_purge(&i2400m->rx_roq[itr].queue);
932 kfree(i2400m->rx_roq[0].log);
933 kfree(i2400m->rx_roq);
934 i2400m->rx_roq = NULL;
935}
936
937/*
920 * Receive and send up an extended data packet 938 * Receive and send up an extended data packet
921 * 939 *
922 * @i2400m: device descriptor 940 * @i2400m: device descriptor
@@ -969,6 +987,7 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
969 unsigned ro_needed, ro_type, ro_cin, ro_sn; 987 unsigned ro_needed, ro_type, ro_cin, ro_sn;
970 struct i2400m_roq *roq; 988 struct i2400m_roq *roq;
971 struct i2400m_roq_data *roq_data; 989 struct i2400m_roq_data *roq_data;
990 unsigned long flags;
972 991
973 BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr)); 992 BUILD_BUG_ON(ETH_HLEN > sizeof(*hdr));
974 993
@@ -1007,7 +1026,16 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
1007 ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN; 1026 ro_cin = (reorder >> I2400M_RO_CIN_SHIFT) & I2400M_RO_CIN;
1008 ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN; 1027 ro_sn = (reorder >> I2400M_RO_SN_SHIFT) & I2400M_RO_SN;
1009 1028
1029 spin_lock_irqsave(&i2400m->rx_lock, flags);
1010 roq = &i2400m->rx_roq[ro_cin]; 1030 roq = &i2400m->rx_roq[ro_cin];
1031 if (roq == NULL) {
1032 kfree_skb(skb); /* rx_roq is already destroyed */
1033 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1034 goto error;
1035 }
1036 kref_get(&i2400m->rx_roq_refcount);
1037 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1038
1011 roq_data = (struct i2400m_roq_data *) &skb->cb; 1039 roq_data = (struct i2400m_roq_data *) &skb->cb;
1012 roq_data->sn = ro_sn; 1040 roq_data->sn = ro_sn;
1013 roq_data->cs = cs; 1041 roq_data->cs = cs;
@@ -1034,6 +1062,10 @@ void i2400m_rx_edata(struct i2400m *i2400m, struct sk_buff *skb_rx,
1034 default: 1062 default:
1035 dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type); 1063 dev_err(dev, "HW BUG? unknown reorder type %u\n", ro_type);
1036 } 1064 }
1065
1066 spin_lock_irqsave(&i2400m->rx_lock, flags);
1067 kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
1068 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1037 } 1069 }
1038 else 1070 else
1039 i2400m_net_erx(i2400m, skb, cs); 1071 i2400m_net_erx(i2400m, skb, cs);
@@ -1041,7 +1073,6 @@ error_skb_clone:
1041error: 1073error:
1042 d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p " 1074 d_fnend(2, dev, "(i2400m %p skb_rx %p single %u payload %p "
1043 "size %zu) = void\n", i2400m, skb_rx, single_last, payload, size); 1075 "size %zu) = void\n", i2400m, skb_rx, single_last, payload, size);
1044 return;
1045} 1076}
1046 1077
1047 1078
@@ -1344,6 +1375,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
1344 __i2400m_roq_init(&i2400m->rx_roq[itr]); 1375 __i2400m_roq_init(&i2400m->rx_roq[itr]);
1345 i2400m->rx_roq[itr].log = &rd[itr]; 1376 i2400m->rx_roq[itr].log = &rd[itr];
1346 } 1377 }
1378 kref_init(&i2400m->rx_roq_refcount);
1347 } 1379 }
1348 return 0; 1380 return 0;
1349 1381
@@ -1357,12 +1389,12 @@ error_roq_alloc:
1357/* Tear down the RX queue and infrastructure */ 1389/* Tear down the RX queue and infrastructure */
1358void i2400m_rx_release(struct i2400m *i2400m) 1390void i2400m_rx_release(struct i2400m *i2400m)
1359{ 1391{
1392 unsigned long flags;
1393
1360 if (i2400m->rx_reorder) { 1394 if (i2400m->rx_reorder) {
1361 unsigned itr; 1395 spin_lock_irqsave(&i2400m->rx_lock, flags);
1362 for(itr = 0; itr < I2400M_RO_CIN + 1; itr++) 1396 kref_put(&i2400m->rx_roq_refcount, i2400m_rx_roq_destroy);
1363 __skb_queue_purge(&i2400m->rx_roq[itr].queue); 1397 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
1364 kfree(i2400m->rx_roq[0].log);
1365 kfree(i2400m->rx_roq);
1366 } 1398 }
1367 /* at this point, nothing can be received... */ 1399 /* at this point, nothing can be received... */
1368 i2400m_report_hook_flush(i2400m); 1400 i2400m_report_hook_flush(i2400m);
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
index d619da33f20b..8b809c2ead6c 100644
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -197,7 +197,6 @@ error_alloc_skb:
197error_get_size: 197error_get_size:
198error_bad_size: 198error_bad_size:
199 d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret); 199 d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
200 return;
201} 200}
202 201
203 202
@@ -229,7 +228,6 @@ void i2400ms_irq(struct sdio_func *func)
229 i2400ms_rx(i2400ms); 228 i2400ms_rx(i2400ms);
230error_no_irq: 229error_no_irq:
231 d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms); 230 d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
232 return;
233} 231}
234 232
235 233
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
index de66d068c9cb..b53cd1c80e3e 100644
--- a/drivers/net/wimax/i2400m/sdio-tx.c
+++ b/drivers/net/wimax/i2400m/sdio-tx.c
@@ -98,6 +98,10 @@ void i2400ms_tx_submit(struct work_struct *ws)
98 tx_msg_size, result); 98 tx_msg_size, result);
99 } 99 }
100 100
101 if (result == -ETIMEDOUT) {
102 i2400m_error_recovery(i2400m);
103 break;
104 }
101 d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size); 105 d_printf(2, dev, "TX: %zub submitted\n", tx_msg_size);
102 } 106 }
103 107
@@ -114,13 +118,17 @@ void i2400ms_bus_tx_kick(struct i2400m *i2400m)
114{ 118{
115 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m); 119 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
116 struct device *dev = &i2400ms->func->dev; 120 struct device *dev = &i2400ms->func->dev;
121 unsigned long flags;
117 122
118 d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m); 123 d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
119 124
120 /* schedule tx work, this is because tx may block, therefore 125 /* schedule tx work, this is because tx may block, therefore
121 * it has to run in a thread context. 126 * it has to run in a thread context.
122 */ 127 */
123 queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker); 128 spin_lock_irqsave(&i2400m->tx_lock, flags);
129 if (i2400ms->tx_workqueue != NULL)
130 queue_work(i2400ms->tx_workqueue, &i2400ms->tx_worker);
131 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
124 132
125 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); 133 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
126} 134}
@@ -130,27 +138,40 @@ int i2400ms_tx_setup(struct i2400ms *i2400ms)
130 int result; 138 int result;
131 struct device *dev = &i2400ms->func->dev; 139 struct device *dev = &i2400ms->func->dev;
132 struct i2400m *i2400m = &i2400ms->i2400m; 140 struct i2400m *i2400m = &i2400ms->i2400m;
141 struct workqueue_struct *tx_workqueue;
142 unsigned long flags;
133 143
134 d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms); 144 d_fnstart(5, dev, "(i2400ms %p)\n", i2400ms);
135 145
136 INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit); 146 INIT_WORK(&i2400ms->tx_worker, i2400ms_tx_submit);
137 snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name), 147 snprintf(i2400ms->tx_wq_name, sizeof(i2400ms->tx_wq_name),
138 "%s-tx", i2400m->wimax_dev.name); 148 "%s-tx", i2400m->wimax_dev.name);
139 i2400ms->tx_workqueue = 149 tx_workqueue =
140 create_singlethread_workqueue(i2400ms->tx_wq_name); 150 create_singlethread_workqueue(i2400ms->tx_wq_name);
141 if (NULL == i2400ms->tx_workqueue) { 151 if (tx_workqueue == NULL) {
142 dev_err(dev, "TX: failed to create workqueue\n"); 152 dev_err(dev, "TX: failed to create workqueue\n");
143 result = -ENOMEM; 153 result = -ENOMEM;
144 } else 154 } else
145 result = 0; 155 result = 0;
156 spin_lock_irqsave(&i2400m->tx_lock, flags);
157 i2400ms->tx_workqueue = tx_workqueue;
158 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
146 d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result); 159 d_fnend(5, dev, "(i2400ms %p) = %d\n", i2400ms, result);
147 return result; 160 return result;
148} 161}
149 162
150void i2400ms_tx_release(struct i2400ms *i2400ms) 163void i2400ms_tx_release(struct i2400ms *i2400ms)
151{ 164{
152 if (i2400ms->tx_workqueue) { 165 struct i2400m *i2400m = &i2400ms->i2400m;
153 destroy_workqueue(i2400ms->tx_workqueue); 166 struct workqueue_struct *tx_workqueue;
154 i2400ms->tx_workqueue = NULL; 167 unsigned long flags;
155 } 168
169 tx_workqueue = i2400ms->tx_workqueue;
170
171 spin_lock_irqsave(&i2400m->tx_lock, flags);
172 i2400ms->tx_workqueue = NULL;
173 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
174
175 if (tx_workqueue)
176 destroy_workqueue(tx_workqueue);
156} 177}
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 7632f80954e3..9bfc26e1bc6b 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -483,6 +483,13 @@ int i2400ms_probe(struct sdio_func *func,
483 sdio_set_drvdata(func, i2400ms); 483 sdio_set_drvdata(func, i2400ms);
484 484
485 i2400m->bus_tx_block_size = I2400MS_BLK_SIZE; 485 i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
486 /*
487 * Room required in the TX queue for SDIO message to accommodate
488 * a smallest payload while allocating header space is 224 bytes,
489 * which is the smallest message size(the block size 256 bytes)
490 * minus the smallest message header size(32 bytes).
491 */
492 i2400m->bus_tx_room_min = I2400MS_BLK_SIZE - I2400M_PL_ALIGN * 2;
486 i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX; 493 i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
487 i2400m->bus_setup = i2400ms_bus_setup; 494 i2400m->bus_setup = i2400ms_bus_setup;
488 i2400m->bus_dev_start = i2400ms_bus_dev_start; 495 i2400m->bus_dev_start = i2400ms_bus_dev_start;
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index b0cb90624cf6..3f819efc06b5 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -258,8 +258,10 @@ enum {
258 * Doc says maximum transaction is 16KiB. If we had 16KiB en 258 * Doc says maximum transaction is 16KiB. If we had 16KiB en
259 * route and 16KiB being queued, it boils down to needing 259 * route and 16KiB being queued, it boils down to needing
260 * 32KiB. 260 * 32KiB.
261 * 32KiB is insufficient for 1400 MTU, hence increasing
262 * tx buffer size to 64KiB.
261 */ 263 */
262 I2400M_TX_BUF_SIZE = 32768, 264 I2400M_TX_BUF_SIZE = 65536,
263 /** 265 /**
264 * Message header and payload descriptors have to be 16 266 * Message header and payload descriptors have to be 16
265 * aligned (16 + 4 * N = 16 * M). If we take that average sent 267 * aligned (16 + 4 * N = 16 * M). If we take that average sent
@@ -270,10 +272,21 @@ enum {
270 * at the end there are less, we pad up to the nearest 272 * at the end there are less, we pad up to the nearest
271 * multiple of 16. 273 * multiple of 16.
272 */ 274 */
273 I2400M_TX_PLD_MAX = 12, 275 /*
276 * According to Intel Wimax i3200, i5x50 and i6x50 specification
277 * documents, the maximum number of payloads per message can be
278 * up to 60. Increasing the number of payloads to 60 per message
279 * helps to accommodate smaller payloads in a single transaction.
280 */
281 I2400M_TX_PLD_MAX = 60,
274 I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr) 282 I2400M_TX_PLD_SIZE = sizeof(struct i2400m_msg_hdr)
275 + I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld), 283 + I2400M_TX_PLD_MAX * sizeof(struct i2400m_pld),
276 I2400M_TX_SKIP = 0x80000000, 284 I2400M_TX_SKIP = 0x80000000,
285 /*
286 * According to Intel Wimax i3200, i5x50 and i6x50 specification
287 * documents, the maximum size of each message can be up to 16KiB.
288 */
289 I2400M_TX_MSG_SIZE = 16384,
277}; 290};
278 291
279#define TAIL_FULL ((void *)~(unsigned long)NULL) 292#define TAIL_FULL ((void *)~(unsigned long)NULL)
@@ -328,6 +341,14 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
328 * @padding: ensure that there is at least this many bytes of free 341 * @padding: ensure that there is at least this many bytes of free
329 * contiguous space in the fifo. This is needed because later on 342 * contiguous space in the fifo. This is needed because later on
330 * we might need to add padding. 343 * we might need to add padding.
344 * @try_head: specify either to allocate head room or tail room space
345 * in the TX FIFO. This boolean is required to avoids a system hang
346 * due to an infinite loop caused by i2400m_tx_fifo_push().
347 * The caller must always try to allocate tail room space first by
348 * calling this routine with try_head = 0. In case if there
349 * is not enough tail room space but there is enough head room space,
350 * (i2400m_tx_fifo_push() returns TAIL_FULL) try to allocate head
351 * room space, by calling this routine again with try_head = 1.
331 * 352 *
332 * Returns: 353 * Returns:
333 * 354 *
@@ -359,6 +380,48 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
359 * fail and return TAIL_FULL and let the caller figure out if we wants to 380 * fail and return TAIL_FULL and let the caller figure out if we wants to
360 * skip the tail room and try to allocate from the head. 381 * skip the tail room and try to allocate from the head.
361 * 382 *
383 * There is a corner case, wherein i2400m_tx_new() can get into
384 * an infinite loop calling i2400m_tx_fifo_push().
385 * In certain situations, tx_in would have reached on the top of TX FIFO
386 * and i2400m_tx_tail_room() returns 0, as described below:
387 *
388 * N ___________ tail room is zero
389 * |<- IN ->|
390 * | |
391 * | |
392 * | |
393 * | data |
394 * |<- OUT ->|
395 * | |
396 * | |
397 * | head room |
398 * 0 -----------
399 * During such a time, where tail room is zero in the TX FIFO and if there
400 * is a request to add a payload to TX FIFO, which calls:
401 * i2400m_tx()
402 * ->calls i2400m_tx_close()
403 * ->calls i2400m_tx_skip_tail()
404 * goto try_new;
405 * ->calls i2400m_tx_new()
406 * |----> [try_head:]
407 * infinite loop | ->calls i2400m_tx_fifo_push()
408 * | if (tail_room < needed)
409 * | if (head_room => needed)
410 * | return TAIL_FULL;
411 * |<---- goto try_head;
412 *
413 * i2400m_tx() calls i2400m_tx_close() to close the message, since there
414 * is no tail room to accommodate the payload and calls
415 * i2400m_tx_skip_tail() to skip the tail space. Now i2400m_tx() calls
416 * i2400m_tx_new() to allocate space for new message header calling
417 * i2400m_tx_fifo_push() that returns TAIL_FULL, since there is no tail space
418 * to accommodate the message header, but there is enough head space.
419 * The i2400m_tx_new() keeps re-retrying by calling i2400m_tx_fifo_push()
420 * ending up in a loop causing system freeze.
421 *
422 * This corner case is avoided by using a try_head boolean,
423 * as an argument to i2400m_tx_fifo_push().
424 *
362 * Note: 425 * Note:
363 * 426 *
364 * Assumes i2400m->tx_lock is taken, and we use that as a barrier 427 * Assumes i2400m->tx_lock is taken, and we use that as a barrier
@@ -367,7 +430,8 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
367 * pop data off the queue 430 * pop data off the queue
368 */ 431 */
369static 432static
370void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding) 433void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size,
434 size_t padding, bool try_head)
371{ 435{
372 struct device *dev = i2400m_dev(i2400m); 436 struct device *dev = i2400m_dev(i2400m);
373 size_t room, tail_room, needed_size; 437 size_t room, tail_room, needed_size;
@@ -382,9 +446,21 @@ void *i2400m_tx_fifo_push(struct i2400m *i2400m, size_t size, size_t padding)
382 } 446 }
383 /* Is there space at the tail? */ 447 /* Is there space at the tail? */
384 tail_room = __i2400m_tx_tail_room(i2400m); 448 tail_room = __i2400m_tx_tail_room(i2400m);
385 if (tail_room < needed_size) { 449 if (!try_head && tail_room < needed_size) {
386 if (i2400m->tx_out % I2400M_TX_BUF_SIZE 450 /*
387 < i2400m->tx_in % I2400M_TX_BUF_SIZE) { 451 * If the tail room space is not enough to push the message
452 * in the TX FIFO, then there are two possibilities:
453 * 1. There is enough head room space to accommodate
454 * this message in the TX FIFO.
455 * 2. There is not enough space in the head room and
456 * in tail room of the TX FIFO to accommodate the message.
457 * In the case (1), return TAIL_FULL so that the caller
458 * can figure out, if the caller wants to push the message
459 * into the head room space.
460 * In the case (2), return NULL, indicating that the TX FIFO
461 * cannot accommodate the message.
462 */
463 if (room - tail_room >= needed_size) {
388 d_printf(2, dev, "fifo push %zu/%zu: tail full\n", 464 d_printf(2, dev, "fifo push %zu/%zu: tail full\n",
389 size, padding); 465 size, padding);
390 return TAIL_FULL; /* There might be head space */ 466 return TAIL_FULL; /* There might be head space */
@@ -485,14 +561,25 @@ void i2400m_tx_new(struct i2400m *i2400m)
485{ 561{
486 struct device *dev = i2400m_dev(i2400m); 562 struct device *dev = i2400m_dev(i2400m);
487 struct i2400m_msg_hdr *tx_msg; 563 struct i2400m_msg_hdr *tx_msg;
564 bool try_head = 0;
488 BUG_ON(i2400m->tx_msg != NULL); 565 BUG_ON(i2400m->tx_msg != NULL);
566 /*
567 * In certain situations, TX queue might have enough space to
568 * accommodate the new message header I2400M_TX_PLD_SIZE, but
569 * might not have enough space to accommodate the payloads.
570 * Adding bus_tx_room_min padding while allocating a new TX message
571 * increases the possibilities of including at least one payload of the
572 * size <= bus_tx_room_min.
573 */
489try_head: 574try_head:
490 tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE, 0); 575 tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE,
576 i2400m->bus_tx_room_min, try_head);
491 if (tx_msg == NULL) 577 if (tx_msg == NULL)
492 goto out; 578 goto out;
493 else if (tx_msg == TAIL_FULL) { 579 else if (tx_msg == TAIL_FULL) {
494 i2400m_tx_skip_tail(i2400m); 580 i2400m_tx_skip_tail(i2400m);
495 d_printf(2, dev, "new TX message: tail full, trying head\n"); 581 d_printf(2, dev, "new TX message: tail full, trying head\n");
582 try_head = 1;
496 goto try_head; 583 goto try_head;
497 } 584 }
498 memset(tx_msg, 0, I2400M_TX_PLD_SIZE); 585 memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -566,7 +653,7 @@ void i2400m_tx_close(struct i2400m *i2400m)
566 aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size); 653 aligned_size = ALIGN(tx_msg_moved->size, i2400m->bus_tx_block_size);
567 padding = aligned_size - tx_msg_moved->size; 654 padding = aligned_size - tx_msg_moved->size;
568 if (padding > 0) { 655 if (padding > 0) {
569 pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0); 656 pad_buf = i2400m_tx_fifo_push(i2400m, padding, 0, 0);
570 if (unlikely(WARN_ON(pad_buf == NULL 657 if (unlikely(WARN_ON(pad_buf == NULL
571 || pad_buf == TAIL_FULL))) { 658 || pad_buf == TAIL_FULL))) {
572 /* This should not happen -- append should verify 659 /* This should not happen -- append should verify
@@ -632,6 +719,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
632 unsigned long flags; 719 unsigned long flags;
633 size_t padded_len; 720 size_t padded_len;
634 void *ptr; 721 void *ptr;
722 bool try_head = 0;
635 unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM 723 unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
636 || pl_type == I2400M_PT_RESET_COLD; 724 || pl_type == I2400M_PT_RESET_COLD;
637 725
@@ -643,9 +731,11 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
643 * current one is out of payload slots or we have a singleton, 731 * current one is out of payload slots or we have a singleton,
644 * close it and start a new one */ 732 * close it and start a new one */
645 spin_lock_irqsave(&i2400m->tx_lock, flags); 733 spin_lock_irqsave(&i2400m->tx_lock, flags);
646 result = -ESHUTDOWN; 734 /* If tx_buf is NULL, device is shutdown */
647 if (i2400m->tx_buf == NULL) 735 if (i2400m->tx_buf == NULL) {
736 result = -ESHUTDOWN;
648 goto error_tx_new; 737 goto error_tx_new;
738 }
649try_new: 739try_new:
650 if (unlikely(i2400m->tx_msg == NULL)) 740 if (unlikely(i2400m->tx_msg == NULL))
651 i2400m_tx_new(i2400m); 741 i2400m_tx_new(i2400m);
@@ -659,7 +749,13 @@ try_new:
659 } 749 }
660 if (i2400m->tx_msg == NULL) 750 if (i2400m->tx_msg == NULL)
661 goto error_tx_new; 751 goto error_tx_new;
662 if (i2400m->tx_msg->size + padded_len > I2400M_TX_BUF_SIZE / 2) { 752 /*
753 * Check if this skb will fit in the TX queue's current active
754 * TX message. The total message size must not exceed the maximum
755 * size of each message I2400M_TX_MSG_SIZE. If it exceeds,
756 * close the current message and push this skb into the new message.
757 */
758 if (i2400m->tx_msg->size + padded_len > I2400M_TX_MSG_SIZE) {
663 d_printf(2, dev, "TX: message too big, going new\n"); 759 d_printf(2, dev, "TX: message too big, going new\n");
664 i2400m_tx_close(i2400m); 760 i2400m_tx_close(i2400m);
665 i2400m_tx_new(i2400m); 761 i2400m_tx_new(i2400m);
@@ -669,11 +765,12 @@ try_new:
669 /* So we have a current message header; now append space for 765 /* So we have a current message header; now append space for
670 * the message -- if there is not enough, try the head */ 766 * the message -- if there is not enough, try the head */
671 ptr = i2400m_tx_fifo_push(i2400m, padded_len, 767 ptr = i2400m_tx_fifo_push(i2400m, padded_len,
672 i2400m->bus_tx_block_size); 768 i2400m->bus_tx_block_size, try_head);
673 if (ptr == TAIL_FULL) { /* Tail is full, try head */ 769 if (ptr == TAIL_FULL) { /* Tail is full, try head */
674 d_printf(2, dev, "pl append: tail full\n"); 770 d_printf(2, dev, "pl append: tail full\n");
675 i2400m_tx_close(i2400m); 771 i2400m_tx_close(i2400m);
676 i2400m_tx_skip_tail(i2400m); 772 i2400m_tx_skip_tail(i2400m);
773 try_head = 1;
677 goto try_new; 774 goto try_new;
678 } else if (ptr == NULL) { /* All full */ 775 } else if (ptr == NULL) { /* All full */
679 result = -ENOSPC; 776 result = -ENOSPC;
@@ -689,7 +786,7 @@ try_new:
689 pl_type, buf_len); 786 pl_type, buf_len);
690 tx_msg->num_pls = le16_to_cpu(num_pls+1); 787 tx_msg->num_pls = le16_to_cpu(num_pls+1);
691 tx_msg->size += padded_len; 788 tx_msg->size += padded_len;
692 d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u \n", 789 d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
693 padded_len, tx_msg->size, num_pls+1); 790 padded_len, tx_msg->size, num_pls+1);
694 d_printf(2, dev, 791 d_printf(2, dev,
695 "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n", 792 "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
@@ -860,25 +957,43 @@ EXPORT_SYMBOL_GPL(i2400m_tx_msg_sent);
860 * i2400m_tx_setup - Initialize the TX queue and infrastructure 957 * i2400m_tx_setup - Initialize the TX queue and infrastructure
861 * 958 *
862 * Make sure we reset the TX sequence to zero, as when this function 959 * Make sure we reset the TX sequence to zero, as when this function
863 * is called, the firmware has been just restarted. 960 * is called, the firmware has been just restarted. Same rational
961 * for tx_in, tx_out, tx_msg_size and tx_msg. We reset them since
962 * the memory for TX queue is reallocated.
864 */ 963 */
865int i2400m_tx_setup(struct i2400m *i2400m) 964int i2400m_tx_setup(struct i2400m *i2400m)
866{ 965{
867 int result; 966 int result = 0;
967 void *tx_buf;
968 unsigned long flags;
868 969
869 /* Do this here only once -- can't do on 970 /* Do this here only once -- can't do on
870 * i2400m_hard_start_xmit() as we'll cause race conditions if 971 * i2400m_hard_start_xmit() as we'll cause race conditions if
871 * the WS was scheduled on another CPU */ 972 * the WS was scheduled on another CPU */
872 INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work); 973 INIT_WORK(&i2400m->wake_tx_ws, i2400m_wake_tx_work);
873 974
874 i2400m->tx_sequence = 0; 975 tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_ATOMIC);
875 i2400m->tx_buf = kmalloc(I2400M_TX_BUF_SIZE, GFP_KERNEL); 976 if (tx_buf == NULL) {
876 if (i2400m->tx_buf == NULL)
877 result = -ENOMEM; 977 result = -ENOMEM;
878 else 978 goto error_kmalloc;
879 result = 0; 979 }
980
981 /*
982 * Fail the build if we can't fit at least two maximum size messages
983 * on the TX FIFO [one being delivered while one is constructed].
984 */
985 BUILD_BUG_ON(2 * I2400M_TX_MSG_SIZE > I2400M_TX_BUF_SIZE);
986 spin_lock_irqsave(&i2400m->tx_lock, flags);
987 i2400m->tx_sequence = 0;
988 i2400m->tx_in = 0;
989 i2400m->tx_out = 0;
990 i2400m->tx_msg_size = 0;
991 i2400m->tx_msg = NULL;
992 i2400m->tx_buf = tx_buf;
993 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
880 /* Huh? the bus layer has to define this... */ 994 /* Huh? the bus layer has to define this... */
881 BUG_ON(i2400m->bus_tx_block_size == 0); 995 BUG_ON(i2400m->bus_tx_block_size == 0);
996error_kmalloc:
882 return result; 997 return result;
883 998
884} 999}
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
index 7b6a1d98bd74..d44b545f4082 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -178,7 +178,6 @@ error_submit:
178out: 178out:
179 d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n", 179 d_fnend(4, dev, "(urb %p status %d actual_length %d) = void\n",
180 urb, urb->status, urb->actual_length); 180 urb, urb->status, urb->actual_length);
181 return;
182} 181}
183 182
184 183
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index d8c4d6497fdf..0d5081d77dc0 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -82,6 +82,8 @@ MODULE_PARM_DESC(debug,
82 82
83/* Our firmware file name */ 83/* Our firmware file name */
84static const char *i2400mu_bus_fw_names_5x50[] = { 84static const char *i2400mu_bus_fw_names_5x50[] = {
85#define I2400MU_FW_FILE_NAME_v1_5 "i2400m-fw-usb-1.5.sbcf"
86 I2400MU_FW_FILE_NAME_v1_5,
85#define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf" 87#define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf"
86 I2400MU_FW_FILE_NAME_v1_4, 88 I2400MU_FW_FILE_NAME_v1_4,
87 NULL, 89 NULL,
@@ -467,6 +469,13 @@ int i2400mu_probe(struct usb_interface *iface,
467 usb_set_intfdata(iface, i2400mu); 469 usb_set_intfdata(iface, i2400mu);
468 470
469 i2400m->bus_tx_block_size = I2400MU_BLK_SIZE; 471 i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
472 /*
473 * Room required in the Tx queue for USB message to accommodate
474 * a smallest payload while allocating header space is 16 bytes.
475 * Adding this room for the new tx message increases the
476 * possibilities of including any payload with size <= 16 bytes.
477 */
478 i2400m->bus_tx_room_min = I2400MU_BLK_SIZE;
470 i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX; 479 i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
471 i2400m->bus_setup = NULL; 480 i2400m->bus_setup = NULL;
472 i2400m->bus_dev_start = i2400mu_bus_dev_start; 481 i2400m->bus_dev_start = i2400mu_bus_dev_start;
@@ -505,7 +514,7 @@ int i2400mu_probe(struct usb_interface *iface,
505 iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */ 514 iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */
506 device_init_wakeup(dev, 1); 515 device_init_wakeup(dev, 1);
507 usb_dev->autosuspend_delay = 15 * HZ; 516 usb_dev->autosuspend_delay = 15 * HZ;
508 usb_dev->autosuspend_disabled = 0; 517 usb_enable_autosuspend(usb_dev);
509#endif 518#endif
510 519
511 result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT); 520 result = i2400m_setup(i2400m, I2400M_BRI_MAC_REINIT);
@@ -778,4 +787,5 @@ MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
778MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M " 787MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M "
779 "(5x50 & 6050)"); 788 "(5x50 & 6050)");
780MODULE_LICENSE("GPL"); 789MODULE_LICENSE("GPL");
781MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_4); 790MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_5);
791MODULE_FIRMWARE(I6050U_FW_FILE_NAME_v1_5);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 588943660755..174e3442d519 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -5,6 +5,7 @@
5menuconfig WLAN 5menuconfig WLAN
6 bool "Wireless LAN" 6 bool "Wireless LAN"
7 depends on !S390 7 depends on !S390
8 depends on NET
8 select WIRELESS 9 select WIRELESS
9 default y 10 default y
10 ---help--- 11 ---help---
@@ -38,6 +39,12 @@ config LIBERTAS_THINFIRM
38 ---help--- 39 ---help---
39 A library for Marvell Libertas 8xxx devices using thinfirm. 40 A library for Marvell Libertas 8xxx devices using thinfirm.
40 41
42config LIBERTAS_THINFIRM_DEBUG
43 bool "Enable full debugging output in the Libertas thin firmware module."
44 depends on LIBERTAS_THINFIRM
45 ---help---
46 Debugging support.
47
41config LIBERTAS_THINFIRM_USB 48config LIBERTAS_THINFIRM_USB
42 tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware" 49 tristate "Marvell Libertas 8388 USB 802.11b/g cards with thin firmware"
43 depends on LIBERTAS_THINFIRM && USB 50 depends on LIBERTAS_THINFIRM && USB
@@ -210,90 +217,7 @@ config USB_NET_RNDIS_WLAN
210 217
211 If you choose to build a module, it'll be called rndis_wlan. 218 If you choose to build a module, it'll be called rndis_wlan.
212 219
213config RTL8180 220source "drivers/net/wireless/rtl818x/Kconfig"
214 tristate "Realtek 8180/8185 PCI support"
215 depends on MAC80211 && PCI && EXPERIMENTAL
216 select EEPROM_93CX6
217 ---help---
218 This is a driver for RTL8180 and RTL8185 based cards.
219 These are PCI based chips found in cards such as:
220
221 (RTL8185 802.11g)
222 A-Link WL54PC
223
224 (RTL8180 802.11b)
225 Belkin F5D6020 v3
226 Belkin F5D6020 v3
227 Dlink DWL-610
228 Dlink DWL-510
229 Netgear MA521
230 Level-One WPC-0101
231 Acer Aspire 1357 LMi
232 VCTnet PC-11B1
233 Ovislink AirLive WL-1120PCM
234 Mentor WL-PCI
235 Linksys WPC11 v4
236 TrendNET TEW-288PI
237 D-Link DWL-520 Rev D
238 Repotec RP-WP7126
239 TP-Link TL-WN250/251
240 Zonet ZEW1000
241 Longshine LCS-8031-R
242 HomeLine HLW-PCC200
243 GigaFast WF721-AEX
244 Planet WL-3553
245 Encore ENLWI-PCI1-NT
246 TrendNET TEW-266PC
247 Gigabyte GN-WLMR101
248 Siemens-fujitsu Amilo D1840W
249 Edimax EW-7126
250 PheeNet WL-11PCIR
251 Tonze PC-2100T
252 Planet WL-8303
253 Dlink DWL-650 v M1
254 Edimax EW-7106
255 Q-Tec 770WC
256 Topcom Skyr@cer 4011b
257 Roper FreeLan 802.11b (edition 2004)
258 Wistron Neweb Corp CB-200B
259 Pentagram HorNET
260 QTec 775WC
261 TwinMOS Booming B Series
262 Micronet SP906BB
263 Sweex LC700010
264 Surecom EP-9428
265 Safecom SWLCR-1100
266
267 Thanks to Realtek for their support!
268
269config RTL8187
270 tristate "Realtek 8187 and 8187B USB support"
271 depends on MAC80211 && USB
272 select EEPROM_93CX6
273 ---help---
274 This is a driver for RTL8187 and RTL8187B based cards.
275 These are USB based chips found in devices such as:
276
277 Netgear WG111v2
278 Level 1 WNC-0301USB
279 Micronet SP907GK V5
280 Encore ENUWI-G2
281 Trendnet TEW-424UB
282 ASUS P5B Deluxe/P5K Premium motherboards
283 Toshiba Satellite Pro series of laptops
284 Asus Wireless Link
285 Linksys WUSB54GC-EU v2
286 (v1 = rt73usb; v3 is rt2070-based,
287 use staging/rt3070 or try rt2800usb)
288
289 Thanks to Realtek for their support!
290
291# If possible, automatically enable LEDs for RTL8187.
292
293config RTL8187_LEDS
294 bool
295 depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
296 default y
297 221
298config ADM8211 222config ADM8211
299 tristate "ADMtek ADM8211 support" 223 tristate "ADMtek ADM8211 support"
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index ab61d2b558d6..880ad9d170c2 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1318,21 +1318,19 @@ static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
1318} 1318}
1319 1319
1320static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw, 1320static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw,
1321 int mc_count, struct dev_addr_list *mclist) 1321 struct netdev_hw_addr_list *mc_list)
1322{ 1322{
1323 unsigned int bit_nr, i; 1323 unsigned int bit_nr;
1324 u32 mc_filter[2]; 1324 u32 mc_filter[2];
1325 struct netdev_hw_addr *ha;
1325 1326
1326 mc_filter[1] = mc_filter[0] = 0; 1327 mc_filter[1] = mc_filter[0] = 0;
1327 1328
1328 for (i = 0; i < mc_count; i++) { 1329 netdev_hw_addr_list_for_each(ha, mc_list) {
1329 if (!mclist) 1330 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1330 break;
1331 bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1332 1331
1333 bit_nr &= 0x3F; 1332 bit_nr &= 0x3F;
1334 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 1333 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1335 mclist = mclist->next;
1336 } 1334 }
1337 1335
1338 return mc_filter[0] | ((u64)(mc_filter[1]) << 32); 1336 return mc_filter[0] | ((u64)(mc_filter[1]) << 32);
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index dc5018a6d9ed..a441aad922c2 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2876,7 +2876,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2876 ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0; 2876 ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0;
2877 ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0; 2877 ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0;
2878 2878
2879 airo_print_info(dev->name, "Firmware version %x.%x.%02x", 2879 airo_print_info(dev->name, "Firmware version %x.%x.%02d",
2880 ((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF), 2880 ((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF),
2881 (le16_to_cpu(cap_rid.softVer) & 0xFF), 2881 (le16_to_cpu(cap_rid.softVer) & 0xFF),
2882 le16_to_cpu(cap_rid.softSubVer)); 2882 le16_to_cpu(cap_rid.softSubVer));
@@ -3193,19 +3193,26 @@ static void airo_print_status(const char *devname, u16 status)
3193{ 3193{
3194 u8 reason = status & 0xFF; 3194 u8 reason = status & 0xFF;
3195 3195
3196 switch (status) { 3196 switch (status & 0xFF00) {
3197 case STAT_NOBEACON: 3197 case STAT_NOBEACON:
3198 airo_print_dbg(devname, "link lost (missed beacons)"); 3198 switch (status) {
3199 break; 3199 case STAT_NOBEACON:
3200 case STAT_MAXRETRIES: 3200 airo_print_dbg(devname, "link lost (missed beacons)");
3201 case STAT_MAXARL: 3201 break;
3202 airo_print_dbg(devname, "link lost (max retries)"); 3202 case STAT_MAXRETRIES:
3203 break; 3203 case STAT_MAXARL:
3204 case STAT_FORCELOSS: 3204 airo_print_dbg(devname, "link lost (max retries)");
3205 airo_print_dbg(devname, "link lost (local choice)"); 3205 break;
3206 break; 3206 case STAT_FORCELOSS:
3207 case STAT_TSFSYNC: 3207 airo_print_dbg(devname, "link lost (local choice)");
3208 airo_print_dbg(devname, "link lost (TSF sync lost)"); 3208 break;
3209 case STAT_TSFSYNC:
3210 airo_print_dbg(devname, "link lost (TSF sync lost)");
3211 break;
3212 default:
3213 airo_print_dbg(devname, "unknow status %x\n", status);
3214 break;
3215 }
3209 break; 3216 break;
3210 case STAT_DEAUTH: 3217 case STAT_DEAUTH:
3211 airo_print_dbg(devname, "deauthenticated (reason: %d)", reason); 3218 airo_print_dbg(devname, "deauthenticated (reason: %d)", reason);
@@ -3221,7 +3228,11 @@ static void airo_print_status(const char *devname, u16 status)
3221 airo_print_dbg(devname, "authentication failed (reason: %d)", 3228 airo_print_dbg(devname, "authentication failed (reason: %d)",
3222 reason); 3229 reason);
3223 break; 3230 break;
3231 case STAT_ASSOC:
3232 case STAT_REASSOC:
3233 break;
3224 default: 3234 default:
3235 airo_print_dbg(devname, "unknow status %x\n", status);
3225 break; 3236 break;
3226 } 3237 }
3227} 3238}
diff --git a/drivers/net/wireless/airo_cs.c b/drivers/net/wireless/airo_cs.c
index f6036fb42319..33bdc6a84e81 100644
--- a/drivers/net/wireless/airo_cs.c
+++ b/drivers/net/wireless/airo_cs.c
@@ -75,42 +75,7 @@ static void airo_release(struct pcmcia_device *link);
75 75
76static void airo_detach(struct pcmcia_device *p_dev); 76static void airo_detach(struct pcmcia_device *p_dev);
77 77
78/*
79 You'll also need to prototype all the functions that will actually
80 be used to talk to your device. See 'pcmem_cs' for a good example
81 of a fully self-sufficient driver; the other drivers rely more or
82 less on other parts of the kernel.
83*/
84
85/*
86 A linked list of "instances" of the aironet device. Each actual
87 PCMCIA card corresponds to one device instance, and is described
88 by one struct pcmcia_device structure (defined in ds.h).
89
90 You may not want to use a linked list for this -- for example, the
91 memory card driver uses an array of struct pcmcia_device pointers,
92 where minor device numbers are used to derive the corresponding
93 array index.
94*/
95
96/*
97 A driver needs to provide a dev_node_t structure for each device
98 on a card. In some cases, there is only one device per card (for
99 example, ethernet cards, modems). In other cases, there may be
100 many actual or logical devices (SCSI adapters, memory cards with
101 multiple partitions). The dev_node_t structures need to be kept
102 in a linked list starting at the 'dev' field of a struct pcmcia_device
103 structure. We allocate them in the card's private data structure,
104 because they generally shouldn't be allocated dynamically.
105
106 In this case, we also provide a flag to indicate if a device is
107 "stopped" due to a power management event, or card ejection. The
108 device IO routines can use a flag like this to throttle IO to a
109 card that is not ready to accept it.
110*/
111
112typedef struct local_info_t { 78typedef struct local_info_t {
113 dev_node_t node;
114 struct net_device *eth_dev; 79 struct net_device *eth_dev;
115} local_info_t; 80} local_info_t;
116 81
@@ -132,10 +97,6 @@ static int airo_probe(struct pcmcia_device *p_dev)
132 97
133 dev_dbg(&p_dev->dev, "airo_attach()\n"); 98 dev_dbg(&p_dev->dev, "airo_attach()\n");
134 99
135 /* Interrupt setup */
136 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
137 p_dev->irq.Handler = NULL;
138
139 /* 100 /*
140 General socket configuration defaults can go here. In this 101 General socket configuration defaults can go here. In this
141 client, we assume very little, and rely on the CIS for almost 102 client, we assume very little, and rely on the CIS for almost
@@ -212,9 +173,7 @@ static int airo_cs_config_check(struct pcmcia_device *p_dev,
212 else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM)) 173 else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
213 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000; 174 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
214 175
215 /* Do we need to allocate an interrupt? */ 176 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
216 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
217 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
218 177
219 /* IO window settings */ 178 /* IO window settings */
220 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 179 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -300,16 +259,8 @@ static int airo_config(struct pcmcia_device *link)
300 if (ret) 259 if (ret)
301 goto failed; 260 goto failed;
302 261
303 /* 262 if (!link->irq)
304 Allocate an interrupt line. Note that this does not assign a 263 goto failed;
305 handler to the interrupt, unless the 'Handler' member of the
306 irq structure is initialized.
307 */
308 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
309 ret = pcmcia_request_irq(link, &link->irq);
310 if (ret)
311 goto failed;
312 }
313 264
314 /* 265 /*
315 This actually configures the PCMCIA socket -- setting up 266 This actually configures the PCMCIA socket -- setting up
@@ -320,26 +271,17 @@ static int airo_config(struct pcmcia_device *link)
320 if (ret) 271 if (ret)
321 goto failed; 272 goto failed;
322 ((local_info_t *)link->priv)->eth_dev = 273 ((local_info_t *)link->priv)->eth_dev =
323 init_airo_card(link->irq.AssignedIRQ, 274 init_airo_card(link->irq,
324 link->io.BasePort1, 1, &link->dev); 275 link->io.BasePort1, 1, &link->dev);
325 if (!((local_info_t *)link->priv)->eth_dev) 276 if (!((local_info_t *)link->priv)->eth_dev)
326 goto failed; 277 goto failed;
327 278
328 /*
329 At this point, the dev_node_t structure(s) need to be
330 initialized and arranged in a linked list at link->dev_node.
331 */
332 strcpy(dev->node.dev_name, ((local_info_t *)link->priv)->eth_dev->name);
333 dev->node.major = dev->node.minor = 0;
334 link->dev_node = &dev->node;
335
336 /* Finally, report what we've done */ 279 /* Finally, report what we've done */
337 printk(KERN_INFO "%s: index 0x%02x: ", 280 dev_info(&link->dev, "index 0x%02x: ",
338 dev->node.dev_name, link->conf.ConfigIndex); 281 link->conf.ConfigIndex);
339 if (link->conf.Vpp) 282 if (link->conf.Vpp)
340 printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10); 283 printk(", Vpp %d.%d", link->conf.Vpp/10, link->conf.Vpp%10);
341 if (link->conf.Attributes & CONF_ENABLE_IRQ) 284 printk(", irq %d", link->irq);
342 printk(", irq %d", link->irq.AssignedIRQ);
343 if (link->io.NumPorts1) 285 if (link->io.NumPorts1)
344 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 286 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
345 link->io.BasePort1+link->io.NumPorts1-1); 287 link->io.BasePort1+link->io.NumPorts1-1);
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 0fb419936dff..8a2d4afc74f8 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1223,7 +1223,6 @@ static void at76_rx_callback(struct urb *urb)
1223 1223
1224 priv->rx_tasklet.data = (unsigned long)urb; 1224 priv->rx_tasklet.data = (unsigned long)urb;
1225 tasklet_schedule(&priv->rx_tasklet); 1225 tasklet_schedule(&priv->rx_tasklet);
1226 return;
1227} 1226}
1228 1227
1229static int at76_submit_rx_urb(struct at76_priv *priv) 1228static int at76_submit_rx_urb(struct at76_priv *priv)
@@ -1889,6 +1888,7 @@ static void at76_dwork_hw_scan(struct work_struct *work)
1889} 1888}
1890 1889
1891static int at76_hw_scan(struct ieee80211_hw *hw, 1890static int at76_hw_scan(struct ieee80211_hw *hw,
1891 struct ieee80211_vif *vif,
1892 struct cfg80211_scan_request *req) 1892 struct cfg80211_scan_request *req)
1893{ 1893{
1894 struct at76_priv *priv = hw->priv; 1894 struct at76_priv *priv = hw->priv;
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 4e7a7fd695c8..0a75be027afa 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -3,7 +3,7 @@ menuconfig ATH_COMMON
3 depends on CFG80211 3 depends on CFG80211
4 ---help--- 4 ---help---
5 This will enable the support for the Atheros wireless drivers. 5 This will enable the support for the Atheros wireless drivers.
6 ath5k, ath9k and ar9170 drivers share some common code, this option 6 ath5k, ath9k, ath9k_htc and ar9170 drivers share some common code, this option
7 enables the common ath.ko module which shares common helpers. 7 enables the common ath.ko module which shares common helpers.
8 8
9 For more information and documentation on this module you can visit: 9 For more information and documentation on this module you can visit:
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index dc662b76a1c8..4f845f80c098 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,41 +109,6 @@ struct ar9170_rxstream_mpdu_merge {
109 bool has_plcp; 109 bool has_plcp;
110}; 110};
111 111
112#define AR9170_NUM_TID 16
113#define WME_BA_BMP_SIZE 64
114#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE)
115
116#define WME_AC_BE 2
117#define WME_AC_BK 3
118#define WME_AC_VI 1
119#define WME_AC_VO 0
120
121#define TID_TO_WME_AC(_tid) \
122 ((((_tid) == 0) || ((_tid) == 3)) ? WME_AC_BE : \
123 (((_tid) == 1) || ((_tid) == 2)) ? WME_AC_BK : \
124 (((_tid) == 4) || ((_tid) == 5)) ? WME_AC_VI : \
125 WME_AC_VO)
126
127#define BAW_WITHIN(_start, _bawsz, _seqno) \
128 ((((_seqno) - (_start)) & 0xfff) < (_bawsz))
129
130enum ar9170_tid_state {
131 AR9170_TID_STATE_INVALID,
132 AR9170_TID_STATE_SHUTDOWN,
133 AR9170_TID_STATE_PROGRESS,
134 AR9170_TID_STATE_COMPLETE,
135};
136
137struct ar9170_sta_tid {
138 struct list_head list;
139 struct sk_buff_head queue;
140 u8 addr[ETH_ALEN];
141 u16 ssn;
142 u16 tid;
143 enum ar9170_tid_state state;
144 bool active;
145};
146
147struct ar9170_tx_queue_stats { 112struct ar9170_tx_queue_stats {
148 unsigned int len; 113 unsigned int len;
149 unsigned int limit; 114 unsigned int limit;
@@ -152,14 +117,11 @@ struct ar9170_tx_queue_stats {
152 117
153#define AR9170_QUEUE_TIMEOUT 64 118#define AR9170_QUEUE_TIMEOUT 64
154#define AR9170_TX_TIMEOUT 8 119#define AR9170_TX_TIMEOUT 8
155#define AR9170_BA_TIMEOUT 4
156#define AR9170_JANITOR_DELAY 128 120#define AR9170_JANITOR_DELAY 128
157#define AR9170_TX_INVALID_RATE 0xffffffff 121#define AR9170_TX_INVALID_RATE 0xffffffff
158 122
159#define AR9170_NUM_TX_STATUS 128 123#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
160#define AR9170_NUM_TX_AGG_MAX 30 124#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
161#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
162#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
163 125
164struct ar9170 { 126struct ar9170 {
165 struct ieee80211_hw *hw; 127 struct ieee80211_hw *hw;
@@ -234,11 +196,6 @@ struct ar9170 {
234 struct sk_buff_head tx_pending[__AR9170_NUM_TXQ]; 196 struct sk_buff_head tx_pending[__AR9170_NUM_TXQ];
235 struct sk_buff_head tx_status[__AR9170_NUM_TXQ]; 197 struct sk_buff_head tx_status[__AR9170_NUM_TXQ];
236 struct delayed_work tx_janitor; 198 struct delayed_work tx_janitor;
237 /* tx ampdu */
238 struct sk_buff_head tx_status_ampdu;
239 spinlock_t tx_ampdu_list_lock;
240 struct list_head tx_ampdu_list;
241 atomic_t tx_ampdu_pending;
242 199
243 /* rxstream mpdu merge */ 200 /* rxstream mpdu merge */
244 struct ar9170_rxstream_mpdu_merge rx_mpdu; 201 struct ar9170_rxstream_mpdu_merge rx_mpdu;
@@ -250,11 +207,6 @@ struct ar9170 {
250 u8 global_ampdu_factor; 207 u8 global_ampdu_factor;
251}; 208};
252 209
253struct ar9170_sta_info {
254 struct ar9170_sta_tid agg[AR9170_NUM_TID];
255 unsigned int ampdu_max_len;
256};
257
258struct ar9170_tx_info { 210struct ar9170_tx_info {
259 unsigned long timeout; 211 unsigned long timeout;
260}; 212};
diff --git a/drivers/net/wireless/ath/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h
index 826c45e6b274..ec8134b4b949 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.h
+++ b/drivers/net/wireless/ath/ar9170/cmd.h
@@ -79,7 +79,7 @@ __regwrite_out : \
79 if (__nreg) { \ 79 if (__nreg) { \
80 if (IS_ACCEPTING_CMD(__ar)) \ 80 if (IS_ACCEPTING_CMD(__ar)) \
81 __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \ 81 __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
82 8 * __nreg, \ 82 8 * __nreg, \
83 (u8 *) &__ar->cmdbuf[1], \ 83 (u8 *) &__ar->cmdbuf[1], \
84 0, NULL); \ 84 0, NULL); \
85 __nreg = 0; \ 85 __nreg = 0; \
diff --git a/drivers/net/wireless/ath/ar9170/eeprom.h b/drivers/net/wireless/ath/ar9170/eeprom.h
index d2c8cc83f1dd..6c4663883423 100644
--- a/drivers/net/wireless/ath/ar9170/eeprom.h
+++ b/drivers/net/wireless/ath/ar9170/eeprom.h
@@ -127,8 +127,8 @@ struct ar9170_eeprom {
127 __le16 checksum; 127 __le16 checksum;
128 __le16 version; 128 __le16 version;
129 u8 operating_flags; 129 u8 operating_flags;
130#define AR9170_OPFLAG_5GHZ 1 130#define AR9170_OPFLAG_5GHZ 1
131#define AR9170_OPFLAG_2GHZ 2 131#define AR9170_OPFLAG_2GHZ 2
132 u8 misc; 132 u8 misc;
133 __le16 reg_domain[2]; 133 __le16 reg_domain[2];
134 u8 mac_address[6]; 134 u8 mac_address[6];
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 0a1d4c28e68a..06f1f3c951a4 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -425,5 +425,6 @@ enum ar9170_txq {
425 425
426#define AR9170_TXQ_DEPTH 32 426#define AR9170_TXQ_DEPTH 32
427#define AR9170_TX_MAX_PENDING 128 427#define AR9170_TX_MAX_PENDING 128
428#define AR9170_RX_STREAM_MAX_SIZE 65535
428 429
429#endif /* __AR9170_HW_H */ 430#endif /* __AR9170_HW_H */
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c53692980990..2abc87578994 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -50,10 +50,6 @@ static int modparam_nohwcrypt;
50module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 50module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
51MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 51MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
52 52
53static int modparam_ht;
54module_param_named(ht, modparam_ht, bool, S_IRUGO);
55MODULE_PARM_DESC(ht, "enable MPDU aggregation.");
56
57#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \ 53#define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
58 .bitrate = (_bitrate), \ 54 .bitrate = (_bitrate), \
59 .flags = (_flags), \ 55 .flags = (_flags), \
@@ -182,7 +178,6 @@ static struct ieee80211_supported_band ar9170_band_5GHz = {
182}; 178};
183 179
184static void ar9170_tx(struct ar9170 *ar); 180static void ar9170_tx(struct ar9170 *ar);
185static bool ar9170_tx_ampdu(struct ar9170 *ar);
186 181
187static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr) 182static inline u16 ar9170_get_seq_h(struct ieee80211_hdr *hdr)
188{ 183{
@@ -195,21 +190,7 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
195 return ar9170_get_seq_h((void *) txc->frame_data); 190 return ar9170_get_seq_h((void *) txc->frame_data);
196} 191}
197 192
198static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr) 193#ifdef AR9170_QUEUE_DEBUG
199{
200 return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
201}
202
203static inline u16 ar9170_get_tid(struct sk_buff *skb)
204{
205 struct ar9170_tx_control *txc = (void *) skb->data;
206 return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
207}
208
209#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
210#define GET_NEXT_SEQ_FROM_SKB(skb) (GET_NEXT_SEQ(ar9170_get_seq(skb)))
211
212#if (defined AR9170_QUEUE_DEBUG) || (defined AR9170_TXAGG_DEBUG)
213static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb) 194static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
214{ 195{
215 struct ar9170_tx_control *txc = (void *) skb->data; 196 struct ar9170_tx_control *txc = (void *) skb->data;
@@ -236,7 +217,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
236 wiphy_name(ar->hw->wiphy), skb_queue_len(queue)); 217 wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
237 218
238 skb_queue_walk(queue, skb) { 219 skb_queue_walk(queue, skb) {
239 printk(KERN_DEBUG "index:%d => \n", i++); 220 printk(KERN_DEBUG "index:%d =>\n", i++);
240 ar9170_print_txheader(ar, skb); 221 ar9170_print_txheader(ar, skb);
241 } 222 }
242 if (i != skb_queue_len(queue)) 223 if (i != skb_queue_len(queue))
@@ -244,7 +225,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
244 "mismatch %d != %d\n", skb_queue_len(queue), i); 225 "mismatch %d != %d\n", skb_queue_len(queue), i);
245 printk(KERN_DEBUG "---[ end ]---\n"); 226 printk(KERN_DEBUG "---[ end ]---\n");
246} 227}
247#endif /* AR9170_QUEUE_DEBUG || AR9170_TXAGG_DEBUG */ 228#endif /* AR9170_QUEUE_DEBUG */
248 229
249#ifdef AR9170_QUEUE_DEBUG 230#ifdef AR9170_QUEUE_DEBUG
250static void ar9170_dump_txqueue(struct ar9170 *ar, 231static void ar9170_dump_txqueue(struct ar9170 *ar,
@@ -275,20 +256,6 @@ static void __ar9170_dump_txstats(struct ar9170 *ar)
275} 256}
276#endif /* AR9170_QUEUE_STOP_DEBUG */ 257#endif /* AR9170_QUEUE_STOP_DEBUG */
277 258
278#ifdef AR9170_TXAGG_DEBUG
279static void ar9170_dump_tx_status_ampdu(struct ar9170 *ar)
280{
281 unsigned long flags;
282
283 spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags);
284 printk(KERN_DEBUG "%s: A-MPDU tx_status queue => \n",
285 wiphy_name(ar->hw->wiphy));
286 __ar9170_dump_txqueue(ar, &ar->tx_status_ampdu);
287 spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags);
288}
289
290#endif /* AR9170_TXAGG_DEBUG */
291
292/* caller must guarantee exclusive access for _bin_ queue. */ 259/* caller must guarantee exclusive access for _bin_ queue. */
293static void ar9170_recycle_expired(struct ar9170 *ar, 260static void ar9170_recycle_expired(struct ar9170 *ar,
294 struct sk_buff_head *queue, 261 struct sk_buff_head *queue,
@@ -308,7 +275,7 @@ static void ar9170_recycle_expired(struct ar9170 *ar,
308 if (time_is_before_jiffies(arinfo->timeout)) { 275 if (time_is_before_jiffies(arinfo->timeout)) {
309#ifdef AR9170_QUEUE_DEBUG 276#ifdef AR9170_QUEUE_DEBUG
310 printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => " 277 printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
311 "recycle \n", wiphy_name(ar->hw->wiphy), 278 "recycle\n", wiphy_name(ar->hw->wiphy),
312 jiffies, arinfo->timeout); 279 jiffies, arinfo->timeout);
313 ar9170_print_txheader(ar, skb); 280 ar9170_print_txheader(ar, skb);
314#endif /* AR9170_QUEUE_DEBUG */ 281#endif /* AR9170_QUEUE_DEBUG */
@@ -360,70 +327,6 @@ static void ar9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
360 ieee80211_tx_status_irqsafe(ar->hw, skb); 327 ieee80211_tx_status_irqsafe(ar->hw, skb);
361} 328}
362 329
363static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
364{
365 struct sk_buff_head success;
366 struct sk_buff *skb;
367 unsigned int i;
368 unsigned long queue_bitmap = 0;
369
370 skb_queue_head_init(&success);
371
372 while (skb_queue_len(&ar->tx_status_ampdu) > AR9170_NUM_TX_STATUS)
373 __skb_queue_tail(&success, skb_dequeue(&ar->tx_status_ampdu));
374
375 ar9170_recycle_expired(ar, &ar->tx_status_ampdu, &success);
376
377#ifdef AR9170_TXAGG_DEBUG
378 printk(KERN_DEBUG "%s: collected %d A-MPDU frames.\n",
379 wiphy_name(ar->hw->wiphy), skb_queue_len(&success));
380 __ar9170_dump_txqueue(ar, &success);
381#endif /* AR9170_TXAGG_DEBUG */
382
383 while ((skb = __skb_dequeue(&success))) {
384 struct ieee80211_tx_info *txinfo;
385
386 queue_bitmap |= BIT(skb_get_queue_mapping(skb));
387
388 txinfo = IEEE80211_SKB_CB(skb);
389 ieee80211_tx_info_clear_status(txinfo);
390
391 txinfo->flags |= IEEE80211_TX_STAT_ACK;
392 txinfo->status.rates[0].count = 1;
393
394 skb_pull(skb, sizeof(struct ar9170_tx_control));
395 ieee80211_tx_status_irqsafe(ar->hw, skb);
396 }
397
398 for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
399#ifdef AR9170_QUEUE_STOP_DEBUG
400 printk(KERN_DEBUG "%s: wake queue %d\n",
401 wiphy_name(ar->hw->wiphy), i);
402 __ar9170_dump_txstats(ar);
403#endif /* AR9170_QUEUE_STOP_DEBUG */
404 ieee80211_wake_queue(ar->hw, i);
405 }
406
407 if (queue_bitmap)
408 ar9170_tx(ar);
409}
410
411static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
412{
413 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
414 struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
415
416 arinfo->timeout = jiffies +
417 msecs_to_jiffies(AR9170_BA_TIMEOUT);
418
419 skb_queue_tail(&ar->tx_status_ampdu, skb);
420 ar9170_tx_fake_ampdu_status(ar);
421
422 if (atomic_dec_and_test(&ar->tx_ampdu_pending) &&
423 !list_empty(&ar->tx_ampdu_list))
424 ar9170_tx_ampdu(ar);
425}
426
427void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb) 330void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
428{ 331{
429 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 332 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -447,14 +350,10 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
447 if (info->flags & IEEE80211_TX_CTL_NO_ACK) { 350 if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
448 ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED); 351 ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
449 } else { 352 } else {
450 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 353 arinfo->timeout = jiffies +
451 ar9170_tx_ampdu_callback(ar, skb); 354 msecs_to_jiffies(AR9170_TX_TIMEOUT);
452 } else {
453 arinfo->timeout = jiffies +
454 msecs_to_jiffies(AR9170_TX_TIMEOUT);
455 355
456 skb_queue_tail(&ar->tx_status[queue], skb); 356 skb_queue_tail(&ar->tx_status[queue], skb);
457 }
458 } 357 }
459 358
460 if (!ar->tx_stats[queue].len && 359 if (!ar->tx_stats[queue].len &&
@@ -524,38 +423,6 @@ static struct sk_buff *ar9170_get_queued_skb(struct ar9170 *ar,
524 return NULL; 423 return NULL;
525} 424}
526 425
527static void ar9170_handle_block_ack(struct ar9170 *ar, u16 count, u16 r)
528{
529 struct sk_buff *skb;
530 struct ieee80211_tx_info *txinfo;
531
532 while (count) {
533 skb = ar9170_get_queued_skb(ar, NULL, &ar->tx_status_ampdu, r);
534 if (!skb)
535 break;
536
537 txinfo = IEEE80211_SKB_CB(skb);
538 ieee80211_tx_info_clear_status(txinfo);
539
540 /* FIXME: maybe more ? */
541 txinfo->status.rates[0].count = 1;
542
543 skb_pull(skb, sizeof(struct ar9170_tx_control));
544 ieee80211_tx_status_irqsafe(ar->hw, skb);
545 count--;
546 }
547
548#ifdef AR9170_TXAGG_DEBUG
549 if (count) {
550 printk(KERN_DEBUG "%s: got %d more failed mpdus, but no more "
551 "suitable frames left in tx_status queue.\n",
552 wiphy_name(ar->hw->wiphy), count);
553
554 ar9170_dump_tx_status_ampdu(ar);
555 }
556#endif /* AR9170_TXAGG_DEBUG */
557}
558
559/* 426/*
560 * This worker tries to keeps an maintain tx_status queues. 427 * This worker tries to keeps an maintain tx_status queues.
561 * So we can guarantee that incoming tx_status reports are 428 * So we can guarantee that incoming tx_status reports are
@@ -592,8 +459,6 @@ static void ar9170_tx_janitor(struct work_struct *work)
592 resched = true; 459 resched = true;
593 } 460 }
594 461
595 ar9170_tx_fake_ampdu_status(ar);
596
597 if (!resched) 462 if (!resched)
598 return; 463 return;
599 464
@@ -673,10 +538,6 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
673 538
674 case 0xc5: 539 case 0xc5:
675 /* BlockACK events */ 540 /* BlockACK events */
676 ar9170_handle_block_ack(ar,
677 le16_to_cpu(cmd->ba_fail_cnt.failed),
678 le16_to_cpu(cmd->ba_fail_cnt.rate));
679 ar9170_tx_fake_ampdu_status(ar);
680 break; 541 break;
681 542
682 case 0xc6: 543 case 0xc6:
@@ -689,7 +550,8 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
689 550
690 /* firmware debug */ 551 /* firmware debug */
691 case 0xca: 552 case 0xca:
692 printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4); 553 printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4,
554 (char *)buf + 4);
693 break; 555 break;
694 case 0xcb: 556 case 0xcb:
695 len -= 4; 557 len -= 4;
@@ -926,7 +788,6 @@ static void ar9170_rx_phy_status(struct ar9170 *ar,
926 788
927 /* TODO: we could do something with phy_errors */ 789 /* TODO: we could do something with phy_errors */
928 status->signal = ar->noise[0] + phy->rssi_combined; 790 status->signal = ar->noise[0] + phy->rssi_combined;
929 status->noise = ar->noise[0];
930} 791}
931 792
932static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len) 793static struct sk_buff *ar9170_rx_copy_data(u8 *buf, int len)
@@ -1247,7 +1108,6 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
1247 ar->global_ampdu_density = 6; 1108 ar->global_ampdu_density = 6;
1248 ar->global_ampdu_factor = 3; 1109 ar->global_ampdu_factor = 3;
1249 1110
1250 atomic_set(&ar->tx_ampdu_pending, 0);
1251 ar->bad_hw_nagger = jiffies; 1111 ar->bad_hw_nagger = jiffies;
1252 1112
1253 err = ar->open(ar); 1113 err = ar->open(ar);
@@ -1310,40 +1170,10 @@ static void ar9170_op_stop(struct ieee80211_hw *hw)
1310 skb_queue_purge(&ar->tx_pending[i]); 1170 skb_queue_purge(&ar->tx_pending[i]);
1311 skb_queue_purge(&ar->tx_status[i]); 1171 skb_queue_purge(&ar->tx_status[i]);
1312 } 1172 }
1313 skb_queue_purge(&ar->tx_status_ampdu);
1314 1173
1315 mutex_unlock(&ar->mutex); 1174 mutex_unlock(&ar->mutex);
1316} 1175}
1317 1176
1318static void ar9170_tx_indicate_immba(struct ar9170 *ar, struct sk_buff *skb)
1319{
1320 struct ar9170_tx_control *txc = (void *) skb->data;
1321
1322 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_IMM_AMPDU);
1323}
1324
1325static void ar9170_tx_copy_phy(struct ar9170 *ar, struct sk_buff *dst,
1326 struct sk_buff *src)
1327{
1328 struct ar9170_tx_control *dst_txc, *src_txc;
1329 struct ieee80211_tx_info *dst_info, *src_info;
1330 struct ar9170_tx_info *dst_arinfo, *src_arinfo;
1331
1332 src_txc = (void *) src->data;
1333 src_info = IEEE80211_SKB_CB(src);
1334 src_arinfo = (void *) src_info->rate_driver_data;
1335
1336 dst_txc = (void *) dst->data;
1337 dst_info = IEEE80211_SKB_CB(dst);
1338 dst_arinfo = (void *) dst_info->rate_driver_data;
1339
1340 dst_txc->phy_control = src_txc->phy_control;
1341
1342 /* same MCS for the whole aggregate */
1343 memcpy(dst_info->driver_rates, src_info->driver_rates,
1344 sizeof(dst_info->driver_rates));
1345}
1346
1347static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb) 1177static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1348{ 1178{
1349 struct ieee80211_hdr *hdr; 1179 struct ieee80211_hdr *hdr;
@@ -1420,14 +1250,7 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
1420 txc->phy_control |= 1250 txc->phy_control |=
1421 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT); 1251 cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
1422 1252
1423 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 1253 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
1424 if (unlikely(!info->control.sta))
1425 goto err_out;
1426
1427 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
1428 } else {
1429 txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
1430 }
1431 } 1254 }
1432 1255
1433 return 0; 1256 return 0;
@@ -1537,158 +1360,6 @@ static void ar9170_tx_prepare_phy(struct ar9170 *ar, struct sk_buff *skb)
1537 txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT); 1360 txc->phy_control |= cpu_to_le32(chains << AR9170_TX_PHY_TXCHAIN_SHIFT);
1538} 1361}
1539 1362
1540static bool ar9170_tx_ampdu(struct ar9170 *ar)
1541{
1542 struct sk_buff_head agg;
1543 struct ar9170_sta_tid *tid_info = NULL, *tmp;
1544 struct sk_buff *skb, *first = NULL;
1545 unsigned long flags, f2;
1546 unsigned int i = 0;
1547 u16 seq, queue, tmpssn;
1548 bool run = false;
1549
1550 skb_queue_head_init(&agg);
1551
1552 spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
1553 if (list_empty(&ar->tx_ampdu_list)) {
1554#ifdef AR9170_TXAGG_DEBUG
1555 printk(KERN_DEBUG "%s: aggregation list is empty.\n",
1556 wiphy_name(ar->hw->wiphy));
1557#endif /* AR9170_TXAGG_DEBUG */
1558 goto out_unlock;
1559 }
1560
1561 list_for_each_entry_safe(tid_info, tmp, &ar->tx_ampdu_list, list) {
1562 if (tid_info->state != AR9170_TID_STATE_COMPLETE) {
1563#ifdef AR9170_TXAGG_DEBUG
1564 printk(KERN_DEBUG "%s: dangling aggregation entry!\n",
1565 wiphy_name(ar->hw->wiphy));
1566#endif /* AR9170_TXAGG_DEBUG */
1567 continue;
1568 }
1569
1570 if (++i > 64) {
1571#ifdef AR9170_TXAGG_DEBUG
1572 printk(KERN_DEBUG "%s: enough frames aggregated.\n",
1573 wiphy_name(ar->hw->wiphy));
1574#endif /* AR9170_TXAGG_DEBUG */
1575 break;
1576 }
1577
1578 queue = TID_TO_WME_AC(tid_info->tid);
1579
1580 if (skb_queue_len(&ar->tx_pending[queue]) >=
1581 AR9170_NUM_TX_AGG_MAX) {
1582#ifdef AR9170_TXAGG_DEBUG
1583 printk(KERN_DEBUG "%s: queue %d full.\n",
1584 wiphy_name(ar->hw->wiphy), queue);
1585#endif /* AR9170_TXAGG_DEBUG */
1586 continue;
1587 }
1588
1589 list_del_init(&tid_info->list);
1590
1591 spin_lock_irqsave(&tid_info->queue.lock, f2);
1592 tmpssn = seq = tid_info->ssn;
1593 first = skb_peek(&tid_info->queue);
1594
1595 if (likely(first))
1596 tmpssn = ar9170_get_seq(first);
1597
1598 if (unlikely(tmpssn != seq)) {
1599#ifdef AR9170_TXAGG_DEBUG
1600 printk(KERN_DEBUG "%s: ssn mismatch [%d != %d]\n.",
1601 wiphy_name(ar->hw->wiphy), seq, tmpssn);
1602#endif /* AR9170_TXAGG_DEBUG */
1603 tid_info->ssn = tmpssn;
1604 }
1605
1606#ifdef AR9170_TXAGG_DEBUG
1607 printk(KERN_DEBUG "%s: generate A-MPDU for tid:%d ssn:%d with "
1608 "%d queued frames.\n", wiphy_name(ar->hw->wiphy),
1609 tid_info->tid, tid_info->ssn,
1610 skb_queue_len(&tid_info->queue));
1611 __ar9170_dump_txqueue(ar, &tid_info->queue);
1612#endif /* AR9170_TXAGG_DEBUG */
1613
1614 while ((skb = skb_peek(&tid_info->queue))) {
1615 if (unlikely(ar9170_get_seq(skb) != seq))
1616 break;
1617
1618 __skb_unlink(skb, &tid_info->queue);
1619 tid_info->ssn = seq = GET_NEXT_SEQ(seq);
1620
1621 if (unlikely(skb_get_queue_mapping(skb) != queue)) {
1622#ifdef AR9170_TXAGG_DEBUG
1623 printk(KERN_DEBUG "%s: tid:%d(q:%d) queue:%d "
1624 "!match.\n", wiphy_name(ar->hw->wiphy),
1625 tid_info->tid,
1626 TID_TO_WME_AC(tid_info->tid),
1627 skb_get_queue_mapping(skb));
1628#endif /* AR9170_TXAGG_DEBUG */
1629 dev_kfree_skb_any(skb);
1630 continue;
1631 }
1632
1633 if (unlikely(first == skb)) {
1634 ar9170_tx_prepare_phy(ar, skb);
1635 __skb_queue_tail(&agg, skb);
1636 first = skb;
1637 } else {
1638 ar9170_tx_copy_phy(ar, skb, first);
1639 __skb_queue_tail(&agg, skb);
1640 }
1641
1642 if (unlikely(skb_queue_len(&agg) ==
1643 AR9170_NUM_TX_AGG_MAX))
1644 break;
1645 }
1646
1647 if (skb_queue_empty(&tid_info->queue))
1648 tid_info->active = false;
1649 else
1650 list_add_tail(&tid_info->list,
1651 &ar->tx_ampdu_list);
1652
1653 spin_unlock_irqrestore(&tid_info->queue.lock, f2);
1654
1655 if (unlikely(skb_queue_empty(&agg))) {
1656#ifdef AR9170_TXAGG_DEBUG
1657 printk(KERN_DEBUG "%s: queued empty list!\n",
1658 wiphy_name(ar->hw->wiphy));
1659#endif /* AR9170_TXAGG_DEBUG */
1660 continue;
1661 }
1662
1663 /*
1664 * tell the FW/HW that this is the last frame,
1665 * that way it will wait for the immediate block ack.
1666 */
1667 ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
1668
1669#ifdef AR9170_TXAGG_DEBUG
1670 printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
1671 wiphy_name(ar->hw->wiphy));
1672 __ar9170_dump_txqueue(ar, &agg);
1673#endif /* AR9170_TXAGG_DEBUG */
1674
1675 spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
1676
1677 spin_lock_irqsave(&ar->tx_pending[queue].lock, flags);
1678 skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
1679 spin_unlock_irqrestore(&ar->tx_pending[queue].lock, flags);
1680 run = true;
1681
1682 spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
1683 }
1684
1685out_unlock:
1686 spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
1687 __skb_queue_purge(&agg);
1688
1689 return run;
1690}
1691
1692static void ar9170_tx(struct ar9170 *ar) 1363static void ar9170_tx(struct ar9170 *ar)
1693{ 1364{
1694 struct sk_buff *skb; 1365 struct sk_buff *skb;
@@ -1728,7 +1399,7 @@ static void ar9170_tx(struct ar9170 *ar)
1728 printk(KERN_DEBUG "%s: queue %d full\n", 1399 printk(KERN_DEBUG "%s: queue %d full\n",
1729 wiphy_name(ar->hw->wiphy), i); 1400 wiphy_name(ar->hw->wiphy), i);
1730 1401
1731 printk(KERN_DEBUG "%s: stuck frames: ===> \n", 1402 printk(KERN_DEBUG "%s: stuck frames: ===>\n",
1732 wiphy_name(ar->hw->wiphy)); 1403 wiphy_name(ar->hw->wiphy));
1733 ar9170_dump_txqueue(ar, &ar->tx_pending[i]); 1404 ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
1734 ar9170_dump_txqueue(ar, &ar->tx_status[i]); 1405 ar9170_dump_txqueue(ar, &ar->tx_status[i]);
@@ -1763,9 +1434,6 @@ static void ar9170_tx(struct ar9170 *ar)
1763 arinfo->timeout = jiffies + 1434 arinfo->timeout = jiffies +
1764 msecs_to_jiffies(AR9170_TX_TIMEOUT); 1435 msecs_to_jiffies(AR9170_TX_TIMEOUT);
1765 1436
1766 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1767 atomic_inc(&ar->tx_ampdu_pending);
1768
1769#ifdef AR9170_QUEUE_DEBUG 1437#ifdef AR9170_QUEUE_DEBUG
1770 printk(KERN_DEBUG "%s: send frame q:%d =>\n", 1438 printk(KERN_DEBUG "%s: send frame q:%d =>\n",
1771 wiphy_name(ar->hw->wiphy), i); 1439 wiphy_name(ar->hw->wiphy), i);
@@ -1774,9 +1442,6 @@ static void ar9170_tx(struct ar9170 *ar)
1774 1442
1775 err = ar->tx(ar, skb); 1443 err = ar->tx(ar, skb);
1776 if (unlikely(err)) { 1444 if (unlikely(err)) {
1777 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1778 atomic_dec(&ar->tx_ampdu_pending);
1779
1780 frames_failed++; 1445 frames_failed++;
1781 dev_kfree_skb_any(skb); 1446 dev_kfree_skb_any(skb);
1782 } else { 1447 } else {
@@ -1823,94 +1488,11 @@ static void ar9170_tx(struct ar9170 *ar)
1823 msecs_to_jiffies(AR9170_JANITOR_DELAY)); 1488 msecs_to_jiffies(AR9170_JANITOR_DELAY));
1824} 1489}
1825 1490
1826static bool ar9170_tx_ampdu_queue(struct ar9170 *ar, struct sk_buff *skb)
1827{
1828 struct ieee80211_tx_info *txinfo;
1829 struct ar9170_sta_info *sta_info;
1830 struct ar9170_sta_tid *agg;
1831 struct sk_buff *iter;
1832 unsigned long flags, f2;
1833 unsigned int max;
1834 u16 tid, seq, qseq;
1835 bool run = false, queue = false;
1836
1837 tid = ar9170_get_tid(skb);
1838 seq = ar9170_get_seq(skb);
1839 txinfo = IEEE80211_SKB_CB(skb);
1840 sta_info = (void *) txinfo->control.sta->drv_priv;
1841 agg = &sta_info->agg[tid];
1842 max = sta_info->ampdu_max_len;
1843
1844 spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
1845
1846 if (unlikely(agg->state != AR9170_TID_STATE_COMPLETE)) {
1847#ifdef AR9170_TXAGG_DEBUG
1848 printk(KERN_DEBUG "%s: BlockACK session not fully initialized "
1849 "for ESS:%pM tid:%d state:%d.\n",
1850 wiphy_name(ar->hw->wiphy), agg->addr, agg->tid,
1851 agg->state);
1852#endif /* AR9170_TXAGG_DEBUG */
1853 goto err_unlock;
1854 }
1855
1856 if (!agg->active) {
1857 agg->active = true;
1858 agg->ssn = seq;
1859 queue = true;
1860 }
1861
1862 /* check if seq is within the BA window */
1863 if (unlikely(!BAW_WITHIN(agg->ssn, max, seq))) {
1864#ifdef AR9170_TXAGG_DEBUG
1865 printk(KERN_DEBUG "%s: frame with tid:%d seq:%d does not "
1866 "fit into BA window (%d - %d)\n",
1867 wiphy_name(ar->hw->wiphy), tid, seq, agg->ssn,
1868 (agg->ssn + max) & 0xfff);
1869#endif /* AR9170_TXAGG_DEBUG */
1870 goto err_unlock;
1871 }
1872
1873 spin_lock_irqsave(&agg->queue.lock, f2);
1874
1875 skb_queue_reverse_walk(&agg->queue, iter) {
1876 qseq = ar9170_get_seq(iter);
1877
1878 if (GET_NEXT_SEQ(qseq) == seq) {
1879 __skb_queue_after(&agg->queue, iter, skb);
1880 goto queued;
1881 }
1882 }
1883
1884 __skb_queue_head(&agg->queue, skb);
1885
1886queued:
1887 spin_unlock_irqrestore(&agg->queue.lock, f2);
1888
1889#ifdef AR9170_TXAGG_DEBUG
1890 printk(KERN_DEBUG "%s: new aggregate %p queued.\n",
1891 wiphy_name(ar->hw->wiphy), skb);
1892 __ar9170_dump_txqueue(ar, &agg->queue);
1893#endif /* AR9170_TXAGG_DEBUG */
1894
1895 if (skb_queue_len(&agg->queue) >= AR9170_NUM_TX_AGG_MAX)
1896 run = true;
1897
1898 if (queue)
1899 list_add_tail(&agg->list, &ar->tx_ampdu_list);
1900
1901 spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
1902 return run;
1903
1904err_unlock:
1905 spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
1906 dev_kfree_skb_irq(skb);
1907 return false;
1908}
1909
1910int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 1491int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1911{ 1492{
1912 struct ar9170 *ar = hw->priv; 1493 struct ar9170 *ar = hw->priv;
1913 struct ieee80211_tx_info *info; 1494 struct ieee80211_tx_info *info;
1495 unsigned int queue;
1914 1496
1915 if (unlikely(!IS_STARTED(ar))) 1497 if (unlikely(!IS_STARTED(ar)))
1916 goto err_free; 1498 goto err_free;
@@ -1918,18 +1500,10 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1918 if (unlikely(ar9170_tx_prepare(ar, skb))) 1500 if (unlikely(ar9170_tx_prepare(ar, skb)))
1919 goto err_free; 1501 goto err_free;
1920 1502
1503 queue = skb_get_queue_mapping(skb);
1921 info = IEEE80211_SKB_CB(skb); 1504 info = IEEE80211_SKB_CB(skb);
1922 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 1505 ar9170_tx_prepare_phy(ar, skb);
1923 bool run = ar9170_tx_ampdu_queue(ar, skb); 1506 skb_queue_tail(&ar->tx_pending[queue], skb);
1924
1925 if (run || !atomic_read(&ar->tx_ampdu_pending))
1926 ar9170_tx_ampdu(ar);
1927 } else {
1928 unsigned int queue = skb_get_queue_mapping(skb);
1929
1930 ar9170_tx_prepare_phy(ar, skb);
1931 skb_queue_tail(&ar->tx_pending[queue], skb);
1932 }
1933 1507
1934 ar9170_tx(ar); 1508 ar9170_tx(ar);
1935 return NETDEV_TX_OK; 1509 return NETDEV_TX_OK;
@@ -2046,21 +1620,17 @@ out:
2046 return err; 1620 return err;
2047} 1621}
2048 1622
2049static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count, 1623static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw,
2050 struct dev_addr_list *mclist) 1624 struct netdev_hw_addr_list *mc_list)
2051{ 1625{
2052 u64 mchash; 1626 u64 mchash;
2053 int i; 1627 struct netdev_hw_addr *ha;
2054 1628
2055 /* always get broadcast frames */ 1629 /* always get broadcast frames */
2056 mchash = 1ULL << (0xff >> 2); 1630 mchash = 1ULL << (0xff >> 2);
2057 1631
2058 for (i = 0; i < mc_count; i++) { 1632 netdev_hw_addr_list_for_each(ha, mc_list)
2059 if (WARN_ON(!mclist)) 1633 mchash |= 1ULL << (ha->addr[5] >> 2);
2060 break;
2061 mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
2062 mclist = mclist->next;
2063 }
2064 1634
2065 return mchash; 1635 return mchash;
2066} 1636}
@@ -2330,57 +1900,6 @@ out:
2330 return err; 1900 return err;
2331} 1901}
2332 1902
2333static int ar9170_sta_add(struct ieee80211_hw *hw,
2334 struct ieee80211_vif *vif,
2335 struct ieee80211_sta *sta)
2336{
2337 struct ar9170 *ar = hw->priv;
2338 struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
2339 unsigned int i;
2340
2341 memset(sta_info, 0, sizeof(*sta_info));
2342
2343 if (!sta->ht_cap.ht_supported)
2344 return 0;
2345
2346 if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
2347 ar->global_ampdu_density = sta->ht_cap.ampdu_density;
2348
2349 if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
2350 ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
2351
2352 for (i = 0; i < AR9170_NUM_TID; i++) {
2353 sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
2354 sta_info->agg[i].active = false;
2355 sta_info->agg[i].ssn = 0;
2356 sta_info->agg[i].tid = i;
2357 INIT_LIST_HEAD(&sta_info->agg[i].list);
2358 skb_queue_head_init(&sta_info->agg[i].queue);
2359 }
2360
2361 sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
2362
2363 return 0;
2364}
2365
2366static int ar9170_sta_remove(struct ieee80211_hw *hw,
2367 struct ieee80211_vif *vif,
2368 struct ieee80211_sta *sta)
2369{
2370 struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
2371 unsigned int i;
2372
2373 if (!sta->ht_cap.ht_supported)
2374 return 0;
2375
2376 for (i = 0; i < AR9170_NUM_TID; i++) {
2377 sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
2378 skb_queue_purge(&sta_info->agg[i].queue);
2379 }
2380
2381 return 0;
2382}
2383
2384static int ar9170_get_stats(struct ieee80211_hw *hw, 1903static int ar9170_get_stats(struct ieee80211_hw *hw,
2385 struct ieee80211_low_level_stats *stats) 1904 struct ieee80211_low_level_stats *stats)
2386{ 1905{
@@ -2423,55 +1942,7 @@ static int ar9170_ampdu_action(struct ieee80211_hw *hw,
2423 enum ieee80211_ampdu_mlme_action action, 1942 enum ieee80211_ampdu_mlme_action action,
2424 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 1943 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2425{ 1944{
2426 struct ar9170 *ar = hw->priv;
2427 struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
2428 struct ar9170_sta_tid *tid_info = &sta_info->agg[tid];
2429 unsigned long flags;
2430
2431 if (!modparam_ht)
2432 return -EOPNOTSUPP;
2433
2434 switch (action) { 1945 switch (action) {
2435 case IEEE80211_AMPDU_TX_START:
2436 spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
2437 if (tid_info->state != AR9170_TID_STATE_SHUTDOWN ||
2438 !list_empty(&tid_info->list)) {
2439 spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
2440#ifdef AR9170_TXAGG_DEBUG
2441 printk(KERN_INFO "%s: A-MPDU [ESS:[%pM] tid:[%d]] "
2442 "is in a very bad state!\n",
2443 wiphy_name(hw->wiphy), sta->addr, tid);
2444#endif /* AR9170_TXAGG_DEBUG */
2445 return -EBUSY;
2446 }
2447
2448 *ssn = tid_info->ssn;
2449 tid_info->state = AR9170_TID_STATE_PROGRESS;
2450 tid_info->active = false;
2451 spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
2452 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2453 break;
2454
2455 case IEEE80211_AMPDU_TX_STOP:
2456 spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
2457 tid_info->state = AR9170_TID_STATE_SHUTDOWN;
2458 list_del_init(&tid_info->list);
2459 tid_info->active = false;
2460 skb_queue_purge(&tid_info->queue);
2461 spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
2462 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
2463 break;
2464
2465 case IEEE80211_AMPDU_TX_OPERATIONAL:
2466#ifdef AR9170_TXAGG_DEBUG
2467 printk(KERN_INFO "%s: A-MPDU for %pM [tid:%d] Operational.\n",
2468 wiphy_name(hw->wiphy), sta->addr, tid);
2469#endif /* AR9170_TXAGG_DEBUG */
2470 spin_lock_irqsave(&ar->tx_ampdu_list_lock, flags);
2471 sta_info->agg[tid].state = AR9170_TID_STATE_COMPLETE;
2472 spin_unlock_irqrestore(&ar->tx_ampdu_list_lock, flags);
2473 break;
2474
2475 case IEEE80211_AMPDU_RX_START: 1946 case IEEE80211_AMPDU_RX_START:
2476 case IEEE80211_AMPDU_RX_STOP: 1947 case IEEE80211_AMPDU_RX_STOP:
2477 /* Handled by firmware */ 1948 /* Handled by firmware */
@@ -2497,8 +1968,6 @@ static const struct ieee80211_ops ar9170_ops = {
2497 .bss_info_changed = ar9170_op_bss_info_changed, 1968 .bss_info_changed = ar9170_op_bss_info_changed,
2498 .get_tsf = ar9170_op_get_tsf, 1969 .get_tsf = ar9170_op_get_tsf,
2499 .set_key = ar9170_set_key, 1970 .set_key = ar9170_set_key,
2500 .sta_add = ar9170_sta_add,
2501 .sta_remove = ar9170_sta_remove,
2502 .get_stats = ar9170_get_stats, 1971 .get_stats = ar9170_get_stats,
2503 .ampdu_action = ar9170_ampdu_action, 1972 .ampdu_action = ar9170_ampdu_action,
2504}; 1973};
@@ -2516,7 +1985,7 @@ void *ar9170_alloc(size_t priv_size)
2516 * tends to split the streams into separate rx descriptors. 1985 * tends to split the streams into separate rx descriptors.
2517 */ 1986 */
2518 1987
2519 skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL); 1988 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
2520 if (!skb) 1989 if (!skb)
2521 goto err_nomem; 1990 goto err_nomem;
2522 1991
@@ -2531,8 +2000,6 @@ void *ar9170_alloc(size_t priv_size)
2531 mutex_init(&ar->mutex); 2000 mutex_init(&ar->mutex);
2532 spin_lock_init(&ar->cmdlock); 2001 spin_lock_init(&ar->cmdlock);
2533 spin_lock_init(&ar->tx_stats_lock); 2002 spin_lock_init(&ar->tx_stats_lock);
2534 spin_lock_init(&ar->tx_ampdu_list_lock);
2535 skb_queue_head_init(&ar->tx_status_ampdu);
2536 for (i = 0; i < __AR9170_NUM_TXQ; i++) { 2003 for (i = 0; i < __AR9170_NUM_TXQ; i++) {
2537 skb_queue_head_init(&ar->tx_status[i]); 2004 skb_queue_head_init(&ar->tx_status[i]);
2538 skb_queue_head_init(&ar->tx_pending[i]); 2005 skb_queue_head_init(&ar->tx_pending[i]);
@@ -2540,7 +2007,6 @@ void *ar9170_alloc(size_t priv_size)
2540 ar9170_rx_reset_rx_mpdu(ar); 2007 ar9170_rx_reset_rx_mpdu(ar);
2541 INIT_WORK(&ar->beacon_work, ar9170_new_beacon); 2008 INIT_WORK(&ar->beacon_work, ar9170_new_beacon);
2542 INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor); 2009 INIT_DELAYED_WORK(&ar->tx_janitor, ar9170_tx_janitor);
2543 INIT_LIST_HEAD(&ar->tx_ampdu_list);
2544 2010
2545 /* all hw supports 2.4 GHz, so set channel to 1 by default */ 2011 /* all hw supports 2.4 GHz, so set channel to 1 by default */
2546 ar->channel = &ar9170_2ghz_chantable[0]; 2012 ar->channel = &ar9170_2ghz_chantable[0];
@@ -2551,19 +2017,10 @@ void *ar9170_alloc(size_t priv_size)
2551 BIT(NL80211_IFTYPE_ADHOC); 2017 BIT(NL80211_IFTYPE_ADHOC);
2552 ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS | 2018 ar->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
2553 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 2019 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2554 IEEE80211_HW_SIGNAL_DBM | 2020 IEEE80211_HW_SIGNAL_DBM;
2555 IEEE80211_HW_NOISE_DBM;
2556
2557 if (modparam_ht) {
2558 ar->hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
2559 } else {
2560 ar9170_band_2GHz.ht_cap.ht_supported = false;
2561 ar9170_band_5GHz.ht_cap.ht_supported = false;
2562 }
2563 2021
2564 ar->hw->queues = __AR9170_NUM_TXQ; 2022 ar->hw->queues = __AR9170_NUM_TXQ;
2565 ar->hw->extra_tx_headroom = 8; 2023 ar->hw->extra_tx_headroom = 8;
2566 ar->hw->sta_data_size = sizeof(struct ar9170_sta_info);
2567 2024
2568 ar->hw->max_rates = 1; 2025 ar->hw->max_rates = 1;
2569 ar->hw->max_rate_tries = 3; 2026 ar->hw->max_rate_tries = 3;
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 99a6da464bd3..82ab532a4923 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -42,6 +42,7 @@
42#include <linux/usb.h> 42#include <linux/usb.h>
43#include <linux/firmware.h> 43#include <linux/firmware.h>
44#include <linux/etherdevice.h> 44#include <linux/etherdevice.h>
45#include <linux/device.h>
45#include <net/mac80211.h> 46#include <net/mac80211.h>
46#include "ar9170.h" 47#include "ar9170.h"
47#include "cmd.h" 48#include "cmd.h"
@@ -67,18 +68,28 @@ static struct usb_device_id ar9170_usb_ids[] = {
67 { USB_DEVICE(0x0cf3, 0x1001) }, 68 { USB_DEVICE(0x0cf3, 0x1001) },
68 /* TP-Link TL-WN821N v2 */ 69 /* TP-Link TL-WN821N v2 */
69 { USB_DEVICE(0x0cf3, 0x1002) }, 70 { USB_DEVICE(0x0cf3, 0x1002) },
71 /* 3Com Dual Band 802.11n USB Adapter */
72 { USB_DEVICE(0x0cf3, 0x1010) },
73 /* H3C Dual Band 802.11n USB Adapter */
74 { USB_DEVICE(0x0cf3, 0x1011) },
70 /* Cace Airpcap NX */ 75 /* Cace Airpcap NX */
71 { USB_DEVICE(0xcace, 0x0300) }, 76 { USB_DEVICE(0xcace, 0x0300) },
72 /* D-Link DWA 160 A1 */ 77 /* D-Link DWA 160 A1 */
73 { USB_DEVICE(0x07d1, 0x3c10) }, 78 { USB_DEVICE(0x07d1, 0x3c10) },
74 /* D-Link DWA 160 A2 */ 79 /* D-Link DWA 160 A2 */
75 { USB_DEVICE(0x07d1, 0x3a09) }, 80 { USB_DEVICE(0x07d1, 0x3a09) },
81 /* Netgear WNA1000 */
82 { USB_DEVICE(0x0846, 0x9040) },
76 /* Netgear WNDA3100 */ 83 /* Netgear WNDA3100 */
77 { USB_DEVICE(0x0846, 0x9010) }, 84 { USB_DEVICE(0x0846, 0x9010) },
78 /* Netgear WN111 v2 */ 85 /* Netgear WN111 v2 */
79 { USB_DEVICE(0x0846, 0x9001) }, 86 { USB_DEVICE(0x0846, 0x9001) },
80 /* Zydas ZD1221 */ 87 /* Zydas ZD1221 */
81 { USB_DEVICE(0x0ace, 0x1221) }, 88 { USB_DEVICE(0x0ace, 0x1221) },
89 /* Proxim ORiNOCO 802.11n USB */
90 { USB_DEVICE(0x1435, 0x0804) },
91 /* WNC Generic 11n USB Dongle */
92 { USB_DEVICE(0x1435, 0x0326) },
82 /* ZyXEL NWD271N */ 93 /* ZyXEL NWD271N */
83 { USB_DEVICE(0x0586, 0x3417) }, 94 { USB_DEVICE(0x0586, 0x3417) },
84 /* Z-Com UB81 BG */ 95 /* Z-Com UB81 BG */
@@ -99,6 +110,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
99 { USB_DEVICE(0x0409, 0x0249) }, 110 { USB_DEVICE(0x0409, 0x0249) },
100 /* AVM FRITZ!WLAN USB Stick N 2.4 */ 111 /* AVM FRITZ!WLAN USB Stick N 2.4 */
101 { USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY }, 112 { USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY },
113 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
114 { USB_DEVICE(0x1668, 0x1200) },
102 115
103 /* terminate */ 116 /* terminate */
104 {} 117 {}
@@ -202,7 +215,7 @@ resubmit:
202 return; 215 return;
203 216
204free: 217free:
205 usb_buffer_free(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma); 218 usb_free_coherent(aru->udev, 64, urb->transfer_buffer, urb->transfer_dma);
206} 219}
207 220
208static void ar9170_usb_rx_completed(struct urb *urb) 221static void ar9170_usb_rx_completed(struct urb *urb)
@@ -283,7 +296,7 @@ static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
283 if (!urb) 296 if (!urb)
284 goto out; 297 goto out;
285 298
286 ibuf = usb_buffer_alloc(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma); 299 ibuf = usb_alloc_coherent(aru->udev, 64, GFP_KERNEL, &urb->transfer_dma);
287 if (!ibuf) 300 if (!ibuf)
288 goto out; 301 goto out;
289 302
@@ -296,8 +309,8 @@ static int ar9170_usb_alloc_rx_irq_urb(struct ar9170_usb *aru)
296 err = usb_submit_urb(urb, GFP_KERNEL); 309 err = usb_submit_urb(urb, GFP_KERNEL);
297 if (err) { 310 if (err) {
298 usb_unanchor_urb(urb); 311 usb_unanchor_urb(urb);
299 usb_buffer_free(aru->udev, 64, urb->transfer_buffer, 312 usb_free_coherent(aru->udev, 64, urb->transfer_buffer,
300 urb->transfer_dma); 313 urb->transfer_dma);
301 } 314 }
302 315
303out: 316out:
@@ -727,12 +740,16 @@ static void ar9170_usb_firmware_failed(struct ar9170_usb *aru)
727{ 740{
728 struct device *parent = aru->udev->dev.parent; 741 struct device *parent = aru->udev->dev.parent;
729 742
743 complete(&aru->firmware_loading_complete);
744
730 /* unbind anything failed */ 745 /* unbind anything failed */
731 if (parent) 746 if (parent)
732 down(&parent->sem); 747 device_lock(parent);
733 device_release_driver(&aru->udev->dev); 748 device_release_driver(&aru->udev->dev);
734 if (parent) 749 if (parent)
735 up(&parent->sem); 750 device_unlock(parent);
751
752 usb_put_dev(aru->udev);
736} 753}
737 754
738static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context) 755static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
@@ -761,6 +778,8 @@ static void ar9170_usb_firmware_finish(const struct firmware *fw, void *context)
761 if (err) 778 if (err)
762 goto err_unrx; 779 goto err_unrx;
763 780
781 complete(&aru->firmware_loading_complete);
782 usb_put_dev(aru->udev);
764 return; 783 return;
765 784
766 err_unrx: 785 err_unrx:
@@ -858,6 +877,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
858 init_usb_anchor(&aru->tx_pending); 877 init_usb_anchor(&aru->tx_pending);
859 init_usb_anchor(&aru->tx_submitted); 878 init_usb_anchor(&aru->tx_submitted);
860 init_completion(&aru->cmd_wait); 879 init_completion(&aru->cmd_wait);
880 init_completion(&aru->firmware_loading_complete);
861 spin_lock_init(&aru->tx_urb_lock); 881 spin_lock_init(&aru->tx_urb_lock);
862 882
863 aru->tx_pending_urbs = 0; 883 aru->tx_pending_urbs = 0;
@@ -877,6 +897,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
877 if (err) 897 if (err)
878 goto err_freehw; 898 goto err_freehw;
879 899
900 usb_get_dev(aru->udev);
880 return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw", 901 return request_firmware_nowait(THIS_MODULE, 1, "ar9170.fw",
881 &aru->udev->dev, GFP_KERNEL, aru, 902 &aru->udev->dev, GFP_KERNEL, aru,
882 ar9170_usb_firmware_step2); 903 ar9170_usb_firmware_step2);
@@ -896,6 +917,9 @@ static void ar9170_usb_disconnect(struct usb_interface *intf)
896 return; 917 return;
897 918
898 aru->common.state = AR9170_IDLE; 919 aru->common.state = AR9170_IDLE;
920
921 wait_for_completion(&aru->firmware_loading_complete);
922
899 ar9170_unregister(&aru->common); 923 ar9170_unregister(&aru->common);
900 ar9170_usb_cancel_urbs(aru); 924 ar9170_usb_cancel_urbs(aru);
901 925
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index a2ce3b169ceb..919b06046eb3 100644
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -71,6 +71,7 @@ struct ar9170_usb {
71 unsigned int tx_pending_urbs; 71 unsigned int tx_pending_urbs;
72 72
73 struct completion cmd_wait; 73 struct completion cmd_wait;
74 struct completion firmware_loading_complete;
74 int readlen; 75 int readlen;
75 u8 *readbuf; 76 u8 *readbuf;
76 77
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 71fc960814f0..d32f2828b098 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -48,6 +48,12 @@ enum ath_device_state {
48 ATH_HW_INITIALIZED, 48 ATH_HW_INITIALIZED,
49}; 49};
50 50
51enum ath_bus_type {
52 ATH_PCI,
53 ATH_AHB,
54 ATH_USB,
55};
56
51struct reg_dmn_pair_mapping { 57struct reg_dmn_pair_mapping {
52 u16 regDmnEnum; 58 u16 regDmnEnum;
53 u16 reg_5ghz_ctl; 59 u16 reg_5ghz_ctl;
@@ -65,17 +71,30 @@ struct ath_regulatory {
65 struct reg_dmn_pair_mapping *regpair; 71 struct reg_dmn_pair_mapping *regpair;
66}; 72};
67 73
74/**
75 * struct ath_ops - Register read/write operations
76 *
77 * @read: Register read
78 * @write: Register write
79 * @enable_write_buffer: Enable multiple register writes
80 * @disable_write_buffer: Disable multiple register writes
81 * @write_flush: Flush buffered register writes
82 */
68struct ath_ops { 83struct ath_ops {
69 unsigned int (*read)(void *, u32 reg_offset); 84 unsigned int (*read)(void *, u32 reg_offset);
70 void (*write)(void *, u32 val, u32 reg_offset); 85 void (*write)(void *, u32 val, u32 reg_offset);
86 void (*enable_write_buffer)(void *);
87 void (*disable_write_buffer)(void *);
88 void (*write_flush) (void *);
71}; 89};
72 90
73struct ath_common; 91struct ath_common;
74 92
75struct ath_bus_ops { 93struct ath_bus_ops {
76 void (*read_cachesize)(struct ath_common *common, int *csz); 94 enum ath_bus_type ath_bus_type;
77 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data); 95 void (*read_cachesize)(struct ath_common *common, int *csz);
78 void (*bt_coex_prep)(struct ath_common *common); 96 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
97 void (*bt_coex_prep)(struct ath_common *common);
79}; 98};
80 99
81struct ath_common { 100struct ath_common {
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 090dc6d268a3..cc09595b781a 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -12,5 +12,6 @@ ath5k-y += attach.o
12ath5k-y += base.o 12ath5k-y += base.o
13ath5k-y += led.o 13ath5k-y += led.o
14ath5k-y += rfkill.o 14ath5k-y += rfkill.o
15ath5k-y += ani.o
15ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o 16ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
16obj-$(CONFIG_ATH5K) += ath5k.o 17obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
new file mode 100644
index 000000000000..f2311ab35504
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -0,0 +1,744 @@
1/*
2 * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath5k.h"
18#include "base.h"
19#include "reg.h"
20#include "debug.h"
21#include "ani.h"
22
23/**
24 * DOC: Basic ANI Operation
25 *
26 * Adaptive Noise Immunity (ANI) controls five noise immunity parameters
27 * depending on the amount of interference in the environment, increasing
28 * or reducing sensitivity as necessary.
29 *
30 * The parameters are:
31 * - "noise immunity"
32 * - "spur immunity"
33 * - "firstep level"
34 * - "OFDM weak signal detection"
35 * - "CCK weak signal detection"
36 *
 37 * Basically we look at the amount of OFDM and CCK timing errors we get and then
38 * raise or lower immunity accordingly by setting one or more of these
39 * parameters.
40 * Newer chipsets have PHY error counters in hardware which will generate a MIB
 41 * interrupt when they overflow. Older hardware has to enable PHY error frames
42 * by setting a RX flag and then count every single PHY error. When a specified
43 * threshold of errors has been reached we will raise immunity.
44 * Also we regularly check the amount of errors and lower or raise immunity as
45 * necessary.
46 */
47
48
49/*** ANI parameter control ***/
50
51/**
52 * ath5k_ani_set_noise_immunity_level() - Set noise immunity level
53 *
54 * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
55 */
void
ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
{
	/* TODO:
	 * ANI documents suggest the following five levels to use, but the HAL
	 * and ath9k only use the last two levels, making this
	 * essentially an on/off option. There *may* be a reason for this (???),
	 * so i stick with the HAL version for now...
	 */
#if 0
	const s8 hi[] = { -18, -18, -16, -14, -12 };
	const s8 lo[] = { -52, -56, -60, -64, -70 };
	const s8 sz[] = { -34, -41, -48, -55, -62 };
	const s8 fr[] = { -70, -72, -75, -78, -80 };
#else
	/* active two-level (on/off) tables: desired size, AGC coarse low/high
	 * and FIR power-out threshold, one entry per immunity level */
	const s8 sz[] = { -55, -62 };
	const s8 lo[] = { -64, -70 };
	const s8 hi[] = { -14, -12 };
	const s8 fr[] = { -78, -80 };
#endif
	/* reject levels outside the active table (all tables are same size) */
	if (level < 0 || level >= ARRAY_SIZE(sz)) {
		ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
			"level out of range %d", level);
		return;
	}

	/* program all four thresholds for the chosen level */
	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
				AR5K_PHY_DESIRED_SIZE_TOT, sz[level]);
	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
				AR5K_PHY_AGCCOARSE_LO, lo[level]);
	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
				AR5K_PHY_AGCCOARSE_HI, hi[level]);
	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
				AR5K_PHY_SIG_FIRPWR, fr[level]);

	/* remember the level so the algorithm can step it up/down later */
	ah->ah_sc->ani_state.noise_imm_level = level;
	ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
}
94
95
96/**
97 * ath5k_ani_set_spur_immunity_level() - Set spur immunity level
98 *
99 * @level: level between 0 and @max_spur_level (the maximum level is dependent
100 * on the chip revision).
101 */
102void
103ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
104{
105 const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
106
107 if (level < 0 || level >= ARRAY_SIZE(val) ||
108 level > ah->ah_sc->ani_state.max_spur_level) {
109 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
110 "level out of range %d", level);
111 return;
112 }
113
114 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
115 AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);
116
117 ah->ah_sc->ani_state.spur_level = level;
118 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
119}
120
121
122/**
123 * ath5k_ani_set_firstep_level() - Set "firstep" level
124 *
125 * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
126 */
127void
128ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
129{
130 const int val[] = { 0, 4, 8 };
131
132 if (level < 0 || level >= ARRAY_SIZE(val)) {
133 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
134 "level out of range %d", level);
135 return;
136 }
137
138 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
139 AR5K_PHY_SIG_FIRSTEP, val[level]);
140
141 ah->ah_sc->ani_state.firstep_level = level;
142 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
143}
144
145
146/**
147 * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
148 * detection
149 *
150 * @on: turn on or off
151 */
void
ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
{
	/* each table holds { off, on } values, indexed by the bool 'on':
	 * low/high threshold M1/M2 levels and their counts */
	const int m1l[] = { 127, 50 };
	const int m2l[] = { 127, 40 };
	const int m1[] = { 127, 0x4d };
	const int m2[] = { 127, 0x40 };
	const int m2cnt[] = { 31, 16 };
	const int m2lcnt[] = { 63, 48 };

	/* program the six weak-OFDM self-correlation thresholds */
	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
			AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
			AR5K_PHY_WEAK_OFDM_LOW_THR_M2, m2l[on]);
	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
			AR5K_PHY_WEAK_OFDM_HIGH_THR_M1, m1[on]);
	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
			AR5K_PHY_WEAK_OFDM_HIGH_THR_M2, m2[on]);
	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
			AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT, m2cnt[on]);
	AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
			AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT, m2lcnt[on]);

	/* finally toggle the self-correlation enable bit itself */
	if (on)
		AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
				AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
	else
		AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
				AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);

	/* mirror the hardware state in software for the ANI algorithm */
	ah->ah_sc->ani_state.ofdm_weak_sig = on;
	ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
			on ? "on" : "off");
}
186
187
188/**
189 * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
190 *
191 * @on: turn on or off
192 */
193void
194ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
195{
196 const int val[] = { 8, 6 };
197 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
198 AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
199 ah->ah_sc->ani_state.cck_weak_sig = on;
200 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
201 on ? "on" : "off");
202}
203
204
205/*** ANI algorithm ***/
206
207/**
208 * ath5k_ani_raise_immunity() - Increase noise immunity
209 *
210 * @ofdm_trigger: If this is true we are called because of too many OFDM errors,
211 * the algorithm will tune more parameters then.
212 *
213 * Try to raise noise immunity (=decrease sensitivity) in several steps
214 * depending on the average RSSI of the beacons we received.
215 */
216static void
217ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
218 bool ofdm_trigger)
219{
220 int rssi = ah->ah_beacon_rssi_avg.avg;
221
222 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
223 ofdm_trigger ? "ODFM" : "CCK");
224
225 /* first: raise noise immunity */
226 if (as->noise_imm_level < ATH5K_ANI_MAX_NOISE_IMM_LVL) {
227 ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level + 1);
228 return;
229 }
230
231 /* only OFDM: raise spur immunity level */
232 if (ofdm_trigger &&
233 as->spur_level < ah->ah_sc->ani_state.max_spur_level) {
234 ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
235 return;
236 }
237
238 /* AP mode */
239 if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
240 if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
241 ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
242 return;
243 }
244
245 /* STA and IBSS mode */
246
247 /* TODO: for IBSS mode it would be better to keep a beacon RSSI average
248 * per each neighbour node and use the minimum of these, to make sure we
249 * don't shut out a remote node by raising immunity too high. */
250
251 if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
252 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
253 "beacon RSSI high");
254 /* only OFDM: beacon RSSI is high, we can disable ODFM weak
255 * signal detection */
256 if (ofdm_trigger && as->ofdm_weak_sig == true) {
257 ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
258 ath5k_ani_set_spur_immunity_level(ah, 0);
259 return;
260 }
261 /* as a last resort or CCK: raise firstep level */
262 if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) {
263 ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
264 return;
265 }
266 } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
267 /* beacon RSSI in mid range, we need OFDM weak signal detect,
268 * but can raise firstep level */
269 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
270 "beacon RSSI mid");
271 if (ofdm_trigger && as->ofdm_weak_sig == false)
272 ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
273 if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
274 ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
275 return;
276 } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
277 /* beacon RSSI is low. in B/G mode turn of OFDM weak signal
278 * detect and zero firstep level to maximize CCK sensitivity */
279 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
280 "beacon RSSI low, 2GHz");
281 if (ofdm_trigger && as->ofdm_weak_sig == true)
282 ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
283 if (as->firstep_level > 0)
284 ath5k_ani_set_firstep_level(ah, 0);
285 return;
286 }
287
288 /* TODO: why not?:
289 if (as->cck_weak_sig == true) {
290 ath5k_ani_set_cck_weak_signal_detection(ah, false);
291 }
292 */
293}
294
295
296/**
297 * ath5k_ani_lower_immunity() - Decrease noise immunity
298 *
299 * Try to lower noise immunity (=increase sensitivity) in several steps
300 * depending on the average RSSI of the beacons we received.
301 */
static void
ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
	int rssi = ah->ah_beacon_rssi_avg.avg;

	ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");

	if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
		/* AP mode: only firstep is adjusted mode-specifically */
		if (as->firstep_level > 0) {
			ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
			return;
		}
	} else {
		/* STA and IBSS mode (see TODO above) */
		if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
			/* beacon signal is high, leave OFDM weak signal
			 * detection off or it may oscillate
			 * TODO: who said it's off??? */
		} else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
			/* beacon RSSI is mid-range: turn on ODFM weak signal
			 * detection and next, lower firstep level */
			if (as->ofdm_weak_sig == false) {
				ath5k_ani_set_ofdm_weak_signal_detection(ah,
									 true);
				return;
			}
			if (as->firstep_level > 0) {
				ath5k_ani_set_firstep_level(ah,
							as->firstep_level - 1);
				return;
			}
		} else {
			/* beacon signal is low: only reduce firstep level */
			if (as->firstep_level > 0) {
				ath5k_ani_set_firstep_level(ah,
							as->firstep_level - 1);
				return;
			}
		}
	}

	/* all modes: lower spur immunity next... */
	if (as->spur_level > 0) {
		ath5k_ani_set_spur_immunity_level(ah, as->spur_level - 1);
		return;
	}

	/* ...and finally, reduce noise immunity (reverse of the order in
	 * which ath5k_ani_raise_immunity() raised the parameters) */
	if (as->noise_imm_level > 0) {
		ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level - 1);
		return;
	}
}
356
357
358/**
359 * ath5k_hw_ani_get_listen_time() - Calculate time spent listening
360 *
361 * Return an approximation of the time spent "listening" in milliseconds (ms)
362 * since the last call of this function by deducting the cycles spent
363 * transmitting and receiving from the total cycle count.
364 * Save profile count values for debugging/statistics and because we might want
365 * to use them later.
366 *
367 * We assume no one else clears these registers!
368 */
369static int
370ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
371{
372 int listen;
373
374 /* freeze */
375 ath5k_hw_reg_write(ah, AR5K_MIBC_FMC, AR5K_MIBC);
376 /* read */
377 as->pfc_cycles = ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE);
378 as->pfc_busy = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR);
379 as->pfc_tx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX);
380 as->pfc_rx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX);
381 /* clear */
382 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
383 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
384 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
385 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
386 /* un-freeze */
387 ath5k_hw_reg_write(ah, 0, AR5K_MIBC);
388
389 /* TODO: where does 44000 come from? (11g clock rate?) */
390 listen = (as->pfc_cycles - as->pfc_rx - as->pfc_tx) / 44000;
391
392 if (as->pfc_cycles == 0 || listen < 0)
393 return 0;
394 return listen;
395}
396
397
398/**
399 * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
400 *
401 * Clear the PHY error counters as soon as possible, since this might be called
402 * from a MIB interrupt and we want to make sure we don't get interrupted again.
403 * Add the count of CCK and OFDM errors to our internal state, so it can be used
404 * by the algorithm later.
405 *
406 * Will be called from interrupt and tasklet context.
407 * Returns 0 if both counters are zero.
408 */
409static int
410ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
411 struct ath5k_ani_state *as)
412{
413 unsigned int ofdm_err, cck_err;
414
415 if (!ah->ah_capabilities.cap_has_phyerr_counters)
416 return 0;
417
418 ofdm_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1);
419 cck_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2);
420
421 /* reset counters first, we might be in a hurry (interrupt) */
422 ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
423 AR5K_PHYERR_CNT1);
424 ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
425 AR5K_PHYERR_CNT2);
426
427 ofdm_err = ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ofdm_err);
428 cck_err = ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - cck_err);
429
430 /* sometimes both can be zero, especially when there is a superfluous
431 * second interrupt. detect that here and return an error. */
432 if (ofdm_err <= 0 && cck_err <= 0)
433 return 0;
434
435 /* avoid negative values should one of the registers overflow */
436 if (ofdm_err > 0) {
437 as->ofdm_errors += ofdm_err;
438 as->sum_ofdm_errors += ofdm_err;
439 }
440 if (cck_err > 0) {
441 as->cck_errors += cck_err;
442 as->sum_cck_errors += cck_err;
443 }
444 return 1;
445}
446
447
448/**
449 * ath5k_ani_period_restart() - Restart ANI period
450 *
451 * Just reset counters, so they are clear for the next "ani period".
452 */
453static void
454ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
455{
456 /* keep last values for debugging */
457 as->last_ofdm_errors = as->ofdm_errors;
458 as->last_cck_errors = as->cck_errors;
459 as->last_listen = as->listen_time;
460
461 as->ofdm_errors = 0;
462 as->cck_errors = 0;
463 as->listen_time = 0;
464}
465
466
467/**
468 * ath5k_ani_calibration() - The main ANI calibration function
469 *
470 * We count OFDM and CCK errors relative to the time where we did not send or
471 * receive ("listen" time) and raise or lower immunity accordingly.
472 * This is called regularly (every second) from the calibration timer, but also
473 * when an error threshold has been reached.
474 *
475 * In order to synchronize access from different contexts, this should be
476 * called only indirectly by scheduling the ANI tasklet!
477 */
478void
479ath5k_ani_calibration(struct ath5k_hw *ah)
480{
481 struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
482 int listen, ofdm_high, ofdm_low, cck_high, cck_low;
483
484 if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
485 return;
486
487 /* get listen time since last call and add it to the counter because we
488 * might not have restarted the "ani period" last time */
489 listen = ath5k_hw_ani_get_listen_time(ah, as);
490 as->listen_time += listen;
491
492 ath5k_ani_save_and_clear_phy_errors(ah, as);
493
494 ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
495 cck_high = as->listen_time * ATH5K_ANI_CCK_TRIG_HIGH / 1000;
496 ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
497 cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;
498
499 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
500 "listen %d (now %d)", as->listen_time, listen);
501 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
502 "check high ofdm %d/%d cck %d/%d",
503 as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);
504
505 if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
506 /* too many PHY errors - we have to raise immunity */
507 bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
508 ath5k_ani_raise_immunity(ah, as, ofdm_flag);
509 ath5k_ani_period_restart(ah, as);
510
511 } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
512 /* If more than 5 (TODO: why 5?) periods have passed and we got
513 * relatively little errors we can try to lower immunity */
514 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
515 "check low ofdm %d/%d cck %d/%d",
516 as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);
517
518 if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
519 ath5k_ani_lower_immunity(ah, as);
520
521 ath5k_ani_period_restart(ah, as);
522 }
523}
524
525
526/*** INTERRUPT HANDLER ***/
527
528/**
529 * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
530 *
531 * Just read & reset the registers quickly, so they don't generate more
532 * interrupts, save the counters and schedule the tasklet to decide whether
533 * to raise immunity or not.
534 *
535 * We just need to handle PHY error counters, ath5k_hw_update_mib_counters()
536 * should take care of all "normal" MIB interrupts.
537 */
538void
539ath5k_ani_mib_intr(struct ath5k_hw *ah)
540{
541 struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
542
543 /* nothing to do here if HW does not have PHY error counters - they
544 * can't be the reason for the MIB interrupt then */
545 if (!ah->ah_capabilities.cap_has_phyerr_counters)
546 return;
547
548 /* not in use but clear anyways */
549 ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
550 ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
551
552 if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
553 return;
554
555 /* if one of the errors triggered, we can get a superfluous second
556 * interrupt, even though we have already reset the register. the
557 * function detects that so we can return early */
558 if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0)
559 return;
560
561 if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
562 as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
563 tasklet_schedule(&ah->ah_sc->ani_tasklet);
564}
565
566
567/**
568 * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
569 *
570 * This is used by hardware without PHY error counters to report PHY errors
571 * on a frame-by-frame basis, instead of the interrupt.
572 */
573void
574ath5k_ani_phy_error_report(struct ath5k_hw *ah,
575 enum ath5k_phy_error_code phyerr)
576{
577 struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
578
579 if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
580 as->ofdm_errors++;
581 if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
582 tasklet_schedule(&ah->ah_sc->ani_tasklet);
583 } else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
584 as->cck_errors++;
585 if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
586 tasklet_schedule(&ah->ah_sc->ani_tasklet);
587 }
588}
589
590
591/*** INIT ***/
592
593/**
594 * ath5k_enable_phy_err_counters() - Enable PHY error counters
 * @ah: The &struct ath5k_hw
595 *
596 * Enable PHY error counters for OFDM and CCK timing errors.
597 */
598static void
599ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
600{
	/* preload the counters with (max - trigger threshold): they reach
	 * their maximum value - which raises the MIB interrupt - exactly when
	 * the trigger threshold is hit. ath5k_ani_save_and_clear_phy_errors()
	 * converts the raw register values back to error counts using the
	 * same constants. */
601	ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
602			AR5K_PHYERR_CNT1);
603	ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
604			AR5K_PHYERR_CNT2);
	/* NOTE(review): the *_MASK registers presumably select which PHY
	 * error types each counter counts (OFDM resp. CCK timing filter) -
	 * verify against the AR5212 register documentation */
605	ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_OFDM, AR5K_PHYERR_CNT1_MASK);
606	ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_CCK, AR5K_PHYERR_CNT2_MASK);
607
608	/* not in use */
609	ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
610	ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
611}
612
613
614/**
615 * ath5k_disable_phy_err_counters() - Disable PHY error counters
 * @ah: The &struct ath5k_hw
616 *
617 * Disable PHY error counters for OFDM and CCK timing errors.
618 */
619static void
620ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
621{
	/* zero both the counter registers and their filter masks, undoing
	 * the preload done by ath5k_enable_phy_err_counters() */
622	ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1);
623	ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2);
624	ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1_MASK);
625	ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2_MASK);
626
627	/* not in use */
628	ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
629	ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
630}
631
632
633/**
634 * ath5k_ani_init() - Initialize ANI
635 * @mode: Which mode to use (auto, manual high, manual low, off)
636 *
637 * Initialize ANI according to mode.
638 */
639void
640ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
641{
642 /* ANI is only possible on 5212 and newer */
643 if (ah->ah_version < AR5K_AR5212)
644 return;
645
646 /* clear old state information */
647 memset(&ah->ah_sc->ani_state, 0, sizeof(ah->ah_sc->ani_state));
648
649 /* older hardware has more spur levels than newer */
650 if (ah->ah_mac_srev < AR5K_SREV_AR2414)
651 ah->ah_sc->ani_state.max_spur_level = 7;
652 else
653 ah->ah_sc->ani_state.max_spur_level = 2;
654
655 /* initial values for our ani parameters */
656 if (mode == ATH5K_ANI_MODE_OFF) {
657 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI off\n");
658 } else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
659 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
660 "ANI manual low -> high sensitivity\n");
661 ath5k_ani_set_noise_immunity_level(ah, 0);
662 ath5k_ani_set_spur_immunity_level(ah, 0);
663 ath5k_ani_set_firstep_level(ah, 0);
664 ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
665 ath5k_ani_set_cck_weak_signal_detection(ah, true);
666 } else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
667 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
668 "ANI manual high -> low sensitivity\n");
669 ath5k_ani_set_noise_immunity_level(ah,
670 ATH5K_ANI_MAX_NOISE_IMM_LVL);
671 ath5k_ani_set_spur_immunity_level(ah,
672 ah->ah_sc->ani_state.max_spur_level);
673 ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
674 ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
675 ath5k_ani_set_cck_weak_signal_detection(ah, false);
676 } else if (mode == ATH5K_ANI_MODE_AUTO) {
677 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI auto\n");
678 ath5k_ani_set_noise_immunity_level(ah, 0);
679 ath5k_ani_set_spur_immunity_level(ah, 0);
680 ath5k_ani_set_firstep_level(ah, 0);
681 ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
682 ath5k_ani_set_cck_weak_signal_detection(ah, false);
683 }
684
685 /* newer hardware has PHY error counter registers which we can use to
686 * get OFDM and CCK error counts. older hardware has to set rxfilter and
687 * report every single PHY error by calling ath5k_ani_phy_error_report()
688 */
689 if (mode == ATH5K_ANI_MODE_AUTO) {
690 if (ah->ah_capabilities.cap_has_phyerr_counters)
691 ath5k_enable_phy_err_counters(ah);
692 else
693 ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) |
694 AR5K_RX_FILTER_PHYERR);
695 } else {
696 if (ah->ah_capabilities.cap_has_phyerr_counters)
697 ath5k_disable_phy_err_counters(ah);
698 else
699 ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) &
700 ~AR5K_RX_FILTER_PHYERR);
701 }
702
703 ah->ah_sc->ani_state.ani_mode = mode;
704}
705
706
707/*** DEBUG ***/
708
709#ifdef CONFIG_ATH5K_DEBUG
710
/**
 * ath5k_ani_print_counters() - Dump ANI-related hardware counters to the log
 * @ah: The &struct ath5k_hw
 *
 * Debug helper, only built with CONFIG_ATH5K_DEBUG. NOTE(review): reading
 * the first four registers also clears them (per the comment below), so
 * calling this interferes with anything else consuming those counters.
 */
711void
712ath5k_ani_print_counters(struct ath5k_hw *ah)
713{
714	/* clears too */
715	printk(KERN_NOTICE "ACK fail\t%d\n",
716		ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
717	printk(KERN_NOTICE "RTS fail\t%d\n",
718		ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
719	printk(KERN_NOTICE "RTS success\t%d\n",
720		ath5k_hw_reg_read(ah, AR5K_RTS_OK));
721	printk(KERN_NOTICE "FCS error\t%d\n",
722		ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
723
724	/* no clear */
725	printk(KERN_NOTICE "tx\t%d\n",
726		ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
727	printk(KERN_NOTICE "rx\t%d\n",
728		ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
729	printk(KERN_NOTICE "busy\t%d\n",
730		ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
731	printk(KERN_NOTICE "cycles\t%d\n",
732		ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
733
	/* raw PHY error counter registers - preloaded values, see
	 * ath5k_enable_phy_err_counters() */
734	printk(KERN_NOTICE "AR5K_PHYERR_CNT1\t%d\n",
735		ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
736	printk(KERN_NOTICE "AR5K_PHYERR_CNT2\t%d\n",
737		ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
738	printk(KERN_NOTICE "AR5K_OFDM_FIL_CNT\t%d\n",
739		ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
740	printk(KERN_NOTICE "AR5K_CCK_FIL_CNT\t%d\n",
741		ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
742}
743
744#endif
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
new file mode 100644
index 000000000000..55cf26d8522c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#ifndef ANI_H
17#define ANI_H
18
19/* these thresholds are relative to the ATH5K_ANI_LISTEN_PERIOD */
20#define ATH5K_ANI_LISTEN_PERIOD 100
21#define ATH5K_ANI_OFDM_TRIG_HIGH 500
22#define ATH5K_ANI_OFDM_TRIG_LOW 200
23#define ATH5K_ANI_CCK_TRIG_HIGH 200
24#define ATH5K_ANI_CCK_TRIG_LOW 100
25
26/* average beacon RSSI thresholds */
27#define ATH5K_ANI_RSSI_THR_HIGH 40
28#define ATH5K_ANI_RSSI_THR_LOW 7
29
 30/* maximum available levels */
31#define ATH5K_ANI_MAX_FIRSTEP_LVL 2
32#define ATH5K_ANI_MAX_NOISE_IMM_LVL 1
33
34
35/**
36 * enum ath5k_ani_mode - mode for ANI / noise sensitivity
37 *
38 * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
39 * algorithm after it has been on auto mode.
 40 * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
 41 * maximizing sensitivity. ANI will not run.
 42 * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
 43 * minimizing sensitivity. ANI will not run.
 44 * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
45 * amount of OFDM and CCK frame errors (default).
46 */
 47enum ath5k_ani_mode {
 48	ATH5K_ANI_MODE_OFF = 0,		/* ANI algorithm stopped */
 49	ATH5K_ANI_MODE_MANUAL_LOW = 1,	/* immunity low = max sensitivity */
 50	ATH5K_ANI_MODE_MANUAL_HIGH = 2,	/* immunity high = min sensitivity */
 51	ATH5K_ANI_MODE_AUTO = 3		/* driven by error counts (default) */
 52};
53
54
 55/**
 56 * struct ath5k_ani_state - ANI state and associated counters
 57 *
 58 * @max_spur_level: the maximum spur level is chip dependent
 59 */
 60struct ath5k_ani_state {
 61	enum ath5k_ani_mode ani_mode;	/* current mode, set by ath5k_ani_init() */
 62
 63	/* state */
 64	int noise_imm_level;		/* current noise immunity level */
 65	int spur_level;			/* current spur immunity level */
 66	int firstep_level;		/* current firstep level */
 67	bool ofdm_weak_sig;		/* OFDM weak signal detection on/off */
 68	bool cck_weak_sig;		/* CCK weak signal detection on/off */
 69
 70	int max_spur_level;		/* 7 on older MACs, 2 from AR2414 on */
 71
 72	/* used by the algorithm */
 73	unsigned int listen_time;	/* listen time (ms) in the current period */
 74	unsigned int ofdm_errors;	/* OFDM errors in the current period */
 75	unsigned int cck_errors;	/* CCK errors in the current period */
 76
 77	/* debug/statistics only: numbers from last ANI calibration */
 78	unsigned int pfc_tx;		/* profile counter: cycles spent transmitting */
 79	unsigned int pfc_rx;		/* profile counter: cycles spent receiving */
 80	unsigned int pfc_busy;		/* profile counter: RXCLR (busy) cycles */
 81	unsigned int pfc_cycles;	/* profile counter: total cycles */
 82	unsigned int last_listen;	/* listen time of the previous period */
 83	unsigned int last_ofdm_errors;	/* OFDM errors of the previous period */
 84	unsigned int last_cck_errors;	/* CCK errors of the previous period */
 85	unsigned int sum_ofdm_errors;	/* running total, cleared only at init */
 86	unsigned int sum_cck_errors;	/* running total, cleared only at init */
 87};
88
89void ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode);
90void ath5k_ani_mib_intr(struct ath5k_hw *ah);
91void ath5k_ani_calibration(struct ath5k_hw *ah);
92void ath5k_ani_phy_error_report(struct ath5k_hw *ah,
93 enum ath5k_phy_error_code phyerr);
94
95/* for manual control */
96void ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level);
97void ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level);
98void ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level);
99void ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on);
100void ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on);
101
102void ath5k_ani_print_counters(struct ath5k_hw *ah);
103
104#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ac67f02e26d8..2785946f659a 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -202,7 +202,8 @@
202#define AR5K_TUNE_MAX_TXPOWER 63 202#define AR5K_TUNE_MAX_TXPOWER 63
203#define AR5K_TUNE_DEFAULT_TXPOWER 25 203#define AR5K_TUNE_DEFAULT_TXPOWER 25
204#define AR5K_TUNE_TPC_TXPOWER false 204#define AR5K_TUNE_TPC_TXPOWER false
205#define AR5K_TUNE_HWTXTRIES 4 205#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
206#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
206 207
207#define AR5K_INIT_CARR_SENSE_EN 1 208#define AR5K_INIT_CARR_SENSE_EN 1
208 209
@@ -614,28 +615,6 @@ struct ath5k_rx_status {
614#define AR5K_BEACON_ENA 0x00800000 /*enable beacon xmit*/ 615#define AR5K_BEACON_ENA 0x00800000 /*enable beacon xmit*/
615#define AR5K_BEACON_RESET_TSF 0x01000000 /*force a TSF reset*/ 616#define AR5K_BEACON_RESET_TSF 0x01000000 /*force a TSF reset*/
616 617
617#if 0
618/**
619 * struct ath5k_beacon_state - Per-station beacon timer state.
620 * @bs_interval: in TU's, can also include the above flags
621 * @bs_cfp_max_duration: if non-zero hw is setup to coexist with a
622 * Point Coordination Function capable AP
623 */
624struct ath5k_beacon_state {
625 u32 bs_next_beacon;
626 u32 bs_next_dtim;
627 u32 bs_interval;
628 u8 bs_dtim_period;
629 u8 bs_cfp_period;
630 u16 bs_cfp_max_duration;
631 u16 bs_cfp_du_remain;
632 u16 bs_tim_offset;
633 u16 bs_sleep_duration;
634 u16 bs_bmiss_threshold;
635 u32 bs_cfp_next;
636};
637#endif
638
639 618
640/* 619/*
641 * TSF to TU conversion: 620 * TSF to TU conversion:
@@ -822,9 +801,9 @@ struct ath5k_athchan_2ghz {
822 * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold 801 * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
823 * We currently do increments on interrupt by 802 * We currently do increments on interrupt by
824 * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2 803 * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
825 * @AR5K_INT_MIB: Indicates the Management Information Base counters should be 804 * @AR5K_INT_MIB: Indicates the either Management Information Base counters or
826 * checked. We should do this with ath5k_hw_update_mib_counters() but 805 * one of the PHY error counters reached the maximum value and should be
827 * it seems we should also then do some noise immunity work. 806 * read and cleared.
828 * @AR5K_INT_RXPHY: RX PHY Error 807 * @AR5K_INT_RXPHY: RX PHY Error
829 * @AR5K_INT_RXKCM: RX Key cache miss 808 * @AR5K_INT_RXKCM: RX Key cache miss
830 * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a 809 * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
@@ -912,10 +891,11 @@ enum ath5k_int {
912 AR5K_INT_NOCARD = 0xffffffff 891 AR5K_INT_NOCARD = 0xffffffff
913}; 892};
914 893
915/* Software interrupts used for calibration */ 894/* mask which calibration is active at the moment */
916enum ath5k_software_interrupt { 895enum ath5k_calibration_mask {
917 AR5K_SWI_FULL_CALIBRATION = 0x01, 896 AR5K_CALIBRATION_FULL = 0x01,
918 AR5K_SWI_SHORT_CALIBRATION = 0x02, 897 AR5K_CALIBRATION_SHORT = 0x02,
898 AR5K_CALIBRATION_ANI = 0x04,
919}; 899};
920 900
921/* 901/*
@@ -1004,6 +984,8 @@ struct ath5k_capabilities {
1004 struct { 984 struct {
1005 u8 q_tx_num; 985 u8 q_tx_num;
1006 } cap_queues; 986 } cap_queues;
987
988 bool cap_has_phyerr_counters;
1007}; 989};
1008 990
1009/* size of noise floor history (keep it a power of two) */ 991/* size of noise floor history (keep it a power of two) */
@@ -1014,6 +996,15 @@ struct ath5k_nfcal_hist
1014 s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */ 996 s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
1015}; 997};
1016 998
999/**
1000 * struct avg_val - Helper structure for average calculation
1001 * @avg: contains the actual average value
1002 * @avg_weight: is used internally during calculation to prevent rounding errors
1003 */
1004struct ath5k_avg_val {
1005 int avg;
1006 int avg_weight;
1007};
1017 1008
1018/***************************************\ 1009/***************************************\
1019 HARDWARE ABSTRACTION LAYER STRUCTURE 1010 HARDWARE ABSTRACTION LAYER STRUCTURE
@@ -1028,7 +1019,6 @@ struct ath5k_nfcal_hist
1028 1019
1029/* TODO: Clean up and merge with ath5k_softc */ 1020/* TODO: Clean up and merge with ath5k_softc */
1030struct ath5k_hw { 1021struct ath5k_hw {
1031 u32 ah_magic;
1032 struct ath_common common; 1022 struct ath_common common;
1033 1023
1034 struct ath5k_softc *ah_sc; 1024 struct ath5k_softc *ah_sc;
@@ -1036,7 +1026,6 @@ struct ath5k_hw {
1036 1026
1037 enum ath5k_int ah_imr; 1027 enum ath5k_int ah_imr;
1038 1028
1039 enum nl80211_iftype ah_op_mode;
1040 struct ieee80211_channel *ah_current_channel; 1029 struct ieee80211_channel *ah_current_channel;
1041 bool ah_turbo; 1030 bool ah_turbo;
1042 bool ah_calibration; 1031 bool ah_calibration;
@@ -1049,7 +1038,6 @@ struct ath5k_hw {
1049 u32 ah_phy; 1038 u32 ah_phy;
1050 u32 ah_mac_srev; 1039 u32 ah_mac_srev;
1051 u16 ah_mac_version; 1040 u16 ah_mac_version;
1052 u16 ah_mac_revision;
1053 u16 ah_phy_revision; 1041 u16 ah_phy_revision;
1054 u16 ah_radio_5ghz_revision; 1042 u16 ah_radio_5ghz_revision;
1055 u16 ah_radio_2ghz_revision; 1043 u16 ah_radio_2ghz_revision;
@@ -1071,8 +1059,6 @@ struct ath5k_hw {
1071 u8 ah_def_ant; 1059 u8 ah_def_ant;
1072 bool ah_software_retry; 1060 bool ah_software_retry;
1073 1061
1074 int ah_gpio_npins;
1075
1076 struct ath5k_capabilities ah_capabilities; 1062 struct ath5k_capabilities ah_capabilities;
1077 1063
1078 struct ath5k_txq_info ah_txq[AR5K_NUM_TX_QUEUES]; 1064 struct ath5k_txq_info ah_txq[AR5K_NUM_TX_QUEUES];
@@ -1123,17 +1109,18 @@ struct ath5k_hw {
1123 1109
1124 struct ath5k_nfcal_hist ah_nfcal_hist; 1110 struct ath5k_nfcal_hist ah_nfcal_hist;
1125 1111
1112 /* average beacon RSSI in our BSS (used by ANI) */
1113 struct ath5k_avg_val ah_beacon_rssi_avg;
1114
1126 /* noise floor from last periodic calibration */ 1115 /* noise floor from last periodic calibration */
1127 s32 ah_noise_floor; 1116 s32 ah_noise_floor;
1128 1117
1129 /* Calibration timestamp */ 1118 /* Calibration timestamp */
1130 unsigned long ah_cal_tstamp; 1119 unsigned long ah_cal_next_full;
1131 1120 unsigned long ah_cal_next_ani;
1132 /* Calibration interval (secs) */
1133 u8 ah_cal_intval;
1134 1121
1135 /* Software interrupt mask */ 1122 /* Calibration mask */
1136 u8 ah_swi_mask; 1123 u8 ah_cal_mask;
1137 1124
1138 /* 1125 /*
1139 * Function pointers 1126 * Function pointers
@@ -1141,9 +1128,9 @@ struct ath5k_hw {
1141 int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc, 1128 int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
1142 u32 size, unsigned int flags); 1129 u32 size, unsigned int flags);
1143 int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1130 int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1144 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int, 1131 unsigned int, unsigned int, int, enum ath5k_pkt_type,
1145 unsigned int, unsigned int, unsigned int, unsigned int, 1132 unsigned int, unsigned int, unsigned int, unsigned int,
1146 unsigned int, unsigned int, unsigned int); 1133 unsigned int, unsigned int, unsigned int, unsigned int);
1147 int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1134 int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1148 unsigned int, unsigned int, unsigned int, unsigned int, 1135 unsigned int, unsigned int, unsigned int, unsigned int,
1149 unsigned int, unsigned int); 1136 unsigned int, unsigned int);
@@ -1158,158 +1145,145 @@ struct ath5k_hw {
1158 */ 1145 */
1159 1146
1160/* Attach/Detach Functions */ 1147/* Attach/Detach Functions */
1161extern int ath5k_hw_attach(struct ath5k_softc *sc); 1148int ath5k_hw_attach(struct ath5k_softc *sc);
1162extern void ath5k_hw_detach(struct ath5k_hw *ah); 1149void ath5k_hw_detach(struct ath5k_hw *ah);
1163 1150
1164/* LED functions */ 1151/* LED functions */
1165extern int ath5k_init_leds(struct ath5k_softc *sc); 1152int ath5k_init_leds(struct ath5k_softc *sc);
1166extern void ath5k_led_enable(struct ath5k_softc *sc); 1153void ath5k_led_enable(struct ath5k_softc *sc);
1167extern void ath5k_led_off(struct ath5k_softc *sc); 1154void ath5k_led_off(struct ath5k_softc *sc);
1168extern void ath5k_unregister_leds(struct ath5k_softc *sc); 1155void ath5k_unregister_leds(struct ath5k_softc *sc);
1169 1156
1170/* Reset Functions */ 1157/* Reset Functions */
1171extern int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial); 1158int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
1172extern int ath5k_hw_on_hold(struct ath5k_hw *ah); 1159int ath5k_hw_on_hold(struct ath5k_hw *ah);
1173extern int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool change_channel); 1160int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1161 struct ieee80211_channel *channel, bool change_channel);
1162int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
1163 bool is_set);
1174/* Power management functions */ 1164/* Power management functions */
1175extern int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration);
1176 1165
1177/* DMA Related Functions */ 1166/* DMA Related Functions */
1178extern void ath5k_hw_start_rx_dma(struct ath5k_hw *ah); 1167void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
1179extern int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah); 1168int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
1180extern u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah); 1169u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
1181extern void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr); 1170void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
1182extern int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue); 1171int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
1183extern int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue); 1172int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
1184extern u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue); 1173u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
1185extern int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, 1174int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
1186 u32 phys_addr); 1175 u32 phys_addr);
1187extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase); 1176int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
1188/* Interrupt handling */ 1177/* Interrupt handling */
1189extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah); 1178bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
1190extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask); 1179int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
1191extern enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum 1180enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask);
1192ath5k_int new_mask); 1181void ath5k_hw_update_mib_counters(struct ath5k_hw *ah);
1193extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats);
1194 1182
1195/* EEPROM access functions */ 1183/* EEPROM access functions */
1196extern int ath5k_eeprom_init(struct ath5k_hw *ah); 1184int ath5k_eeprom_init(struct ath5k_hw *ah);
1197extern void ath5k_eeprom_detach(struct ath5k_hw *ah); 1185void ath5k_eeprom_detach(struct ath5k_hw *ah);
1198extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac); 1186int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
1199extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
1200 1187
1201/* Protocol Control Unit Functions */ 1188/* Protocol Control Unit Functions */
1202extern int ath5k_hw_set_opmode(struct ath5k_hw *ah); 1189extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
1203extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class); 1190void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
1204/* BSSID Functions */ 1191/* BSSID Functions */
1205extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac); 1192int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
1206extern void ath5k_hw_set_associd(struct ath5k_hw *ah); 1193void ath5k_hw_set_associd(struct ath5k_hw *ah);
1207extern void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask); 1194void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
1208/* Receive start/stop functions */ 1195/* Receive start/stop functions */
1209extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah); 1196void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
1210extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah); 1197void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
1211/* RX Filter functions */ 1198/* RX Filter functions */
1212extern void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1); 1199void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
1213extern int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index); 1200u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
1214extern int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index); 1201void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
1215extern u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
1216extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
1217/* Beacon control functions */ 1202/* Beacon control functions */
1218extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah); 1203u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
1219extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah); 1204void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
1220extern void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64); 1205void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
1221extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah); 1206void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
1222extern void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
1223#if 0
1224extern int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah, const struct ath5k_beacon_state *state);
1225extern void ath5k_hw_reset_beacon(struct ath5k_hw *ah);
1226extern int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr);
1227#endif
1228/* ACK bit rate */ 1207/* ACK bit rate */
1229void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high); 1208void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high);
1230/* ACK/CTS Timeouts */
1231extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
1232extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
1233extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
1234extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
1235/* Clock rate related functions */ 1209/* Clock rate related functions */
1236unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec); 1210unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
1237unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock); 1211unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
1238unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah); 1212unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
1239/* Key table (WEP) functions */ 1213/* Key table (WEP) functions */
1240extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry); 1214int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
1241extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry); 1215int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
1242extern int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, const struct ieee80211_key_conf *key, const u8 *mac); 1216 const struct ieee80211_key_conf *key, const u8 *mac);
1243extern int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac); 1217int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
1244 1218
1245/* Queue Control Unit, DFS Control Unit Functions */ 1219/* Queue Control Unit, DFS Control Unit Functions */
1246extern int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info); 1220int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
1247extern int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, 1221 struct ath5k_txq_info *queue_info);
1248 const struct ath5k_txq_info *queue_info); 1222int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
1249extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, 1223 const struct ath5k_txq_info *queue_info);
1250 enum ath5k_tx_queue queue_type, 1224int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
1251 struct ath5k_txq_info *queue_info); 1225 enum ath5k_tx_queue queue_type,
1252extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue); 1226 struct ath5k_txq_info *queue_info);
1253extern void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1227u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
1254extern int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1228void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
1255extern unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah); 1229int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
1256extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time); 1230int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
1257 1231
1258/* Hardware Descriptor Functions */ 1232/* Hardware Descriptor Functions */
1259extern int ath5k_hw_init_desc_functions(struct ath5k_hw *ah); 1233int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
1260 1234
1261/* GPIO Functions */ 1235/* GPIO Functions */
1262extern void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state); 1236void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
1263extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio); 1237int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
1264extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio); 1238int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
1265extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio); 1239u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
1266extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val); 1240int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
1267extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level); 1241void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
1242 u32 interrupt_level);
1268 1243
1269/* rfkill Functions */ 1244/* rfkill Functions */
1270extern void ath5k_rfkill_hw_start(struct ath5k_hw *ah); 1245void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
1271extern void ath5k_rfkill_hw_stop(struct ath5k_hw *ah); 1246void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
1272 1247
1273/* Misc functions */ 1248/* Misc functions */
1274int ath5k_hw_set_capabilities(struct ath5k_hw *ah); 1249int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
1275extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result); 1250int ath5k_hw_get_capability(struct ath5k_hw *ah,
1276extern int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id); 1251 enum ath5k_capability_type cap_type, u32 capability,
1277extern int ath5k_hw_disable_pspoll(struct ath5k_hw *ah); 1252 u32 *result);
1253int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
1254int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
1278 1255
1279/* Initial register settings functions */ 1256/* Initial register settings functions */
1280extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel); 1257int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
1281 1258
1282/* Initialize RF */ 1259/* Initialize RF */
1283extern int ath5k_hw_rfregs_init(struct ath5k_hw *ah, 1260int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
1284 struct ieee80211_channel *channel, 1261 struct ieee80211_channel *channel,
1285 unsigned int mode); 1262 unsigned int mode);
1286extern int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq); 1263int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq);
1287extern enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah); 1264enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
1288extern int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah); 1265int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
1289/* PHY/RF channel functions */ 1266/* PHY/RF channel functions */
1290extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags); 1267bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
1291extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1268int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1292/* PHY calibration */ 1269/* PHY calibration */
1293void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah); 1270void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
1294extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1271int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
1295extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq); 1272 struct ieee80211_channel *channel);
1296extern s16 ath5k_hw_get_noise_floor(struct ath5k_hw *ah);
1297extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah);
1298/* Spur mitigation */ 1273/* Spur mitigation */
1299bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, 1274bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
1300 struct ieee80211_channel *channel); 1275 struct ieee80211_channel *channel);
1301void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, 1276void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1302 struct ieee80211_channel *channel); 1277 struct ieee80211_channel *channel);
1303/* Misc PHY functions */ 1278/* Misc PHY functions */
1304extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan); 1279u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
1305extern int ath5k_hw_phy_disable(struct ath5k_hw *ah); 1280int ath5k_hw_phy_disable(struct ath5k_hw *ah);
1306/* Antenna control */ 1281/* Antenna control */
1307extern void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode); 1282void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
1308extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant);
1309extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
1310/* TX power setup */ 1283/* TX power setup */
1311extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower); 1284int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
1312extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower); 1285 u8 ee_mode, u8 txpower);
1286int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
1313 1287
1314/* 1288/*
1315 * Functions used internaly 1289 * Functions used internaly
@@ -1335,29 +1309,6 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
1335 iowrite32(val, ah->ah_iobase + reg); 1309 iowrite32(val, ah->ah_iobase + reg);
1336} 1310}
1337 1311
1338#if defined(_ATH5K_RESET) || defined(_ATH5K_PHY)
1339/*
1340 * Check if a register write has been completed
1341 */
1342static int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag,
1343 u32 val, bool is_set)
1344{
1345 int i;
1346 u32 data;
1347
1348 for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
1349 data = ath5k_hw_reg_read(ah, reg);
1350 if (is_set && (data & flag))
1351 break;
1352 else if ((data & flag) == val)
1353 break;
1354 udelay(15);
1355 }
1356
1357 return (i <= 0) ? -EAGAIN : 0;
1358}
1359#endif
1360
1361static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits) 1312static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
1362{ 1313{
1363 u32 retval = 0, bit, i; 1314 u32 retval = 0, bit, i;
@@ -1370,9 +1321,27 @@ static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
1370 return retval; 1321 return retval;
1371} 1322}
1372 1323
1373static inline int ath5k_pad_size(int hdrlen) 1324#define AVG_SAMPLES 8
1325#define AVG_FACTOR 1000
1326
1327/**
1328 * ath5k_moving_average - Exponentially weighted moving average
1329 * @avg: average structure
1330 * @val: current value
1331 *
1332 * This implementation make use of a struct ath5k_avg_val to prevent rounding
1333 * errors.
1334 */
1335static inline struct ath5k_avg_val
1336ath5k_moving_average(const struct ath5k_avg_val avg, const int val)
1374{ 1337{
1375 return (hdrlen < 24) ? 0 : hdrlen & 3; 1338 struct ath5k_avg_val new;
1339 new.avg_weight = avg.avg_weight ?
1340 (((avg.avg_weight * ((AVG_SAMPLES) - 1)) +
1341 (val * (AVG_FACTOR))) / (AVG_SAMPLES)) :
1342 (val * (AVG_FACTOR));
1343 new.avg = new.avg_weight / (AVG_FACTOR);
1344 return new;
1376} 1345}
1377 1346
1378#endif 1347#endif
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index dc0786cc2639..e0c244b02f05 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -114,7 +114,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
114 /* 114 /*
115 * HW information 115 * HW information
116 */ 116 */
117 ah->ah_op_mode = NL80211_IFTYPE_STATION;
118 ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT; 117 ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
119 ah->ah_turbo = false; 118 ah->ah_turbo = false;
120 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; 119 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
@@ -124,6 +123,9 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
124 ah->ah_cw_min = AR5K_TUNE_CWMIN; 123 ah->ah_cw_min = AR5K_TUNE_CWMIN;
125 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY; 124 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
126 ah->ah_software_retry = false; 125 ah->ah_software_retry = false;
126 ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
127 ah->ah_noise_floor = -95; /* until first NF calibration is run */
128 sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
127 129
128 /* 130 /*
129 * Find the mac version 131 * Find the mac version
@@ -149,7 +151,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
149 /* Get MAC, PHY and RADIO revisions */ 151 /* Get MAC, PHY and RADIO revisions */
150 ah->ah_mac_srev = srev; 152 ah->ah_mac_srev = srev;
151 ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER); 153 ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
152 ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
153 ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) & 154 ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
154 0xffffffff; 155 0xffffffff;
155 ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah, 156 ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
@@ -328,7 +329,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
328 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */ 329 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
329 memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN); 330 memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
330 ath5k_hw_set_associd(ah); 331 ath5k_hw_set_associd(ah);
331 ath5k_hw_set_opmode(ah); 332 ath5k_hw_set_opmode(ah, sc->opmode);
332 333
333 ath5k_hw_rfgain_opt_init(ah); 334 ath5k_hw_rfgain_opt_init(ah);
334 335
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 3abbe7513ab5..5f04cf38a5bc 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -59,8 +59,8 @@
59#include "base.h" 59#include "base.h"
60#include "reg.h" 60#include "reg.h"
61#include "debug.h" 61#include "debug.h"
62#include "ani.h"
62 63
63static u8 ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */
64static int modparam_nohwcrypt; 64static int modparam_nohwcrypt;
65module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 65module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
66MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 66MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -199,7 +199,7 @@ static void __devexit ath5k_pci_remove(struct pci_dev *pdev);
199static int ath5k_pci_suspend(struct device *dev); 199static int ath5k_pci_suspend(struct device *dev);
200static int ath5k_pci_resume(struct device *dev); 200static int ath5k_pci_resume(struct device *dev);
201 201
202SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume); 202static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
203#define ATH5K_PM_OPS (&ath5k_pm_ops) 203#define ATH5K_PM_OPS (&ath5k_pm_ops)
204#else 204#else
205#define ATH5K_PM_OPS NULL 205#define ATH5K_PM_OPS NULL
@@ -231,7 +231,7 @@ static void ath5k_remove_interface(struct ieee80211_hw *hw,
231 struct ieee80211_vif *vif); 231 struct ieee80211_vif *vif);
232static int ath5k_config(struct ieee80211_hw *hw, u32 changed); 232static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
233static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, 233static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
234 int mc_count, struct dev_addr_list *mc_list); 234 struct netdev_hw_addr_list *mc_list);
235static void ath5k_configure_filter(struct ieee80211_hw *hw, 235static void ath5k_configure_filter(struct ieee80211_hw *hw,
236 unsigned int changed_flags, 236 unsigned int changed_flags,
237 unsigned int *new_flags, 237 unsigned int *new_flags,
@@ -242,6 +242,8 @@ static int ath5k_set_key(struct ieee80211_hw *hw,
242 struct ieee80211_key_conf *key); 242 struct ieee80211_key_conf *key);
243static int ath5k_get_stats(struct ieee80211_hw *hw, 243static int ath5k_get_stats(struct ieee80211_hw *hw,
244 struct ieee80211_low_level_stats *stats); 244 struct ieee80211_low_level_stats *stats);
245static int ath5k_get_survey(struct ieee80211_hw *hw,
246 int idx, struct survey_info *survey);
245static u64 ath5k_get_tsf(struct ieee80211_hw *hw); 247static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
246static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf); 248static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
247static void ath5k_reset_tsf(struct ieee80211_hw *hw); 249static void ath5k_reset_tsf(struct ieee80211_hw *hw);
@@ -267,6 +269,7 @@ static const struct ieee80211_ops ath5k_hw_ops = {
267 .configure_filter = ath5k_configure_filter, 269 .configure_filter = ath5k_configure_filter,
268 .set_key = ath5k_set_key, 270 .set_key = ath5k_set_key,
269 .get_stats = ath5k_get_stats, 271 .get_stats = ath5k_get_stats,
272 .get_survey = ath5k_get_survey,
270 .conf_tx = NULL, 273 .conf_tx = NULL,
271 .get_tsf = ath5k_get_tsf, 274 .get_tsf = ath5k_get_tsf,
272 .set_tsf = ath5k_set_tsf, 275 .set_tsf = ath5k_set_tsf,
@@ -308,7 +311,7 @@ static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
308 struct ath5k_buf *bf); 311 struct ath5k_buf *bf);
309static int ath5k_txbuf_setup(struct ath5k_softc *sc, 312static int ath5k_txbuf_setup(struct ath5k_softc *sc,
310 struct ath5k_buf *bf, 313 struct ath5k_buf *bf,
311 struct ath5k_txq *txq); 314 struct ath5k_txq *txq, int padsize);
312static inline void ath5k_txbuf_free(struct ath5k_softc *sc, 315static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
313 struct ath5k_buf *bf) 316 struct ath5k_buf *bf)
314{ 317{
@@ -365,6 +368,7 @@ static void ath5k_beacon_send(struct ath5k_softc *sc);
365static void ath5k_beacon_config(struct ath5k_softc *sc); 368static void ath5k_beacon_config(struct ath5k_softc *sc);
366static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf); 369static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
367static void ath5k_tasklet_beacon(unsigned long data); 370static void ath5k_tasklet_beacon(unsigned long data);
371static void ath5k_tasklet_ani(unsigned long data);
368 372
369static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) 373static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
370{ 374{
@@ -544,8 +548,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
544 SET_IEEE80211_DEV(hw, &pdev->dev); 548 SET_IEEE80211_DEV(hw, &pdev->dev);
545 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 549 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
546 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 550 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
547 IEEE80211_HW_SIGNAL_DBM | 551 IEEE80211_HW_SIGNAL_DBM;
548 IEEE80211_HW_NOISE_DBM;
549 552
550 hw->wiphy->interface_modes = 553 hw->wiphy->interface_modes =
551 BIT(NL80211_IFTYPE_AP) | 554 BIT(NL80211_IFTYPE_AP) |
@@ -830,6 +833,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
830 tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc); 833 tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc);
831 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc); 834 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
832 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc); 835 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
836 tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
833 837
834 ret = ath5k_eeprom_read_mac(ah, mac); 838 ret = ath5k_eeprom_read_mac(ah, mac);
835 if (ret) { 839 if (ret) {
@@ -1138,8 +1142,6 @@ ath5k_mode_setup(struct ath5k_softc *sc)
1138 struct ath5k_hw *ah = sc->ah; 1142 struct ath5k_hw *ah = sc->ah;
1139 u32 rfilt; 1143 u32 rfilt;
1140 1144
1141 ah->ah_op_mode = sc->opmode;
1142
1143 /* configure rx filter */ 1145 /* configure rx filter */
1144 rfilt = sc->filter_flags; 1146 rfilt = sc->filter_flags;
1145 ath5k_hw_set_rx_filter(ah, rfilt); 1147 ath5k_hw_set_rx_filter(ah, rfilt);
@@ -1148,8 +1150,9 @@ ath5k_mode_setup(struct ath5k_softc *sc)
1148 ath5k_hw_set_bssid_mask(ah, sc->bssidmask); 1150 ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
1149 1151
1150 /* configure operational mode */ 1152 /* configure operational mode */
1151 ath5k_hw_set_opmode(ah); 1153 ath5k_hw_set_opmode(ah, sc->opmode);
1152 1154
1155 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d\n", sc->opmode);
1153 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); 1156 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
1154} 1157}
1155 1158
@@ -1272,7 +1275,7 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1272 1275
1273static int 1276static int
1274ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, 1277ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1275 struct ath5k_txq *txq) 1278 struct ath5k_txq *txq, int padsize)
1276{ 1279{
1277 struct ath5k_hw *ah = sc->ah; 1280 struct ath5k_hw *ah = sc->ah;
1278 struct ath5k_desc *ds = bf->desc; 1281 struct ath5k_desc *ds = bf->desc;
@@ -1324,7 +1327,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1324 sc->vif, pktlen, info)); 1327 sc->vif, pktlen, info));
1325 } 1328 }
1326 ret = ah->ah_setup_tx_desc(ah, ds, pktlen, 1329 ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
1327 ieee80211_get_hdrlen_from_skb(skb), 1330 ieee80211_get_hdrlen_from_skb(skb), padsize,
1328 get_hw_packet_type(skb), 1331 get_hw_packet_type(skb),
1329 (sc->power_level * 2), 1332 (sc->power_level * 2),
1330 hw_rate, 1333 hw_rate,
@@ -1636,7 +1639,6 @@ ath5k_txq_cleanup(struct ath5k_softc *sc)
1636 sc->txqs[i].link); 1639 sc->txqs[i].link);
1637 } 1640 }
1638 } 1641 }
1639 ieee80211_wake_queues(sc->hw); /* XXX move to callers */
1640 1642
1641 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) 1643 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
1642 if (sc->txqs[i].setup) 1644 if (sc->txqs[i].setup)
@@ -1807,6 +1809,86 @@ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
1807} 1809}
1808 1810
1809static void 1811static void
1812ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
1813{
1814 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1815 struct ath5k_hw *ah = sc->ah;
1816 struct ath_common *common = ath5k_hw_common(ah);
1817
1818 /* only beacons from our BSSID */
1819 if (!ieee80211_is_beacon(mgmt->frame_control) ||
1820 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
1821 return;
1822
1823 ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg,
1824 rssi);
1825
1826 /* in IBSS mode we should keep RSSI statistics per neighbour */
1827 /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
1828}
1829
1830/*
1831 * Compute padding position. skb must contains an IEEE 802.11 frame
1832 */
1833static int ath5k_common_padpos(struct sk_buff *skb)
1834{
1835 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
1836 __le16 frame_control = hdr->frame_control;
1837 int padpos = 24;
1838
1839 if (ieee80211_has_a4(frame_control)) {
1840 padpos += ETH_ALEN;
1841 }
1842 if (ieee80211_is_data_qos(frame_control)) {
1843 padpos += IEEE80211_QOS_CTL_LEN;
1844 }
1845
1846 return padpos;
1847}
1848
1849/*
1850 * This function expects a 802.11 frame and returns the number of
1851 * bytes added, or -1 if we don't have enought header room.
1852 */
1853
1854static int ath5k_add_padding(struct sk_buff *skb)
1855{
1856 int padpos = ath5k_common_padpos(skb);
1857 int padsize = padpos & 3;
1858
1859 if (padsize && skb->len>padpos) {
1860
1861 if (skb_headroom(skb) < padsize)
1862 return -1;
1863
1864 skb_push(skb, padsize);
1865 memmove(skb->data, skb->data+padsize, padpos);
1866 return padsize;
1867 }
1868
1869 return 0;
1870}
1871
1872/*
1873 * This function expects a 802.11 frame and returns the number of
1874 * bytes removed
1875 */
1876
1877static int ath5k_remove_padding(struct sk_buff *skb)
1878{
1879 int padpos = ath5k_common_padpos(skb);
1880 int padsize = padpos & 3;
1881
1882 if (padsize && skb->len>=padpos+padsize) {
1883 memmove(skb->data + padsize, skb->data, padpos);
1884 skb_pull(skb, padsize);
1885 return padsize;
1886 }
1887
1888 return 0;
1889}
1890
1891static void
1810ath5k_tasklet_rx(unsigned long data) 1892ath5k_tasklet_rx(unsigned long data)
1811{ 1893{
1812 struct ieee80211_rx_status *rxs; 1894 struct ieee80211_rx_status *rxs;
@@ -1819,8 +1901,6 @@ ath5k_tasklet_rx(unsigned long data)
1819 struct ath5k_buf *bf; 1901 struct ath5k_buf *bf;
1820 struct ath5k_desc *ds; 1902 struct ath5k_desc *ds;
1821 int ret; 1903 int ret;
1822 int hdrlen;
1823 int padsize;
1824 int rx_flag; 1904 int rx_flag;
1825 1905
1826 spin_lock(&sc->rxbuflock); 1906 spin_lock(&sc->rxbuflock);
@@ -1845,18 +1925,24 @@ ath5k_tasklet_rx(unsigned long data)
1845 break; 1925 break;
1846 else if (unlikely(ret)) { 1926 else if (unlikely(ret)) {
1847 ATH5K_ERR(sc, "error in processing rx descriptor\n"); 1927 ATH5K_ERR(sc, "error in processing rx descriptor\n");
1928 sc->stats.rxerr_proc++;
1848 spin_unlock(&sc->rxbuflock); 1929 spin_unlock(&sc->rxbuflock);
1849 return; 1930 return;
1850 } 1931 }
1851 1932
1852 if (unlikely(rs.rs_more)) { 1933 sc->stats.rx_all_count++;
1853 ATH5K_WARN(sc, "unsupported jumbo\n");
1854 goto next;
1855 }
1856 1934
1857 if (unlikely(rs.rs_status)) { 1935 if (unlikely(rs.rs_status)) {
1858 if (rs.rs_status & AR5K_RXERR_PHY) 1936 if (rs.rs_status & AR5K_RXERR_CRC)
1937 sc->stats.rxerr_crc++;
1938 if (rs.rs_status & AR5K_RXERR_FIFO)
1939 sc->stats.rxerr_fifo++;
1940 if (rs.rs_status & AR5K_RXERR_PHY) {
1941 sc->stats.rxerr_phy++;
1942 if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
1943 sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
1859 goto next; 1944 goto next;
1945 }
1860 if (rs.rs_status & AR5K_RXERR_DECRYPT) { 1946 if (rs.rs_status & AR5K_RXERR_DECRYPT) {
1861 /* 1947 /*
1862 * Decrypt error. If the error occurred 1948 * Decrypt error. If the error occurred
@@ -1868,12 +1954,14 @@ ath5k_tasklet_rx(unsigned long data)
1868 * 1954 *
1869 * XXX do key cache faulting 1955 * XXX do key cache faulting
1870 */ 1956 */
1957 sc->stats.rxerr_decrypt++;
1871 if (rs.rs_keyix == AR5K_RXKEYIX_INVALID && 1958 if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
1872 !(rs.rs_status & AR5K_RXERR_CRC)) 1959 !(rs.rs_status & AR5K_RXERR_CRC))
1873 goto accept; 1960 goto accept;
1874 } 1961 }
1875 if (rs.rs_status & AR5K_RXERR_MIC) { 1962 if (rs.rs_status & AR5K_RXERR_MIC) {
1876 rx_flag |= RX_FLAG_MMIC_ERROR; 1963 rx_flag |= RX_FLAG_MMIC_ERROR;
1964 sc->stats.rxerr_mic++;
1877 goto accept; 1965 goto accept;
1878 } 1966 }
1879 1967
@@ -1883,6 +1971,12 @@ ath5k_tasklet_rx(unsigned long data)
1883 sc->opmode != NL80211_IFTYPE_MONITOR) 1971 sc->opmode != NL80211_IFTYPE_MONITOR)
1884 goto next; 1972 goto next;
1885 } 1973 }
1974
1975 if (unlikely(rs.rs_more)) {
1976 sc->stats.rxerr_jumbo++;
1977 goto next;
1978
1979 }
1886accept: 1980accept:
1887 next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr); 1981 next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
1888 1982
@@ -1905,12 +1999,8 @@ accept:
1905 * bytes and we can optimize this a bit. In addition, we must 1999 * bytes and we can optimize this a bit. In addition, we must
1906 * not try to remove padding from short control frames that do 2000 * not try to remove padding from short control frames that do
1907 * not have payload. */ 2001 * not have payload. */
1908 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 2002 ath5k_remove_padding(skb);
1909 padsize = ath5k_pad_size(hdrlen); 2003
1910 if (padsize) {
1911 memmove(skb->data + padsize, skb->data, hdrlen);
1912 skb_pull(skb, padsize);
1913 }
1914 rxs = IEEE80211_SKB_RXCB(skb); 2004 rxs = IEEE80211_SKB_RXCB(skb);
1915 2005
1916 /* 2006 /*
@@ -1939,10 +2029,15 @@ accept:
1939 rxs->freq = sc->curchan->center_freq; 2029 rxs->freq = sc->curchan->center_freq;
1940 rxs->band = sc->curband->band; 2030 rxs->band = sc->curband->band;
1941 2031
1942 rxs->noise = sc->ah->ah_noise_floor; 2032 rxs->signal = sc->ah->ah_noise_floor + rs.rs_rssi;
1943 rxs->signal = rxs->noise + rs.rs_rssi;
1944 2033
1945 rxs->antenna = rs.rs_antenna; 2034 rxs->antenna = rs.rs_antenna;
2035
2036 if (rs.rs_antenna > 0 && rs.rs_antenna < 5)
2037 sc->stats.antenna_rx[rs.rs_antenna]++;
2038 else
2039 sc->stats.antenna_rx[0]++; /* invalid */
2040
1946 rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); 2041 rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
1947 rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs); 2042 rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
1948 2043
@@ -1952,6 +2047,8 @@ accept:
1952 2047
1953 ath5k_debug_dump_skb(sc, skb, "RX ", 0); 2048 ath5k_debug_dump_skb(sc, skb, "RX ", 0);
1954 2049
2050 ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
2051
1955 /* check beacons in IBSS mode */ 2052 /* check beacons in IBSS mode */
1956 if (sc->opmode == NL80211_IFTYPE_ADHOC) 2053 if (sc->opmode == NL80211_IFTYPE_ADHOC)
1957 ath5k_check_ibss_tsf(sc, skb, rxs); 2054 ath5k_check_ibss_tsf(sc, skb, rxs);
@@ -1988,6 +2085,17 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1988 list_for_each_entry_safe(bf, bf0, &txq->q, list) { 2085 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1989 ds = bf->desc; 2086 ds = bf->desc;
1990 2087
2088 /*
2089 * It's possible that the hardware can say the buffer is
2090 * completed when it hasn't yet loaded the ds_link from
2091 * host memory and moved on. If there are more TX
2092 * descriptors in the queue, wait for TXDP to change
2093 * before processing this one.
2094 */
2095 if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
2096 !list_is_last(&bf->list, &txq->q))
2097 break;
2098
1991 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts); 2099 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
1992 if (unlikely(ret == -EINPROGRESS)) 2100 if (unlikely(ret == -EINPROGRESS))
1993 break; 2101 break;
@@ -1997,6 +2105,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1997 break; 2105 break;
1998 } 2106 }
1999 2107
2108 sc->stats.tx_all_count++;
2000 skb = bf->skb; 2109 skb = bf->skb;
2001 info = IEEE80211_SKB_CB(skb); 2110 info = IEEE80211_SKB_CB(skb);
2002 bf->skb = NULL; 2111 bf->skb = NULL;
@@ -2022,14 +2131,31 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
2022 info->status.rates[ts.ts_final_idx].count++; 2131 info->status.rates[ts.ts_final_idx].count++;
2023 2132
2024 if (unlikely(ts.ts_status)) { 2133 if (unlikely(ts.ts_status)) {
2025 sc->ll_stats.dot11ACKFailureCount++; 2134 sc->stats.ack_fail++;
2026 if (ts.ts_status & AR5K_TXERR_FILT) 2135 if (ts.ts_status & AR5K_TXERR_FILT) {
2027 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 2136 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2137 sc->stats.txerr_filt++;
2138 }
2139 if (ts.ts_status & AR5K_TXERR_XRETRY)
2140 sc->stats.txerr_retry++;
2141 if (ts.ts_status & AR5K_TXERR_FIFO)
2142 sc->stats.txerr_fifo++;
2028 } else { 2143 } else {
2029 info->flags |= IEEE80211_TX_STAT_ACK; 2144 info->flags |= IEEE80211_TX_STAT_ACK;
2030 info->status.ack_signal = ts.ts_rssi; 2145 info->status.ack_signal = ts.ts_rssi;
2031 } 2146 }
2032 2147
2148 /*
2149 * Remove MAC header padding before giving the frame
2150 * back to mac80211.
2151 */
2152 ath5k_remove_padding(skb);
2153
2154 if (ts.ts_antenna > 0 && ts.ts_antenna < 5)
2155 sc->stats.antenna_tx[ts.ts_antenna]++;
2156 else
2157 sc->stats.antenna_tx[0]++; /* invalid */
2158
2033 ieee80211_tx_status(sc->hw, skb); 2159 ieee80211_tx_status(sc->hw, skb);
2034 2160
2035 spin_lock(&sc->txbuflock); 2161 spin_lock(&sc->txbuflock);
@@ -2073,6 +2199,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2073 int ret = 0; 2199 int ret = 0;
2074 u8 antenna; 2200 u8 antenna;
2075 u32 flags; 2201 u32 flags;
2202 const int padsize = 0;
2076 2203
2077 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, 2204 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
2078 PCI_DMA_TODEVICE); 2205 PCI_DMA_TODEVICE);
@@ -2120,7 +2247,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2120 * from tx power (value is in dB units already) */ 2247 * from tx power (value is in dB units already) */
2121 ds->ds_data = bf->skbaddr; 2248 ds->ds_data = bf->skbaddr;
2122 ret = ah->ah_setup_tx_desc(ah, ds, skb->len, 2249 ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
2123 ieee80211_get_hdrlen_from_skb(skb), 2250 ieee80211_get_hdrlen_from_skb(skb), padsize,
2124 AR5K_PKT_TYPE_BEACON, (sc->power_level * 2), 2251 AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
2125 ieee80211_get_tx_rate(sc->hw, info)->hw_value, 2252 ieee80211_get_tx_rate(sc->hw, info)->hw_value,
2126 1, AR5K_TXKEYIX_INVALID, 2253 1, AR5K_TXKEYIX_INVALID,
@@ -2407,9 +2534,6 @@ ath5k_init(struct ath5k_softc *sc)
2407 */ 2534 */
2408 ath5k_stop_locked(sc); 2535 ath5k_stop_locked(sc);
2409 2536
2410 /* Set PHY calibration interval */
2411 ah->ah_cal_intval = ath5k_calinterval;
2412
2413 /* 2537 /*
2414 * The basic interface to setting the hardware in a good 2538 * The basic interface to setting the hardware in a good
2415 * state is ``reset''. On return the hardware is known to 2539 * state is ``reset''. On return the hardware is known to
@@ -2421,7 +2545,8 @@ ath5k_init(struct ath5k_softc *sc)
2421 sc->curband = &sc->sbands[sc->curchan->band]; 2545 sc->curband = &sc->sbands[sc->curchan->band];
2422 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | 2546 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2423 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | 2547 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2424 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_SWI; 2548 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2549
2425 ret = ath5k_reset(sc, NULL); 2550 ret = ath5k_reset(sc, NULL);
2426 if (ret) 2551 if (ret)
2427 goto done; 2552 goto done;
@@ -2435,8 +2560,7 @@ ath5k_init(struct ath5k_softc *sc)
2435 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++) 2560 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
2436 ath5k_hw_reset_key(ah, i); 2561 ath5k_hw_reset_key(ah, i);
2437 2562
2438 /* Set ack to be sent at low bit-rates */ 2563 ath5k_hw_set_ack_bitrate_high(ah, true);
2439 ath5k_hw_set_ack_bitrate_high(ah, false);
2440 ret = 0; 2564 ret = 0;
2441done: 2565done:
2442 mmiowb(); 2566 mmiowb();
@@ -2533,12 +2657,33 @@ ath5k_stop_hw(struct ath5k_softc *sc)
2533 tasklet_kill(&sc->restq); 2657 tasklet_kill(&sc->restq);
2534 tasklet_kill(&sc->calib); 2658 tasklet_kill(&sc->calib);
2535 tasklet_kill(&sc->beacontq); 2659 tasklet_kill(&sc->beacontq);
2660 tasklet_kill(&sc->ani_tasklet);
2536 2661
2537 ath5k_rfkill_hw_stop(sc->ah); 2662 ath5k_rfkill_hw_stop(sc->ah);
2538 2663
2539 return ret; 2664 return ret;
2540} 2665}
2541 2666
2667static void
2668ath5k_intr_calibration_poll(struct ath5k_hw *ah)
2669{
2670 if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
2671 !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
2672 /* run ANI only when full calibration is not active */
2673 ah->ah_cal_next_ani = jiffies +
2674 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
2675 tasklet_schedule(&ah->ah_sc->ani_tasklet);
2676
2677 } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
2678 ah->ah_cal_next_full = jiffies +
2679 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
2680 tasklet_schedule(&ah->ah_sc->calib);
2681 }
2682 /* we could use SWI to generate enough interrupts to meet our
2683 * calibration interval requirements, if necessary:
2684 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
2685}
2686
2542static irqreturn_t 2687static irqreturn_t
2543ath5k_intr(int irq, void *dev_id) 2688ath5k_intr(int irq, void *dev_id)
2544{ 2689{
@@ -2562,7 +2707,20 @@ ath5k_intr(int irq, void *dev_id)
2562 */ 2707 */
2563 tasklet_schedule(&sc->restq); 2708 tasklet_schedule(&sc->restq);
2564 } else if (unlikely(status & AR5K_INT_RXORN)) { 2709 } else if (unlikely(status & AR5K_INT_RXORN)) {
2565 tasklet_schedule(&sc->restq); 2710 /*
2711 * Receive buffers are full. Either the bus is busy or
2712 * the CPU is not fast enough to process all received
2713 * frames.
2714 * Older chipsets need a reset to come out of this
2715 * condition, but we treat it as RX for newer chips.
2716 * We don't know exactly which versions need a reset -
2717 * this guess is copied from the HAL.
2718 */
2719 sc->stats.rxorn_intr++;
2720 if (ah->ah_mac_srev < AR5K_SREV_AR5212)
2721 tasklet_schedule(&sc->restq);
2722 else
2723 tasklet_schedule(&sc->rxtq);
2566 } else { 2724 } else {
2567 if (status & AR5K_INT_SWBA) { 2725 if (status & AR5K_INT_SWBA) {
2568 tasklet_hi_schedule(&sc->beacontq); 2726 tasklet_hi_schedule(&sc->beacontq);
@@ -2587,15 +2745,10 @@ ath5k_intr(int irq, void *dev_id)
2587 if (status & AR5K_INT_BMISS) { 2745 if (status & AR5K_INT_BMISS) {
2588 /* TODO */ 2746 /* TODO */
2589 } 2747 }
2590 if (status & AR5K_INT_SWI) {
2591 tasklet_schedule(&sc->calib);
2592 }
2593 if (status & AR5K_INT_MIB) { 2748 if (status & AR5K_INT_MIB) {
2594 /* 2749 sc->stats.mib_intr++;
2595 * These stats are also used for ANI i think 2750 ath5k_hw_update_mib_counters(ah);
2596 * so how about updating them more often ? 2751 ath5k_ani_mib_intr(ah);
2597 */
2598 ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
2599 } 2752 }
2600 if (status & AR5K_INT_GPIO) 2753 if (status & AR5K_INT_GPIO)
2601 tasklet_schedule(&sc->rf_kill.toggleq); 2754 tasklet_schedule(&sc->rf_kill.toggleq);
@@ -2606,7 +2759,7 @@ ath5k_intr(int irq, void *dev_id)
2606 if (unlikely(!counter)) 2759 if (unlikely(!counter))
2607 ATH5K_WARN(sc, "too many interrupts, giving up for now\n"); 2760 ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
2608 2761
2609 ath5k_hw_calibration_poll(ah); 2762 ath5k_intr_calibration_poll(ah);
2610 2763
2611 return IRQ_HANDLED; 2764 return IRQ_HANDLED;
2612} 2765}
@@ -2630,8 +2783,7 @@ ath5k_tasklet_calibrate(unsigned long data)
2630 struct ath5k_hw *ah = sc->ah; 2783 struct ath5k_hw *ah = sc->ah;
2631 2784
2632 /* Only full calibration for now */ 2785 /* Only full calibration for now */
2633 if (ah->ah_swi_mask != AR5K_SWI_FULL_CALIBRATION) 2786 ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
2634 return;
2635 2787
2636 /* Stop queues so that calibration 2788 /* Stop queues so that calibration
2637 * doesn't interfere with tx */ 2789 * doesn't interfere with tx */
@@ -2647,18 +2799,29 @@ ath5k_tasklet_calibrate(unsigned long data)
2647 * to load new gain values. 2799 * to load new gain values.
2648 */ 2800 */
2649 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n"); 2801 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
2650 ath5k_reset_wake(sc); 2802 ath5k_reset(sc, sc->curchan);
2651 } 2803 }
2652 if (ath5k_hw_phy_calibrate(ah, sc->curchan)) 2804 if (ath5k_hw_phy_calibrate(ah, sc->curchan))
2653 ATH5K_ERR(sc, "calibration of channel %u failed\n", 2805 ATH5K_ERR(sc, "calibration of channel %u failed\n",
2654 ieee80211_frequency_to_channel( 2806 ieee80211_frequency_to_channel(
2655 sc->curchan->center_freq)); 2807 sc->curchan->center_freq));
2656 2808
2657 ah->ah_swi_mask = 0;
2658
2659 /* Wake queues */ 2809 /* Wake queues */
2660 ieee80211_wake_queues(sc->hw); 2810 ieee80211_wake_queues(sc->hw);
2661 2811
2812 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
2813}
2814
2815
2816static void
2817ath5k_tasklet_ani(unsigned long data)
2818{
2819 struct ath5k_softc *sc = (void *)data;
2820 struct ath5k_hw *ah = sc->ah;
2821
2822 ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
2823 ath5k_ani_calibration(ah);
2824 ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
2662} 2825}
2663 2826
2664 2827
@@ -2680,7 +2843,6 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
2680 struct ath5k_softc *sc = hw->priv; 2843 struct ath5k_softc *sc = hw->priv;
2681 struct ath5k_buf *bf; 2844 struct ath5k_buf *bf;
2682 unsigned long flags; 2845 unsigned long flags;
2683 int hdrlen;
2684 int padsize; 2846 int padsize;
2685 2847
2686 ath5k_debug_dump_skb(sc, skb, "TX ", 1); 2848 ath5k_debug_dump_skb(sc, skb, "TX ", 1);
@@ -2692,17 +2854,11 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
2692 * the hardware expects the header padded to 4 byte boundaries 2854 * the hardware expects the header padded to 4 byte boundaries
2693 * if this is not the case we add the padding after the header 2855 * if this is not the case we add the padding after the header
2694 */ 2856 */
2695 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 2857 padsize = ath5k_add_padding(skb);
2696 padsize = ath5k_pad_size(hdrlen); 2858 if (padsize < 0) {
2697 if (padsize) { 2859 ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
2698 2860 " headroom to pad");
2699 if (skb_headroom(skb) < padsize) { 2861 goto drop_packet;
2700 ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
2701 " headroom to pad %d\n", hdrlen, padsize);
2702 goto drop_packet;
2703 }
2704 skb_push(skb, padsize);
2705 memmove(skb->data, skb->data+padsize, hdrlen);
2706 } 2862 }
2707 2863
2708 spin_lock_irqsave(&sc->txbuflock, flags); 2864 spin_lock_irqsave(&sc->txbuflock, flags);
@@ -2721,7 +2877,7 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
2721 2877
2722 bf->skb = skb; 2878 bf->skb = skb;
2723 2879
2724 if (ath5k_txbuf_setup(sc, bf, txq)) { 2880 if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
2725 bf->skb = NULL; 2881 bf->skb = NULL;
2726 spin_lock_irqsave(&sc->txbuflock, flags); 2882 spin_lock_irqsave(&sc->txbuflock, flags);
2727 list_add_tail(&bf->list, &sc->txbuf); 2883 list_add_tail(&bf->list, &sc->txbuf);
@@ -2768,6 +2924,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
2768 goto err; 2924 goto err;
2769 } 2925 }
2770 2926
2927 ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
2928
2771 /* 2929 /*
2772 * Change channels and update the h/w rate map if we're switching; 2930 * Change channels and update the h/w rate map if we're switching;
2773 * e.g. 11a to 11b/g. 2931 * e.g. 11a to 11b/g.
@@ -2836,6 +2994,8 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2836 goto end; 2994 goto end;
2837 } 2995 }
2838 2996
2997 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", sc->opmode);
2998
2839 ath5k_hw_set_lladdr(sc->ah, vif->addr); 2999 ath5k_hw_set_lladdr(sc->ah, vif->addr);
2840 ath5k_mode_setup(sc); 3000 ath5k_mode_setup(sc);
2841 3001
@@ -2906,7 +3066,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
2906 * then we must allow the user to set how many tx antennas we 3066 * then we must allow the user to set how many tx antennas we
2907 * have available 3067 * have available
2908 */ 3068 */
2909 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT); 3069 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
2910 3070
2911unlock: 3071unlock:
2912 mutex_unlock(&sc->lock); 3072 mutex_unlock(&sc->lock);
@@ -2914,22 +3074,20 @@ unlock:
2914} 3074}
2915 3075
2916static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, 3076static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
2917 int mc_count, struct dev_addr_list *mclist) 3077 struct netdev_hw_addr_list *mc_list)
2918{ 3078{
2919 u32 mfilt[2], val; 3079 u32 mfilt[2], val;
2920 int i;
2921 u8 pos; 3080 u8 pos;
3081 struct netdev_hw_addr *ha;
2922 3082
2923 mfilt[0] = 0; 3083 mfilt[0] = 0;
2924 mfilt[1] = 1; 3084 mfilt[1] = 1;
2925 3085
2926 for (i = 0; i < mc_count; i++) { 3086 netdev_hw_addr_list_for_each(ha, mc_list) {
2927 if (!mclist)
2928 break;
2929 /* calculate XOR of eight 6-bit values */ 3087 /* calculate XOR of eight 6-bit values */
2930 val = get_unaligned_le32(mclist->dmi_addr + 0); 3088 val = get_unaligned_le32(ha->addr + 0);
2931 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 3089 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2932 val = get_unaligned_le32(mclist->dmi_addr + 3); 3090 val = get_unaligned_le32(ha->addr + 3);
2933 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 3091 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2934 pos &= 0x3f; 3092 pos &= 0x3f;
2935 mfilt[pos / 32] |= (1 << (pos % 32)); 3093 mfilt[pos / 32] |= (1 << (pos % 32));
@@ -2937,8 +3095,7 @@ static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
2937 * but not sure, needs testing, if we do use this we'd 3095 * but not sure, needs testing, if we do use this we'd
2938 * neet to inform below to not reset the mcast */ 3096 * neet to inform below to not reset the mcast */
2939 /* ath5k_hw_set_mcast_filterindex(ah, 3097 /* ath5k_hw_set_mcast_filterindex(ah,
2940 * mclist->dmi_addr[5]); */ 3098 * ha->addr[5]); */
2941 mclist = mclist->next;
2942 } 3099 }
2943 3100
2944 return ((u64)(mfilt[1]) << 32) | mfilt[0]; 3101 return ((u64)(mfilt[1]) << 32) | mfilt[0];
@@ -3124,12 +3281,30 @@ ath5k_get_stats(struct ieee80211_hw *hw,
3124 struct ieee80211_low_level_stats *stats) 3281 struct ieee80211_low_level_stats *stats)
3125{ 3282{
3126 struct ath5k_softc *sc = hw->priv; 3283 struct ath5k_softc *sc = hw->priv;
3127 struct ath5k_hw *ah = sc->ah;
3128 3284
3129 /* Force update */ 3285 /* Force update */
3130 ath5k_hw_update_mib_counters(ah, &sc->ll_stats); 3286 ath5k_hw_update_mib_counters(sc->ah);
3287
3288 stats->dot11ACKFailureCount = sc->stats.ack_fail;
3289 stats->dot11RTSFailureCount = sc->stats.rts_fail;
3290 stats->dot11RTSSuccessCount = sc->stats.rts_ok;
3291 stats->dot11FCSErrorCount = sc->stats.fcs_error;
3292
3293 return 0;
3294}
3295
3296static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
3297 struct survey_info *survey)
3298{
3299 struct ath5k_softc *sc = hw->priv;
3300 struct ieee80211_conf *conf = &hw->conf;
3301
3302 if (idx != 0)
3303 return -ENOENT;
3131 3304
3132 memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats)); 3305 survey->channel = conf->channel;
3306 survey->filled = SURVEY_INFO_NOISE_DBM;
3307 survey->noise = sc->ah->ah_noise_floor;
3133 3308
3134 return 0; 3309 return 0;
3135} 3310}
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 7e1a88a5abdb..56221bc7c8cd 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -50,6 +50,7 @@
50 50
51#include "ath5k.h" 51#include "ath5k.h"
52#include "debug.h" 52#include "debug.h"
53#include "ani.h"
53 54
54#include "../regd.h" 55#include "../regd.h"
55#include "../ath.h" 56#include "../ath.h"
@@ -105,6 +106,38 @@ struct ath5k_rfkill {
105 struct tasklet_struct toggleq; 106 struct tasklet_struct toggleq;
106}; 107};
107 108
109/* statistics */
110struct ath5k_statistics {
111 /* antenna use */
112 unsigned int antenna_rx[5]; /* frames count per antenna RX */
113 unsigned int antenna_tx[5]; /* frames count per antenna TX */
114
115 /* frame errors */
116 unsigned int rx_all_count; /* all RX frames, including errors */
117 unsigned int tx_all_count; /* all TX frames, including errors */
118 unsigned int rxerr_crc;
119 unsigned int rxerr_phy;
120 unsigned int rxerr_phy_code[32];
121 unsigned int rxerr_fifo;
122 unsigned int rxerr_decrypt;
123 unsigned int rxerr_mic;
124 unsigned int rxerr_proc;
125 unsigned int rxerr_jumbo;
126 unsigned int txerr_retry;
127 unsigned int txerr_fifo;
128 unsigned int txerr_filt;
129
130 /* MIB counters */
131 unsigned int ack_fail;
132 unsigned int rts_fail;
133 unsigned int rts_ok;
134 unsigned int fcs_error;
135 unsigned int beacons;
136
137 unsigned int mib_intr;
138 unsigned int rxorn_intr;
139};
140
108#if CHAN_DEBUG 141#if CHAN_DEBUG
109#define ATH_CHAN_MAX (26+26+26+200+200) 142#define ATH_CHAN_MAX (26+26+26+200+200)
110#else 143#else
@@ -117,7 +150,6 @@ struct ath5k_softc {
117 struct pci_dev *pdev; /* for dma mapping */ 150 struct pci_dev *pdev; /* for dma mapping */
118 void __iomem *iobase; /* address of the device */ 151 void __iomem *iobase; /* address of the device */
119 struct mutex lock; /* dev-level lock */ 152 struct mutex lock; /* dev-level lock */
120 struct ieee80211_low_level_stats ll_stats;
121 struct ieee80211_hw *hw; /* IEEE 802.11 common */ 153 struct ieee80211_hw *hw; /* IEEE 802.11 common */
122 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 154 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
123 struct ieee80211_channel channels[ATH_CHAN_MAX]; 155 struct ieee80211_channel channels[ATH_CHAN_MAX];
@@ -191,6 +223,11 @@ struct ath5k_softc {
191 int power_level; /* Requested tx power in dbm */ 223 int power_level; /* Requested tx power in dbm */
192 bool assoc; /* associate state */ 224 bool assoc; /* associate state */
193 bool enable_beacon; /* true if beacons are on */ 225 bool enable_beacon; /* true if beacons are on */
226
227 struct ath5k_statistics stats;
228
229 struct ath5k_ani_state ani_state;
230 struct tasklet_struct ani_tasklet; /* ANI calibration */
194}; 231};
195 232
196#define ath5k_hw_hasbssidmask(_ah) \ 233#define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 367a6c7d3cc7..74f007126f41 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -102,9 +102,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
102 } 102 }
103 } 103 }
104 104
105 /* GPIO */
106 ah->ah_gpio_npins = AR5K_NUM_GPIO;
107
108 /* Set number of supported TX queues */ 105 /* Set number of supported TX queues */
109 if (ah->ah_version == AR5K_AR5210) 106 if (ah->ah_version == AR5K_AR5210)
110 ah->ah_capabilities.cap_queues.q_tx_num = 107 ah->ah_capabilities.cap_queues.q_tx_num =
@@ -112,6 +109,12 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
112 else 109 else
113 ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES; 110 ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
114 111
112 /* newer hardware has PHY error counters */
113 if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
114 ah->ah_capabilities.cap_has_phyerr_counters = true;
115 else
116 ah->ah_capabilities.cap_has_phyerr_counters = false;
117
115 return 0; 118 return 0;
116} 119}
117 120
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 747508c15d34..6fb5c5ffa5b1 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -69,6 +69,7 @@ module_param_named(debug, ath5k_debug, uint, 0);
69 69
70#include <linux/seq_file.h> 70#include <linux/seq_file.h>
71#include "reg.h" 71#include "reg.h"
72#include "ani.h"
72 73
73static struct dentry *ath5k_global_debugfs; 74static struct dentry *ath5k_global_debugfs;
74 75
@@ -307,6 +308,7 @@ static const struct {
307 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" }, 308 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
308 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, 309 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
309 { ATH5K_DEBUG_TRACE, "trace", "trace function calls" }, 310 { ATH5K_DEBUG_TRACE, "trace", "trace function calls" },
311 { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
310 { ATH5K_DEBUG_ANY, "all", "show all debug levels" }, 312 { ATH5K_DEBUG_ANY, "all", "show all debug levels" },
311}; 313};
312 314
@@ -364,6 +366,369 @@ static const struct file_operations fops_debug = {
364}; 366};
365 367
366 368
369/* debugfs: antenna */
370
371static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
372 size_t count, loff_t *ppos)
373{
374 struct ath5k_softc *sc = file->private_data;
375 char buf[700];
376 unsigned int len = 0;
377 unsigned int i;
378 unsigned int v;
379
380 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
381 sc->ah->ah_ant_mode);
382 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
383 sc->ah->ah_def_ant);
384 len += snprintf(buf+len, sizeof(buf)-len, "tx antenna\t%d\n",
385 sc->ah->ah_tx_ant);
386
387 len += snprintf(buf+len, sizeof(buf)-len, "\nANTENNA\t\tRX\tTX\n");
388 for (i = 1; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
389 len += snprintf(buf+len, sizeof(buf)-len,
390 "[antenna %d]\t%d\t%d\n",
391 i, sc->stats.antenna_rx[i], sc->stats.antenna_tx[i]);
392 }
393 len += snprintf(buf+len, sizeof(buf)-len, "[invalid]\t%d\t%d\n",
394 sc->stats.antenna_rx[0], sc->stats.antenna_tx[0]);
395
396 v = ath5k_hw_reg_read(sc->ah, AR5K_DEFAULT_ANTENNA);
397 len += snprintf(buf+len, sizeof(buf)-len,
398 "\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);
399
400 v = ath5k_hw_reg_read(sc->ah, AR5K_STA_ID1);
401 len += snprintf(buf+len, sizeof(buf)-len,
402 "AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
403 (v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
404 len += snprintf(buf+len, sizeof(buf)-len,
405 "AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
406 (v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
407 len += snprintf(buf+len, sizeof(buf)-len,
408 "AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
409 (v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
410 len += snprintf(buf+len, sizeof(buf)-len,
411 "AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
412 (v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);
413
414 v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_AGCCTL);
415 len += snprintf(buf+len, sizeof(buf)-len,
416 "\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
417 (v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);
418
419 v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_RESTART);
420 len += snprintf(buf+len, sizeof(buf)-len,
421 "AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
422 (v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);
423
424 v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_FAST_ANT_DIV);
425 len += snprintf(buf+len, sizeof(buf)-len,
426 "AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
427 (v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
428
429 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
430}
431
432static ssize_t write_file_antenna(struct file *file,
433 const char __user *userbuf,
434 size_t count, loff_t *ppos)
435{
436 struct ath5k_softc *sc = file->private_data;
437 unsigned int i;
438 char buf[20];
439
440 if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
441 return -EFAULT;
442
443 if (strncmp(buf, "diversity", 9) == 0) {
444 ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
445 printk(KERN_INFO "ath5k debug: enable diversity\n");
446 } else if (strncmp(buf, "fixed-a", 7) == 0) {
447 ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
448 printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
449 } else if (strncmp(buf, "fixed-b", 7) == 0) {
450 ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
451 printk(KERN_INFO "ath5k debug: fixed antenna B\n");
452 } else if (strncmp(buf, "clear", 5) == 0) {
453 for (i = 0; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
454 sc->stats.antenna_rx[i] = 0;
455 sc->stats.antenna_tx[i] = 0;
456 }
457 printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
458 }
459 return count;
460}
461
462static const struct file_operations fops_antenna = {
463 .read = read_file_antenna,
464 .write = write_file_antenna,
465 .open = ath5k_debugfs_open,
466 .owner = THIS_MODULE,
467};
468
469
470/* debugfs: frameerrors */
471
472static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
473 size_t count, loff_t *ppos)
474{
475 struct ath5k_softc *sc = file->private_data;
476 struct ath5k_statistics *st = &sc->stats;
477 char buf[700];
478 unsigned int len = 0;
479 int i;
480
481 len += snprintf(buf+len, sizeof(buf)-len,
482 "RX\n---------------------\n");
483 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%d\t(%d%%)\n",
484 st->rxerr_crc,
485 st->rx_all_count > 0 ?
486 st->rxerr_crc*100/st->rx_all_count : 0);
487 len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%d\t(%d%%)\n",
488 st->rxerr_phy,
489 st->rx_all_count > 0 ?
490 st->rxerr_phy*100/st->rx_all_count : 0);
491 for (i = 0; i < 32; i++) {
492 if (st->rxerr_phy_code[i])
493 len += snprintf(buf+len, sizeof(buf)-len,
494 " phy_err[%d]\t%d\n",
495 i, st->rxerr_phy_code[i]);
496 }
497
498 len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
499 st->rxerr_fifo,
500 st->rx_all_count > 0 ?
501 st->rxerr_fifo*100/st->rx_all_count : 0);
502 len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%d\t(%d%%)\n",
503 st->rxerr_decrypt,
504 st->rx_all_count > 0 ?
505 st->rxerr_decrypt*100/st->rx_all_count : 0);
506 len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%d\t(%d%%)\n",
507 st->rxerr_mic,
508 st->rx_all_count > 0 ?
509 st->rxerr_mic*100/st->rx_all_count : 0);
510 len += snprintf(buf+len, sizeof(buf)-len, "process\t%d\t(%d%%)\n",
511 st->rxerr_proc,
512 st->rx_all_count > 0 ?
513 st->rxerr_proc*100/st->rx_all_count : 0);
514 len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%d\t(%d%%)\n",
515 st->rxerr_jumbo,
516 st->rx_all_count > 0 ?
517 st->rxerr_jumbo*100/st->rx_all_count : 0);
518 len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%d]\n",
519 st->rx_all_count);
520
521 len += snprintf(buf+len, sizeof(buf)-len,
522 "\nTX\n---------------------\n");
523 len += snprintf(buf+len, sizeof(buf)-len, "retry\t%d\t(%d%%)\n",
524 st->txerr_retry,
525 st->tx_all_count > 0 ?
526 st->txerr_retry*100/st->tx_all_count : 0);
527 len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
528 st->txerr_fifo,
529 st->tx_all_count > 0 ?
530 st->txerr_fifo*100/st->tx_all_count : 0);
531 len += snprintf(buf+len, sizeof(buf)-len, "filter\t%d\t(%d%%)\n",
532 st->txerr_filt,
533 st->tx_all_count > 0 ?
534 st->txerr_filt*100/st->tx_all_count : 0);
535 len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n",
536 st->tx_all_count);
537
538 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
539}
540
541static ssize_t write_file_frameerrors(struct file *file,
542 const char __user *userbuf,
543 size_t count, loff_t *ppos)
544{
545 struct ath5k_softc *sc = file->private_data;
546 struct ath5k_statistics *st = &sc->stats;
547 char buf[20];
548
549 if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
550 return -EFAULT;
551
552 if (strncmp(buf, "clear", 5) == 0) {
553 st->rxerr_crc = 0;
554 st->rxerr_phy = 0;
555 st->rxerr_fifo = 0;
556 st->rxerr_decrypt = 0;
557 st->rxerr_mic = 0;
558 st->rxerr_proc = 0;
559 st->rxerr_jumbo = 0;
560 st->rx_all_count = 0;
561 st->txerr_retry = 0;
562 st->txerr_fifo = 0;
563 st->txerr_filt = 0;
564 st->tx_all_count = 0;
565 printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n");
566 }
567 return count;
568}
569
570static const struct file_operations fops_frameerrors = {
571 .read = read_file_frameerrors,
572 .write = write_file_frameerrors,
573 .open = ath5k_debugfs_open,
574 .owner = THIS_MODULE,
575};
576
577
578/* debugfs: ani */
579
580static ssize_t read_file_ani(struct file *file, char __user *user_buf,
581 size_t count, loff_t *ppos)
582{
583 struct ath5k_softc *sc = file->private_data;
584 struct ath5k_statistics *st = &sc->stats;
585 struct ath5k_ani_state *as = &sc->ani_state;
586
587 char buf[700];
588 unsigned int len = 0;
589
590 len += snprintf(buf+len, sizeof(buf)-len,
591 "HW has PHY error counters:\t%s\n",
592 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
593 "yes" : "no");
594 len += snprintf(buf+len, sizeof(buf)-len,
595 "HW max spur immunity level:\t%d\n",
596 as->max_spur_level);
597 len += snprintf(buf+len, sizeof(buf)-len,
598 "\nANI state\n--------------------------------------------\n");
599 len += snprintf(buf+len, sizeof(buf)-len, "operating mode:\t\t\t");
600 switch (as->ani_mode) {
601 case ATH5K_ANI_MODE_OFF:
602 len += snprintf(buf+len, sizeof(buf)-len, "OFF\n");
603 break;
604 case ATH5K_ANI_MODE_MANUAL_LOW:
605 len += snprintf(buf+len, sizeof(buf)-len,
606 "MANUAL LOW\n");
607 break;
608 case ATH5K_ANI_MODE_MANUAL_HIGH:
609 len += snprintf(buf+len, sizeof(buf)-len,
610 "MANUAL HIGH\n");
611 break;
612 case ATH5K_ANI_MODE_AUTO:
613 len += snprintf(buf+len, sizeof(buf)-len, "AUTO\n");
614 break;
615 default:
616 len += snprintf(buf+len, sizeof(buf)-len,
617 "??? (not good)\n");
618 break;
619 }
620 len += snprintf(buf+len, sizeof(buf)-len,
621 "noise immunity level:\t\t%d\n",
622 as->noise_imm_level);
623 len += snprintf(buf+len, sizeof(buf)-len,
624 "spur immunity level:\t\t%d\n",
625 as->spur_level);
626 len += snprintf(buf+len, sizeof(buf)-len, "firstep level:\t\t\t%d\n",
627 as->firstep_level);
628 len += snprintf(buf+len, sizeof(buf)-len,
629 "OFDM weak signal detection:\t%s\n",
630 as->ofdm_weak_sig ? "on" : "off");
631 len += snprintf(buf+len, sizeof(buf)-len,
632 "CCK weak signal detection:\t%s\n",
633 as->cck_weak_sig ? "on" : "off");
634
635 len += snprintf(buf+len, sizeof(buf)-len,
636 "\nMIB INTERRUPTS:\t\t%u\n",
637 st->mib_intr);
638 len += snprintf(buf+len, sizeof(buf)-len,
639 "beacon RSSI average:\t%d\n",
640 sc->ah->ah_beacon_rssi_avg.avg);
641 len += snprintf(buf+len, sizeof(buf)-len, "profcnt tx\t\t%u\t(%d%%)\n",
642 as->pfc_tx,
643 as->pfc_cycles > 0 ?
644 as->pfc_tx*100/as->pfc_cycles : 0);
645 len += snprintf(buf+len, sizeof(buf)-len, "profcnt rx\t\t%u\t(%d%%)\n",
646 as->pfc_rx,
647 as->pfc_cycles > 0 ?
648 as->pfc_rx*100/as->pfc_cycles : 0);
649 len += snprintf(buf+len, sizeof(buf)-len, "profcnt busy\t\t%u\t(%d%%)\n",
650 as->pfc_busy,
651 as->pfc_cycles > 0 ?
652 as->pfc_busy*100/as->pfc_cycles : 0);
653 len += snprintf(buf+len, sizeof(buf)-len, "profcnt cycles\t\t%u\n",
654 as->pfc_cycles);
655 len += snprintf(buf+len, sizeof(buf)-len,
656 "listen time\t\t%d\tlast: %d\n",
657 as->listen_time, as->last_listen);
658 len += snprintf(buf+len, sizeof(buf)-len,
659 "OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
660 as->ofdm_errors, as->last_ofdm_errors,
661 as->sum_ofdm_errors);
662 len += snprintf(buf+len, sizeof(buf)-len,
663 "CCK errors\t\t%u\tlast: %u\tsum: %u\n",
664 as->cck_errors, as->last_cck_errors,
665 as->sum_cck_errors);
666 len += snprintf(buf+len, sizeof(buf)-len,
667 "AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
668 ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1),
669 ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
670 ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1)));
671 len += snprintf(buf+len, sizeof(buf)-len,
672 "AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
673 ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2),
674 ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
675 ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2)));
676
677 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
678}
679
680static ssize_t write_file_ani(struct file *file,
681 const char __user *userbuf,
682 size_t count, loff_t *ppos)
683{
684 struct ath5k_softc *sc = file->private_data;
685 char buf[20];
686
687 if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
688 return -EFAULT;
689
690 if (strncmp(buf, "sens-low", 8) == 0) {
691 ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_HIGH);
692 } else if (strncmp(buf, "sens-high", 9) == 0) {
693 ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_LOW);
694 } else if (strncmp(buf, "ani-off", 7) == 0) {
695 ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_OFF);
696 } else if (strncmp(buf, "ani-on", 6) == 0) {
697 ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_AUTO);
698 } else if (strncmp(buf, "noise-low", 9) == 0) {
699 ath5k_ani_set_noise_immunity_level(sc->ah, 0);
700 } else if (strncmp(buf, "noise-high", 10) == 0) {
701 ath5k_ani_set_noise_immunity_level(sc->ah,
702 ATH5K_ANI_MAX_NOISE_IMM_LVL);
703 } else if (strncmp(buf, "spur-low", 8) == 0) {
704 ath5k_ani_set_spur_immunity_level(sc->ah, 0);
705 } else if (strncmp(buf, "spur-high", 9) == 0) {
706 ath5k_ani_set_spur_immunity_level(sc->ah,
707 sc->ani_state.max_spur_level);
708 } else if (strncmp(buf, "fir-low", 7) == 0) {
709 ath5k_ani_set_firstep_level(sc->ah, 0);
710 } else if (strncmp(buf, "fir-high", 8) == 0) {
711 ath5k_ani_set_firstep_level(sc->ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
712 } else if (strncmp(buf, "ofdm-off", 8) == 0) {
713 ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, false);
714 } else if (strncmp(buf, "ofdm-on", 7) == 0) {
715 ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, true);
716 } else if (strncmp(buf, "cck-off", 7) == 0) {
717 ath5k_ani_set_cck_weak_signal_detection(sc->ah, false);
718 } else if (strncmp(buf, "cck-on", 6) == 0) {
719 ath5k_ani_set_cck_weak_signal_detection(sc->ah, true);
720 }
721 return count;
722}
723
724static const struct file_operations fops_ani = {
725 .read = read_file_ani,
726 .write = write_file_ani,
727 .open = ath5k_debugfs_open,
728 .owner = THIS_MODULE,
729};
730
731
367/* init */ 732/* init */
368 733
369void 734void
@@ -393,6 +758,20 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
393 758
394 sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR, 759 sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR,
395 sc->debug.debugfs_phydir, sc, &fops_reset); 760 sc->debug.debugfs_phydir, sc, &fops_reset);
761
762 sc->debug.debugfs_antenna = debugfs_create_file("antenna",
763 S_IWUSR | S_IRUSR,
764 sc->debug.debugfs_phydir, sc, &fops_antenna);
765
766 sc->debug.debugfs_frameerrors = debugfs_create_file("frameerrors",
767 S_IWUSR | S_IRUSR,
768 sc->debug.debugfs_phydir, sc,
769 &fops_frameerrors);
770
771 sc->debug.debugfs_ani = debugfs_create_file("ani",
772 S_IWUSR | S_IRUSR,
773 sc->debug.debugfs_phydir, sc,
774 &fops_ani);
396} 775}
397 776
398void 777void
@@ -408,6 +787,9 @@ ath5k_debug_finish_device(struct ath5k_softc *sc)
408 debugfs_remove(sc->debug.debugfs_registers); 787 debugfs_remove(sc->debug.debugfs_registers);
409 debugfs_remove(sc->debug.debugfs_beacon); 788 debugfs_remove(sc->debug.debugfs_beacon);
410 debugfs_remove(sc->debug.debugfs_reset); 789 debugfs_remove(sc->debug.debugfs_reset);
790 debugfs_remove(sc->debug.debugfs_antenna);
791 debugfs_remove(sc->debug.debugfs_frameerrors);
792 debugfs_remove(sc->debug.debugfs_ani);
411 debugfs_remove(sc->debug.debugfs_phydir); 793 debugfs_remove(sc->debug.debugfs_phydir);
412} 794}
413 795
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 66f69f04e55e..ddd5b3a99e8d 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -74,6 +74,9 @@ struct ath5k_dbg_info {
74 struct dentry *debugfs_registers; 74 struct dentry *debugfs_registers;
75 struct dentry *debugfs_beacon; 75 struct dentry *debugfs_beacon;
76 struct dentry *debugfs_reset; 76 struct dentry *debugfs_reset;
77 struct dentry *debugfs_antenna;
78 struct dentry *debugfs_frameerrors;
79 struct dentry *debugfs_ani;
77}; 80};
78 81
79/** 82/**
@@ -113,6 +116,7 @@ enum ath5k_debug_level {
113 ATH5K_DEBUG_DUMP_TX = 0x00000200, 116 ATH5K_DEBUG_DUMP_TX = 0x00000200,
114 ATH5K_DEBUG_DUMPBANDS = 0x00000400, 117 ATH5K_DEBUG_DUMPBANDS = 0x00000400,
115 ATH5K_DEBUG_TRACE = 0x00001000, 118 ATH5K_DEBUG_TRACE = 0x00001000,
119 ATH5K_DEBUG_ANI = 0x00002000,
116 ATH5K_DEBUG_ANY = 0xffffffff 120 ATH5K_DEBUG_ANY = 0xffffffff
117}; 121};
118 122
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index dc30a2b70a6b..7d7b646ab65a 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -35,7 +35,8 @@
35 */ 35 */
36static int 36static int
37ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, 37ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
38 unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type, 38 unsigned int pkt_len, unsigned int hdr_len, int padsize,
39 enum ath5k_pkt_type type,
39 unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0, 40 unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
40 unsigned int key_index, unsigned int antenna_mode, unsigned int flags, 41 unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
41 unsigned int rtscts_rate, unsigned int rtscts_duration) 42 unsigned int rtscts_rate, unsigned int rtscts_duration)
@@ -71,7 +72,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
71 /* Verify and set frame length */ 72 /* Verify and set frame length */
72 73
73 /* remove padding we might have added before */ 74 /* remove padding we might have added before */
74 frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN; 75 frame_len = pkt_len - padsize + FCS_LEN;
75 76
76 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN) 77 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
77 return -EINVAL; 78 return -EINVAL;
@@ -100,7 +101,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
100 AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN); 101 AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
101 } 102 }
102 103
103 /*Diferences between 5210-5211*/ 104 /*Differences between 5210-5211*/
104 if (ah->ah_version == AR5K_AR5210) { 105 if (ah->ah_version == AR5K_AR5210) {
105 switch (type) { 106 switch (type) {
106 case AR5K_PKT_TYPE_BEACON: 107 case AR5K_PKT_TYPE_BEACON:
@@ -165,6 +166,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
165 */ 166 */
166static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, 167static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
167 struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len, 168 struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
169 int padsize,
168 enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0, 170 enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
169 unsigned int tx_tries0, unsigned int key_index, 171 unsigned int tx_tries0, unsigned int key_index,
170 unsigned int antenna_mode, unsigned int flags, 172 unsigned int antenna_mode, unsigned int flags,
@@ -206,7 +208,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
206 /* Verify and set frame length */ 208 /* Verify and set frame length */
207 209
208 /* remove padding we might have added before */ 210 /* remove padding we might have added before */
209 frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN; 211 frame_len = pkt_len - padsize + FCS_LEN;
210 212
211 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN) 213 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
212 return -EINVAL; 214 return -EINVAL;
@@ -229,7 +231,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
229 AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT); 231 AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
230 tx_ctl->tx_control_1 |= AR5K_REG_SM(type, 232 tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
231 AR5K_4W_TX_DESC_CTL1_FRAME_TYPE); 233 AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
232 tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES, 234 tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0,
233 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0); 235 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
234 tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0; 236 tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
235 237
@@ -643,6 +645,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
643 rs->rs_status |= AR5K_RXERR_PHY; 645 rs->rs_status |= AR5K_RXERR_PHY;
644 rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1, 646 rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
645 AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE); 647 AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
648 ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
646 } 649 }
647 650
648 if (rx_status->rx_status_1 & 651 if (rx_status->rx_status_1 &
@@ -668,12 +671,6 @@ int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
668 ah->ah_version != AR5K_AR5212) 671 ah->ah_version != AR5K_AR5212)
669 return -ENOTSUPP; 672 return -ENOTSUPP;
670 673
671 /* XXX: What is this magic value and where is it used ? */
672 if (ah->ah_version == AR5K_AR5212)
673 ah->ah_magic = AR5K_EEPROM_MAGIC_5212;
674 else if (ah->ah_version == AR5K_AR5211)
675 ah->ah_magic = AR5K_EEPROM_MAGIC_5211;
676
677 if (ah->ah_version == AR5K_AR5212) { 674 if (ah->ah_version == AR5K_AR5212) {
678 ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc; 675 ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
679 ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc; 676 ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index 56158c804e3e..64538fbe4167 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -112,15 +112,32 @@ struct ath5k_hw_rx_error {
112#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00 112#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00
113#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8 113#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8
114 114
115/* PHY Error codes */ 115/**
116#define AR5K_DESC_RX_PHY_ERROR_NONE 0x00 116 * enum ath5k_phy_error_code - PHY Error codes
117#define AR5K_DESC_RX_PHY_ERROR_TIMING 0x20 117 */
118#define AR5K_DESC_RX_PHY_ERROR_PARITY 0x40 118enum ath5k_phy_error_code {
119#define AR5K_DESC_RX_PHY_ERROR_RATE 0x60 119 AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun */
120#define AR5K_DESC_RX_PHY_ERROR_LENGTH 0x80 120 AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
121#define AR5K_DESC_RX_PHY_ERROR_64QAM 0xa0 121 AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
122#define AR5K_DESC_RX_PHY_ERROR_SERVICE 0xc0 122 AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
123#define AR5K_DESC_RX_PHY_ERROR_TRANSMITOVR 0xe0 123 AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
124 AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect */
125 AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
126 AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
127 /* these are specific to the 5212 */
128 AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
129 AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
130 AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
131 AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL = 20,
132 AR5K_RX_PHY_ERROR_OFDM_POWER_DROP = 21,
133 AR5K_RX_PHY_ERROR_OFDM_SERVICE = 22,
134 AR5K_RX_PHY_ERROR_OFDM_RESTART = 23,
135 AR5K_RX_PHY_ERROR_CCK_TIMING = 25,
136 AR5K_RX_PHY_ERROR_CCK_HEADER_CRC = 26,
137 AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL = 27,
138 AR5K_RX_PHY_ERROR_CCK_SERVICE = 30,
139 AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
140};
124 141
125/* 142/*
126 * 5210/5211 hardware 2-word TX control descriptor 143 * 5210/5211 hardware 2-word TX control descriptor
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 67665cdc7afe..ed0263672d6d 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -331,7 +331,8 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
331 ee->ee_x_gain[mode] = (val >> 1) & 0xf; 331 ee->ee_x_gain[mode] = (val >> 1) & 0xf;
332 ee->ee_xpd[mode] = val & 0x1; 332 ee->ee_xpd[mode] = val & 0x1;
333 333
334 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) 334 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
335 mode != AR5K_EEPROM_MODE_11B)
335 ee->ee_fixed_bias[mode] = (val >> 13) & 0x1; 336 ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
336 337
337 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) { 338 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
@@ -341,6 +342,7 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
341 if (mode == AR5K_EEPROM_MODE_11A) 342 if (mode == AR5K_EEPROM_MODE_11A)
342 ee->ee_xr_power[mode] = val & 0x3f; 343 ee->ee_xr_power[mode] = val & 0x3f;
343 else { 344 else {
345 /* b_DB_11[bg] and b_OB_11[bg] */
344 ee->ee_ob[mode][0] = val & 0x7; 346 ee->ee_ob[mode][0] = val & 0x7;
345 ee->ee_db[mode][0] = (val >> 3) & 0x7; 347 ee->ee_db[mode][0] = (val >> 3) & 0x7;
346 } 348 }
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 473a483bb9c3..c4a6d5f26af4 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -24,9 +24,6 @@
24 * SERDES infos are present */ 24 * SERDES infos are present */
25#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */ 25#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */
26#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */ 26#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */
27#define AR5K_EEPROM_MAGIC_5212 0x0000145c /* 5212 */
28#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */
29#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */
30 27
31#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */ 28#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */
32 29
@@ -78,9 +75,9 @@
78#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1) 75#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
79#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1) 76#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
80#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1) 77#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
81#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz (?) */ 78#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz */
82#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for a/XR mode (eeprom_init) */ 79#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for < 2W power consumption */
83#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) 80#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) /* Device type (1 Cardbus, 2 PCI, 3 MiniPCI, 4 AP) */
84#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */ 81#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
85#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz */ 82#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz */
86 83
@@ -101,7 +98,7 @@
101 98
102#define AR5K_EEPROM_MISC1 AR5K_EEPROM_INFO(5) 99#define AR5K_EEPROM_MISC1 AR5K_EEPROM_INFO(5)
103#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff) 100#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff)
104#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1) 101#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1) /* has 32KHz crystal for sleep mode */
105#define AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(_v) (((_v) >> 15) & 0x1) 102#define AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(_v) (((_v) >> 15) & 0x1)
106 103
107#define AR5K_EEPROM_MISC2 AR5K_EEPROM_INFO(6) 104#define AR5K_EEPROM_MISC2 AR5K_EEPROM_INFO(6)
@@ -114,26 +111,27 @@
114 111
115#define AR5K_EEPROM_MISC4 AR5K_EEPROM_INFO(8) 112#define AR5K_EEPROM_MISC4 AR5K_EEPROM_INFO(8)
116#define AR5K_EEPROM_CAL_DATA_START(_v) (((_v) >> 4) & 0xfff) 113#define AR5K_EEPROM_CAL_DATA_START(_v) (((_v) >> 4) & 0xfff)
117#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3) 114#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3) /* modes supported by radio 0 (bit 1: G, bit 2: A) */
118#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3) 115#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3) /* modes supported by radio 1 (bit 1: G, bit 2: A) */
119 116
120#define AR5K_EEPROM_MISC5 AR5K_EEPROM_INFO(9) 117#define AR5K_EEPROM_MISC5 AR5K_EEPROM_INFO(9)
121#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1) 118#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1) /* disable compression */
122#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1) 119#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1) /* disable AES */
123#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1) 120#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1) /* disable fast frames */
124#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1) 121#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1) /* disable bursting */
125#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf) 122#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf) /* max number of QCUs. defaults to 10 */
126#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) 123#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) /* enable heayy clipping */
127#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf) 124#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf) /* key cache size. defaults to 128 */
128 125
129#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10) 126#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10)
130#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x8) 127#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x7) /* MIMO chains disabled for TX bitmask */
131#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x8) 128#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x7) /* MIMO chains disabled for RX bitmask */
132#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1) 129#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1) /* 5.47-5.7GHz supported */
133#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1) 130#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on even channels (5180, 5200, 5220, 5240) supported */
134#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1) 131#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1) /* Japan UNII2 band (5.25-5.35GHz) supported */
135#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 9) & 0x1) 132#define AR5K_EEPROM_JAP_MID_EN (((_v) >> 9) & 0x1) /* Japan band from 5.47-5.7GHz supported */
136#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 10) & 0x1) 133#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 10) & 0x1) /* Japan UNII2 band (5.15-5.25GHz) on odd channels (5170, 5190, 5210, 5230) supported */
134#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 11) & 0x1) /* Japan A mode enabled (using even channels) */
137 135
138/* calibration settings */ 136/* calibration settings */
139#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4) 137#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
@@ -389,7 +387,49 @@ struct ath5k_edge_power {
389 bool flag; 387 bool flag;
390}; 388};
391 389
392/* EEPROM calibration data */ 390/**
391 * struct ath5k_eeprom_info - EEPROM calibration data
392 *
393 * @ee_regdomain: ath/regd.c takes care of COUNTRY_ERD and WORLDWIDE_ROAMING
394 * flags
395 * @ee_ant_gain: Antenna gain in 0.5dB steps signed [5211 only?]
396 * @ee_cck_ofdm_gain_delta: difference in gainF to output the same power for
397 * OFDM and CCK packets
398 * @ee_cck_ofdm_power_delta: power difference between OFDM (6Mbps) and CCK
399 * (11Mbps) rate in G mode. 0.1dB steps
400 * @ee_scaled_cck_delta: for Japan Channel 14: 0.1dB resolution
401 *
402 * @ee_i_cal: Initial I coefficient to correct I/Q mismatch in the receive path
403 * @ee_q_cal: Initial Q coefficient to correct I/Q mismatch in the receive path
404 * @ee_fixed_bias: use ee_ob and ee_db settings or use automatic control
405 * @ee_switch_settling: RX/TX Switch settling time
406 * @ee_atn_tx_rx: Difference in attenuation between TX and RX in 1dB steps
407 * @ee_ant_control: Antenna Control Settings
408 * @ee_ob: Bias current for Output stage of PA
409 * B/G mode: Index [0] is used for AR2112/5112, otherwise [1]
410 * A mode: [0] 5.15-5.25 [1] 5.25-5.50 [2] 5.50-5.70 [3] 5.70-5.85 GHz
411 * @ee_db: Bias current for Output stage of PA. see @ee_ob
412 * @ee_tx_end2xlna_enable: Time difference from when BB finishes sending a frame
413 * to when the external LNA is activated
414 * @ee_tx_end2xpa_disable: Time difference from when BB finishes sending a frame
415 * to when the external PA switch is deactivated
416 * @ee_tx_frm2xpa_enable: Time difference from when MAC sends frame to when
417 * external PA switch is activated
418 * @ee_thr_62: Clear Channel Assessment (CCA) sensitivity
419 * (IEEE802.11a section 17.3.10.5 )
420 * @ee_xlna_gain: Total gain of the LNA (information only)
421 * @ee_xpd: Use external (1) or internal power detector
422 * @ee_x_gain: Gain for external power detector output (differences in EEMAP
423 * versions!)
424 * @ee_i_gain: Initial gain value after reset
425 * @ee_margin_tx_rx: Margin in dB when final attenuation stage should be used
426 *
427 * @ee_false_detect: Backoff in Sensitivity (dB) on channels with spur signals
428 * @ee_noise_floor_thr: Noise floor threshold in 1dB steps
429 * @ee_adc_desired_size: Desired amplitude for ADC, used by AGC; in 0.5 dB steps
430 * @ee_pga_desired_size: Desired output of PGA (for BB gain) in 0.5 dB steps
431 * @ee_pd_gain_overlap: PD ADC curves need to overlap in 0.5dB steps (ee_map>=2)
432 */
393struct ath5k_eeprom_info { 433struct ath5k_eeprom_info {
394 434
395 /* Header information */ 435 /* Header information */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index aefe84f9c04b..5212e275f1c7 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -39,16 +39,16 @@
39 * ath5k_hw_set_opmode - Set PCU operating mode 39 * ath5k_hw_set_opmode - Set PCU operating mode
40 * 40 *
41 * @ah: The &struct ath5k_hw 41 * @ah: The &struct ath5k_hw
42 * @op_mode: &enum nl80211_iftype operating mode
42 * 43 *
43 * Initialize PCU for the various operating modes (AP/STA etc) 44 * Initialize PCU for the various operating modes (AP/STA etc)
44 *
45 * NOTE: ah->ah_op_mode must be set before calling this.
46 */ 45 */
47int ath5k_hw_set_opmode(struct ath5k_hw *ah) 46int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
48{ 47{
49 struct ath_common *common = ath5k_hw_common(ah); 48 struct ath_common *common = ath5k_hw_common(ah);
50 u32 pcu_reg, beacon_reg, low_id, high_id; 49 u32 pcu_reg, beacon_reg, low_id, high_id;
51 50
51 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
52 52
53 /* Preserve rest settings */ 53 /* Preserve rest settings */
54 pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000; 54 pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
@@ -61,7 +61,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
61 61
62 ATH5K_TRACE(ah->ah_sc); 62 ATH5K_TRACE(ah->ah_sc);
63 63
64 switch (ah->ah_op_mode) { 64 switch (op_mode) {
65 case NL80211_IFTYPE_ADHOC: 65 case NL80211_IFTYPE_ADHOC:
66 pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE; 66 pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
67 beacon_reg |= AR5K_BCR_ADHOC; 67 beacon_reg |= AR5K_BCR_ADHOC;
@@ -113,39 +113,26 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
113} 113}
114 114
115/** 115/**
116 * ath5k_hw_update - Update mib counters (mac layer statistics) 116 * ath5k_hw_update - Update MIB counters (mac layer statistics)
117 * 117 *
118 * @ah: The &struct ath5k_hw 118 * @ah: The &struct ath5k_hw
119 * @stats: The &struct ieee80211_low_level_stats we use to track
120 * statistics on the driver
121 * 119 *
122 * Reads MIB counters from PCU and updates sw statistics. Must be 120 * Reads MIB counters from PCU and updates sw statistics. Is called after a
123 * called after a MIB interrupt. 121 * MIB interrupt, because one of these counters might have reached their maximum
122 * and triggered the MIB interrupt, to let us read and clear the counter.
123 *
124 * Is called in interrupt context!
124 */ 125 */
125void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, 126void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
126 struct ieee80211_low_level_stats *stats)
127{ 127{
128 ATH5K_TRACE(ah->ah_sc); 128 struct ath5k_statistics *stats = &ah->ah_sc->stats;
129 129
130 /* Read-And-Clear */ 130 /* Read-And-Clear */
131 stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL); 131 stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
132 stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL); 132 stats->rts_fail += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
133 stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK); 133 stats->rts_ok += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
134 stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL); 134 stats->fcs_error += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
135 135 stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
136 /* XXX: Should we use this to track beacon count ?
137 * -we read it anyway to clear the register */
138 ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
139
140 /* Reset profile count registers on 5212*/
141 if (ah->ah_version == AR5K_AR5212) {
142 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
143 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
144 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
145 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
146 }
147
148 /* TODO: Handle ANI stats */
149} 136}
150 137
151/** 138/**
@@ -167,9 +154,9 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
167 else { 154 else {
168 u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB; 155 u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
169 if (high) 156 if (high)
170 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
171 else
172 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val); 157 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
158 else
159 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
173 } 160 }
174} 161}
175 162
@@ -179,25 +166,12 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
179\******************/ 166\******************/
180 167
181/** 168/**
182 * ath5k_hw_het_ack_timeout - Get ACK timeout from PCU in usec
183 *
184 * @ah: The &struct ath5k_hw
185 */
186unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
187{
188 ATH5K_TRACE(ah->ah_sc);
189
190 return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
191 AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
192}
193
194/**
195 * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU 169 * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
196 * 170 *
197 * @ah: The &struct ath5k_hw 171 * @ah: The &struct ath5k_hw
198 * @timeout: Timeout in usec 172 * @timeout: Timeout in usec
199 */ 173 */
200int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) 174static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
201{ 175{
202 ATH5K_TRACE(ah->ah_sc); 176 ATH5K_TRACE(ah->ah_sc);
203 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK)) 177 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
@@ -211,24 +185,12 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
211} 185}
212 186
213/** 187/**
214 * ath5k_hw_get_cts_timeout - Get CTS timeout from PCU in usec
215 *
216 * @ah: The &struct ath5k_hw
217 */
218unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
219{
220 ATH5K_TRACE(ah->ah_sc);
221 return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
222 AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
223}
224
225/**
226 * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU 188 * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
227 * 189 *
228 * @ah: The &struct ath5k_hw 190 * @ah: The &struct ath5k_hw
229 * @timeout: Timeout in usec 191 * @timeout: Timeout in usec
230 */ 192 */
231int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) 193static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
232{ 194{
233 ATH5K_TRACE(ah->ah_sc); 195 ATH5K_TRACE(ah->ah_sc);
234 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS)) 196 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
@@ -290,7 +252,7 @@ unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
290 * 252 *
291 * @ah: The &struct ath5k_hw 253 * @ah: The &struct ath5k_hw
292 */ 254 */
293unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah) 255static unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
294{ 256{
295 struct ieee80211_channel *channel = ah->ah_current_channel; 257 struct ieee80211_channel *channel = ah->ah_current_channel;
296 258
@@ -308,7 +270,7 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
308 * 270 *
309 * @ah: The &struct ath5k_hw 271 * @ah: The &struct ath5k_hw
310 */ 272 */
311unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah) 273static unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
312{ 274{
313 struct ieee80211_channel *channel = ah->ah_current_channel; 275 struct ieee80211_channel *channel = ah->ah_current_channel;
314 276
@@ -417,7 +379,6 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
417 * (ACK etc). 379 * (ACK etc).
418 * 380 *
419 * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma 381 * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
420 * TODO: Init ANI here
421 */ 382 */
422void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) 383void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
423{ 384{
@@ -451,42 +412,6 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
451 ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1); 412 ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
452} 413}
453 414
454/*
455 * Set multicast filter by index
456 */
457int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
458{
459
460 ATH5K_TRACE(ah->ah_sc);
461 if (index >= 64)
462 return -EINVAL;
463 else if (index >= 32)
464 AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
465 (1 << (index - 32)));
466 else
467 AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
468
469 return 0;
470}
471
472/*
473 * Clear Multicast filter by index
474 */
475int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
476{
477
478 ATH5K_TRACE(ah->ah_sc);
479 if (index >= 64)
480 return -EINVAL;
481 else if (index >= 32)
482 AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
483 (1 << (index - 32)));
484 else
485 AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
486
487 return 0;
488}
489
490/** 415/**
491 * ath5k_hw_get_rx_filter - Get current rx filter 416 * ath5k_hw_get_rx_filter - Get current rx filter
492 * 417 *
@@ -571,18 +496,7 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
571* Beacon control * 496* Beacon control *
572\****************/ 497\****************/
573 498
574/** 499#define ATH5K_MAX_TSF_READ 10
575 * ath5k_hw_get_tsf32 - Get a 32bit TSF
576 *
577 * @ah: The &struct ath5k_hw
578 *
579 * Returns lower 32 bits of current TSF
580 */
581u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
582{
583 ATH5K_TRACE(ah->ah_sc);
584 return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
585}
586 500
587/** 501/**
588 * ath5k_hw_get_tsf64 - Get the full 64bit TSF 502 * ath5k_hw_get_tsf64 - Get the full 64bit TSF
@@ -593,10 +507,35 @@ u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
593 */ 507 */
594u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah) 508u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
595{ 509{
596 u64 tsf = ath5k_hw_reg_read(ah, AR5K_TSF_U32); 510 u32 tsf_lower, tsf_upper1, tsf_upper2;
511 int i;
512
513 /*
514 * While reading TSF upper and then lower part, the clock is still
515 * counting (or jumping in case of IBSS merge) so we might get
516 * inconsistent values. To avoid this, we read the upper part again
517 * and check it has not been changed. We make the hypothesis that a
518 * maximum of 3 changes can happens in a row (we use 10 as a safe
519 * value).
520 *
521 * Impact on performance is pretty small, since in most cases, only
522 * 3 register reads are needed.
523 */
524
525 tsf_upper1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
526 for (i = 0; i < ATH5K_MAX_TSF_READ; i++) {
527 tsf_lower = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
528 tsf_upper2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
529 if (tsf_upper2 == tsf_upper1)
530 break;
531 tsf_upper1 = tsf_upper2;
532 }
533
534 WARN_ON( i == ATH5K_MAX_TSF_READ );
535
597 ATH5K_TRACE(ah->ah_sc); 536 ATH5K_TRACE(ah->ah_sc);
598 537
599 return ath5k_hw_reg_read(ah, AR5K_TSF_L32) | (tsf << 32); 538 return (((u64)tsf_upper1 << 32) | tsf_lower);
600} 539}
601 540
602/** 541/**
@@ -651,7 +590,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
651 /* 590 /*
652 * Set the additional timers by mode 591 * Set the additional timers by mode
653 */ 592 */
654 switch (ah->ah_op_mode) { 593 switch (ah->ah_sc->opmode) {
655 case NL80211_IFTYPE_MONITOR: 594 case NL80211_IFTYPE_MONITOR:
656 case NL80211_IFTYPE_STATION: 595 case NL80211_IFTYPE_STATION:
657 /* In STA mode timer1 is used as next wakeup 596 /* In STA mode timer1 is used as next wakeup
@@ -688,8 +627,8 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
688 * Set the beacon register and enable all timers. 627 * Set the beacon register and enable all timers.
689 */ 628 */
690 /* When in AP or Mesh Point mode zero timer0 to start TSF */ 629 /* When in AP or Mesh Point mode zero timer0 to start TSF */
691 if (ah->ah_op_mode == NL80211_IFTYPE_AP || 630 if (ah->ah_sc->opmode == NL80211_IFTYPE_AP ||
692 ah->ah_op_mode == NL80211_IFTYPE_MESH_POINT) 631 ah->ah_sc->opmode == NL80211_IFTYPE_MESH_POINT)
693 ath5k_hw_reg_write(ah, 0, AR5K_TIMER0); 632 ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
694 633
695 ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0); 634 ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
@@ -722,203 +661,6 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
722 661
723} 662}
724 663
725#if 0
726/*
727 * Set beacon timers
728 */
729int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
730 const struct ath5k_beacon_state *state)
731{
732 u32 cfp_period, next_cfp, dtim, interval, next_beacon;
733
734 /*
735 * TODO: should be changed through *state
736 * review struct ath5k_beacon_state struct
737 *
738 * XXX: These are used for cfp period bellow, are they
739 * ok ? Is it O.K. for tsf here to be 0 or should we use
740 * get_tsf ?
741 */
742 u32 dtim_count = 0; /* XXX */
743 u32 cfp_count = 0; /* XXX */
744 u32 tsf = 0; /* XXX */
745
746 ATH5K_TRACE(ah->ah_sc);
747 /* Return on an invalid beacon state */
748 if (state->bs_interval < 1)
749 return -EINVAL;
750
751 interval = state->bs_interval;
752 dtim = state->bs_dtim_period;
753
754 /*
755 * PCF support?
756 */
757 if (state->bs_cfp_period > 0) {
758 /*
759 * Enable PCF mode and set the CFP
760 * (Contention Free Period) and timer registers
761 */
762 cfp_period = state->bs_cfp_period * state->bs_dtim_period *
763 state->bs_interval;
764 next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
765 state->bs_interval;
766
767 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
768 AR5K_STA_ID1_DEFAULT_ANTENNA |
769 AR5K_STA_ID1_PCF);
770 ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
771 ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
772 AR5K_CFP_DUR);
773 ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
774 next_cfp)) << 3, AR5K_TIMER2);
775 } else {
776 /* Disable PCF mode */
777 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
778 AR5K_STA_ID1_DEFAULT_ANTENNA |
779 AR5K_STA_ID1_PCF);
780 }
781
782 /*
783 * Enable the beacon timer register
784 */
785 ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);
786
787 /*
788 * Start the beacon timers
789 */
790 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &
791 ~(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
792 AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
793 AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
794 AR5K_BEACON_PERIOD), AR5K_BEACON);
795
796 /*
797 * Write new beacon miss threshold, if it appears to be valid
798 * XXX: Figure out right values for min <= bs_bmiss_threshold <= max
799 * and return if its not in range. We can test this by reading value and
800 * setting value to a largest value and seeing which values register.
801 */
802
803 AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
804 state->bs_bmiss_threshold);
805
806 /*
807 * Set sleep control register
808 * XXX: Didn't find this in 5210 code but since this register
809 * exists also in ar5k's 5210 headers i leave it as common code.
810 */
811 AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
812 (state->bs_sleep_duration - 3) << 3);
813
814 /*
815 * Set enhanced sleep registers on 5212
816 */
817 if (ah->ah_version == AR5K_AR5212) {
818 if (state->bs_sleep_duration > state->bs_interval &&
819 roundup(state->bs_sleep_duration, interval) ==
820 state->bs_sleep_duration)
821 interval = state->bs_sleep_duration;
822
823 if (state->bs_sleep_duration > dtim && (dtim == 0 ||
824 roundup(state->bs_sleep_duration, dtim) ==
825 state->bs_sleep_duration))
826 dtim = state->bs_sleep_duration;
827
828 if (interval > dtim)
829 return -EINVAL;
830
831 next_beacon = interval == dtim ? state->bs_next_dtim :
832 state->bs_next_beacon;
833
834 ath5k_hw_reg_write(ah,
835 AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
836 AR5K_SLEEP0_NEXT_DTIM) |
837 AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
838 AR5K_SLEEP0_ENH_SLEEP_EN |
839 AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);
840
841 ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
842 AR5K_SLEEP1_NEXT_TIM) |
843 AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);
844
845 ath5k_hw_reg_write(ah,
846 AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
847 AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
848 }
849
850 return 0;
851}
852
853/*
854 * Reset beacon timers
855 */
856void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
857{
858 ATH5K_TRACE(ah->ah_sc);
859 /*
860 * Disable beacon timer
861 */
862 ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
863
864 /*
865 * Disable some beacon register values
866 */
867 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
868 AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
869 ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
870}
871
872/*
873 * Wait for beacon queue to finish
874 */
875int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
876{
877 unsigned int i;
878 int ret;
879
880 ATH5K_TRACE(ah->ah_sc);
881
882 /* 5210 doesn't have QCU*/
883 if (ah->ah_version == AR5K_AR5210) {
884 /*
885 * Wait for beaconn queue to finish by checking
886 * Control Register and Beacon Status Register.
887 */
888 for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
889 if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
890 ||
891 !(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
892 break;
893 udelay(10);
894 }
895
896 /* Timeout... */
897 if (i <= 0) {
898 /*
899 * Re-schedule the beacon queue
900 */
901 ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
902 ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
903 AR5K_BCR);
904
905 return -EIO;
906 }
907 ret = 0;
908 } else {
909 /*5211/5212*/
910 ret = ath5k_hw_register_timeout(ah,
911 AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
912 AR5K_QCU_STS_FRMPENDCNT, 0, false);
913
914 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
915 return -EIO;
916 }
917
918 return ret;
919}
920#endif
921
922 664
923/*********************\ 665/*********************\
924* Key table functions * 666* Key table functions *
@@ -971,19 +713,6 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
971 return 0; 713 return 0;
972} 714}
973 715
974/*
975 * Check if a table entry is valid
976 */
977int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
978{
979 ATH5K_TRACE(ah->ah_sc);
980 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
981
982 /* Check the validation flag at the end of the entry */
983 return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
984 AR5K_KEYTABLE_VALID;
985}
986
987static 716static
988int ath5k_keycache_type(const struct ieee80211_key_conf *key) 717int ath5k_keycache_type(const struct ieee80211_key_conf *key)
989{ 718{
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 68e2bccd90d3..1b81c4778800 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -20,8 +20,6 @@
20 * 20 *
21 */ 21 */
22 22
23#define _ATH5K_PHY
24
25#include <linux/delay.h> 23#include <linux/delay.h>
26#include <linux/slab.h> 24#include <linux/slab.h>
27 25
@@ -982,7 +980,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
982 return -EINVAL; 980 return -EINVAL;
983 981
984 data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8); 982 data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
985 } else if ((c - (c % 5)) != 2 || c > 5435) { 983 } else if ((c % 5) != 2 || c > 5435) {
986 if (!(c % 20) && c >= 5120) { 984 if (!(c % 20) && c >= 5120) {
987 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); 985 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
988 data2 = ath5k_hw_bitswap(3, 2); 986 data2 = ath5k_hw_bitswap(3, 2);
@@ -995,7 +993,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
995 } else 993 } else
996 return -EINVAL; 994 return -EINVAL;
997 } else { 995 } else {
998 data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8); 996 data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
999 data2 = ath5k_hw_bitswap(0, 2); 997 data2 = ath5k_hw_bitswap(0, 2);
1000 } 998 }
1001 999
@@ -1023,7 +1021,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
1023 data0 = ath5k_hw_bitswap((c - 2272), 8); 1021 data0 = ath5k_hw_bitswap((c - 2272), 8);
1024 data2 = 0; 1022 data2 = 0;
1025 /* ? 5GHz ? */ 1023 /* ? 5GHz ? */
1026 } else if ((c - (c % 5)) != 2 || c > 5435) { 1024 } else if ((c % 5) != 2 || c > 5435) {
1027 if (!(c % 20) && c < 5120) 1025 if (!(c % 20) && c < 5120)
1028 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); 1026 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
1029 else if (!(c % 10)) 1027 else if (!(c % 10))
@@ -1034,7 +1032,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
1034 return -EINVAL; 1032 return -EINVAL;
1035 data2 = ath5k_hw_bitswap(1, 2); 1033 data2 = ath5k_hw_bitswap(1, 2);
1036 } else { 1034 } else {
1037 data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8); 1035 data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
1038 data2 = ath5k_hw_bitswap(0, 2); 1036 data2 = ath5k_hw_bitswap(0, 2);
1039 } 1037 }
1040 1038
@@ -1105,28 +1103,6 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1105 PHY calibration 1103 PHY calibration
1106\*****************/ 1104\*****************/
1107 1105
1108void
1109ath5k_hw_calibration_poll(struct ath5k_hw *ah)
1110{
1111 /* Calibration interval in jiffies */
1112 unsigned long cal_intval;
1113
1114 cal_intval = msecs_to_jiffies(ah->ah_cal_intval * 1000);
1115
1116 /* Initialize timestamp if needed */
1117 if (!ah->ah_cal_tstamp)
1118 ah->ah_cal_tstamp = jiffies;
1119
1120 /* For now we always do full calibration
1121 * Mark software interrupt mask and fire software
1122 * interrupt (bit gets auto-cleared) */
1123 if (time_is_before_eq_jiffies(ah->ah_cal_tstamp + cal_intval)) {
1124 ah->ah_cal_tstamp = jiffies;
1125 ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION;
1126 AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI);
1127 }
1128}
1129
1130static int sign_extend(int val, const int nbits) 1106static int sign_extend(int val, const int nbits)
1131{ 1107{
1132 int order = BIT(nbits-1); 1108 int order = BIT(nbits-1);
@@ -1191,7 +1167,7 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
1191 * The median of the values in the history is then loaded into the 1167 * The median of the values in the history is then loaded into the
1192 * hardware for its own use for RSSI and CCA measurements. 1168 * hardware for its own use for RSSI and CCA measurements.
1193 */ 1169 */
1194void ath5k_hw_update_noise_floor(struct ath5k_hw *ah) 1170static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1195{ 1171{
1196 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 1172 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1197 u32 val; 1173 u32 val;
@@ -1400,7 +1376,11 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
1400 } 1376 }
1401 1377
1402 i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7; 1378 i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
1403 q_coffd = q_pwr >> 7; 1379
1380 if (ah->ah_version == AR5K_AR5211)
1381 q_coffd = q_pwr >> 6;
1382 else
1383 q_coffd = q_pwr >> 7;
1404 1384
1405 /* protect against divide by 0 and loss of sign bits */ 1385 /* protect against divide by 0 and loss of sign bits */
1406 if (i_coffd == 0 || q_coffd < 2) 1386 if (i_coffd == 0 || q_coffd < 2)
@@ -1409,7 +1389,10 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
1409 i_coff = (-iq_corr) / i_coffd; 1389 i_coff = (-iq_corr) / i_coffd;
1410 i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */ 1390 i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
1411 1391
1412 q_coff = (i_pwr / q_coffd) - 128; 1392 if (ah->ah_version == AR5K_AR5211)
1393 q_coff = (i_pwr / q_coffd) - 64;
1394 else
1395 q_coff = (i_pwr / q_coffd) - 128;
1413 q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */ 1396 q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
1414 1397
1415 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE, 1398 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
@@ -1769,7 +1752,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
1769* Antenna control * 1752* Antenna control *
1770\*****************/ 1753\*****************/
1771 1754
1772void /*TODO:Boundary check*/ 1755static void /*TODO:Boundary check*/
1773ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant) 1756ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
1774{ 1757{
1775 ATH5K_TRACE(ah->ah_sc); 1758 ATH5K_TRACE(ah->ah_sc);
@@ -1778,16 +1761,6 @@ ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
1778 ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA); 1761 ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
1779} 1762}
1780 1763
1781unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah)
1782{
1783 ATH5K_TRACE(ah->ah_sc);
1784
1785 if (ah->ah_version != AR5K_AR5210)
1786 return ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA) & 0x7;
1787
1788 return false; /*XXX: What do we return for 5210 ?*/
1789}
1790
1791/* 1764/*
1792 * Enable/disable fast rx antenna diversity 1765 * Enable/disable fast rx antenna diversity
1793 */ 1766 */
@@ -1931,6 +1904,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
1931 1904
1932 ah->ah_tx_ant = tx_ant; 1905 ah->ah_tx_ant = tx_ant;
1933 ah->ah_ant_mode = ant_mode; 1906 ah->ah_ant_mode = ant_mode;
1907 ah->ah_def_ant = def_ant;
1934 1908
1935 sta_id1 |= use_def_for_tx ? AR5K_STA_ID1_DEFAULT_ANTENNA : 0; 1909 sta_id1 |= use_def_for_tx ? AR5K_STA_ID1_DEFAULT_ANTENNA : 0;
1936 sta_id1 |= update_def_on_tx ? AR5K_STA_ID1_DESC_ANTENNA : 0; 1910 sta_id1 |= update_def_on_tx ? AR5K_STA_ID1_DESC_ANTENNA : 0;
@@ -2171,8 +2145,6 @@ ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah,
2171done: 2145done:
2172 *pcinfo_l = &pcinfo[idx_l]; 2146 *pcinfo_l = &pcinfo[idx_l];
2173 *pcinfo_r = &pcinfo[idx_r]; 2147 *pcinfo_r = &pcinfo[idx_r];
2174
2175 return;
2176} 2148}
2177 2149
2178/* 2150/*
@@ -2441,19 +2413,6 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
2441 pcdac_tmp = pcdac_high_pwr; 2413 pcdac_tmp = pcdac_high_pwr;
2442 2414
2443 edge_flag = 0x40; 2415 edge_flag = 0x40;
2444#if 0
2445 /* If both min and max power limits are in lower
2446 * power curve's range, only use the low power curve.
2447 * TODO: min/max levels are related to target
2448 * power values requested from driver/user
2449 * XXX: Is this really needed ? */
2450 if (min_pwr < table_max[1] &&
2451 max_pwr < table_max[1]) {
2452 edge_flag = 0;
2453 pcdac_tmp = pcdac_low_pwr;
2454 max_pwr_idx = (table_max[1] - table_min[1])/2;
2455 }
2456#endif
2457 } else { 2416 } else {
2458 pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */ 2417 pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */
2459 pcdac_high_pwr = ah->ah_txpower.tmpL[0]; 2418 pcdac_high_pwr = ah->ah_txpower.tmpL[0];
@@ -2600,7 +2559,7 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
2600 max_idx = (pdadc_n < table_size) ? pdadc_n : table_size; 2559 max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
2601 2560
2602 /* Fill pdadc_out table */ 2561 /* Fill pdadc_out table */
2603 while (pdadc_0 < max_idx) 2562 while (pdadc_0 < max_idx && pdadc_i < 128)
2604 pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++]; 2563 pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++];
2605 2564
2606 /* Need to extrapolate above this pdgain? */ 2565 /* Need to extrapolate above this pdgain? */
@@ -3144,5 +3103,3 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
3144 3103
3145 return ath5k_hw_txpower(ah, channel, ee_mode, txpower); 3104 return ath5k_hw_txpower(ah, channel, ee_mode, txpower);
3146} 3105}
3147
3148#undef _ATH5K_PHY
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 9122a8556f45..f5831da33f7b 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -517,23 +517,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
517} 517}
518 518
519/* 519/*
520 * Get slot time from DCU
521 */
522unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
523{
524 unsigned int slot_time_clock;
525
526 ATH5K_TRACE(ah->ah_sc);
527
528 if (ah->ah_version == AR5K_AR5210)
529 slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
530 else
531 slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
532
533 return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
534}
535
536/*
537 * Set slot time on DCU 520 * Set slot time on DCU
538 */ 521 */
539int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time) 522int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 1464f89b249c..55b4ac6d236f 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -212,10 +212,10 @@
212 * MIB control register 212 * MIB control register
213 */ 213 */
214#define AR5K_MIBC 0x0040 /* Register Address */ 214#define AR5K_MIBC 0x0040 /* Register Address */
215#define AR5K_MIBC_COW 0x00000001 /* Warn test indicator */ 215#define AR5K_MIBC_COW 0x00000001 /* Counter Overflow Warning */
216#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */ 216#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */
217#define AR5K_MIBC_CMC 0x00000004 /* Clean MIB Counters */ 217#define AR5K_MIBC_CMC 0x00000004 /* Clear MIB Counters */
218#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe */ 218#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe, increment all */
219 219
220/* 220/*
221 * Timeout prescale register 221 * Timeout prescale register
@@ -1139,8 +1139,8 @@
1139#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */ 1139#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */
1140#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */ 1140#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */
1141#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */ 1141#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */
1142#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */ 1142#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Rate to use for ACK/CTS. 0: highest mandatory rate <= RX rate; 1: 1Mbps in B mode */
1143#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate for ACK/CTS [5211+] */ 1143#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* 802.11b base rate. 0: 1, 2, 5.5 and 11Mbps; 1: 1 and 2Mbps. [5211+] */
1144#define AR5K_STA_ID1_SELFGEN_DEF_ANT 0x04000000 /* Use def. antenna for self generated frames */ 1144#define AR5K_STA_ID1_SELFGEN_DEF_ANT 0x04000000 /* Use def. antenna for self generated frames */
1145#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */ 1145#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */
1146#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Look up key when key id != 0 */ 1146#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Look up key when key id != 0 */
@@ -1516,7 +1516,14 @@
1516 AR5K_NAV_5210 : AR5K_NAV_5211) 1516 AR5K_NAV_5210 : AR5K_NAV_5211)
1517 1517
1518/* 1518/*
1519 * RTS success register 1519 * MIB counters:
1520 *
1521 * max value is 0xc000, if this is reached we get a MIB interrupt.
1522 * they can be controlled via AR5K_MIBC and are cleared on read.
1523 */
1524
1525/*
1526 * RTS success (MIB counter)
1520 */ 1527 */
1521#define AR5K_RTS_OK_5210 0x8090 1528#define AR5K_RTS_OK_5210 0x8090
1522#define AR5K_RTS_OK_5211 0x8088 1529#define AR5K_RTS_OK_5211 0x8088
@@ -1524,7 +1531,7 @@
1524 AR5K_RTS_OK_5210 : AR5K_RTS_OK_5211) 1531 AR5K_RTS_OK_5210 : AR5K_RTS_OK_5211)
1525 1532
1526/* 1533/*
1527 * RTS failure register 1534 * RTS failure (MIB counter)
1528 */ 1535 */
1529#define AR5K_RTS_FAIL_5210 0x8094 1536#define AR5K_RTS_FAIL_5210 0x8094
1530#define AR5K_RTS_FAIL_5211 0x808c 1537#define AR5K_RTS_FAIL_5211 0x808c
@@ -1532,7 +1539,7 @@
1532 AR5K_RTS_FAIL_5210 : AR5K_RTS_FAIL_5211) 1539 AR5K_RTS_FAIL_5210 : AR5K_RTS_FAIL_5211)
1533 1540
1534/* 1541/*
1535 * ACK failure register 1542 * ACK failure (MIB counter)
1536 */ 1543 */
1537#define AR5K_ACK_FAIL_5210 0x8098 1544#define AR5K_ACK_FAIL_5210 0x8098
1538#define AR5K_ACK_FAIL_5211 0x8090 1545#define AR5K_ACK_FAIL_5211 0x8090
@@ -1540,7 +1547,7 @@
1540 AR5K_ACK_FAIL_5210 : AR5K_ACK_FAIL_5211) 1547 AR5K_ACK_FAIL_5210 : AR5K_ACK_FAIL_5211)
1541 1548
1542/* 1549/*
1543 * FCS failure register 1550 * FCS failure (MIB counter)
1544 */ 1551 */
1545#define AR5K_FCS_FAIL_5210 0x809c 1552#define AR5K_FCS_FAIL_5210 0x809c
1546#define AR5K_FCS_FAIL_5211 0x8094 1553#define AR5K_FCS_FAIL_5211 0x8094
@@ -1667,11 +1674,17 @@
1667 1674
1668/* 1675/*
1669 * Profile count registers 1676 * Profile count registers
1677 *
1678 * These registers can be cleared and freezed with ATH5K_MIBC, but they do not
1679 * generate a MIB interrupt.
1680 * Instead of overflowing, they shift by one bit to the right. All registers
1681 * shift together, i.e. when one reaches the max, all shift at the same time by
1682 * one bit to the right. This way we should always get consistent values.
1670 */ 1683 */
1671#define AR5K_PROFCNT_TX 0x80ec /* Tx count */ 1684#define AR5K_PROFCNT_TX 0x80ec /* Tx count */
1672#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */ 1685#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */
1673#define AR5K_PROFCNT_RXCLR 0x80f4 /* Clear Rx count */ 1686#define AR5K_PROFCNT_RXCLR 0x80f4 /* Busy count */
1674#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */ 1687#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle counter */
1675 1688
1676/* 1689/*
1677 * Quiet period control registers 1690 * Quiet period control registers
@@ -1758,7 +1771,7 @@
1758#define AR5K_CCK_FIL_CNT 0x8128 1771#define AR5K_CCK_FIL_CNT 0x8128
1759 1772
1760/* 1773/*
1761 * PHY Error Counters (?) 1774 * PHY Error Counters (same masks as AR5K_PHY_ERR_FIL)
1762 */ 1775 */
1763#define AR5K_PHYERR_CNT1 0x812c 1776#define AR5K_PHYERR_CNT1 0x812c
1764#define AR5K_PHYERR_CNT1_MASK 0x8130 1777#define AR5K_PHYERR_CNT1_MASK 0x8130
@@ -1766,6 +1779,9 @@
1766#define AR5K_PHYERR_CNT2 0x8134 1779#define AR5K_PHYERR_CNT2 0x8134
1767#define AR5K_PHYERR_CNT2_MASK 0x8138 1780#define AR5K_PHYERR_CNT2_MASK 0x8138
1768 1781
1782/* if the PHY Error Counters reach this maximum, we get MIB interrupts */
1783#define ATH5K_PHYERR_CNT_MAX 0x00c00000
1784
1769/* 1785/*
1770 * TSF Threshold register (?) 1786 * TSF Threshold register (?)
1771 */ 1787 */
@@ -1974,7 +1990,7 @@
1974#define AR5K_PHY_SETTLING 0x9844 /* Register Address */ 1990#define AR5K_PHY_SETTLING 0x9844 /* Register Address */
1975#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */ 1991#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */
1976#define AR5K_PHY_SETTLING_AGC_S 0 1992#define AR5K_PHY_SETTLING_AGC_S 0
1977#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settlig time */ 1993#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settling time */
1978#define AR5K_PHY_SETTLING_SWITCH_S 7 1994#define AR5K_PHY_SETTLING_SWITCH_S 7
1979 1995
1980/* 1996/*
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index cbf28e379843..307f80e83f94 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -19,8 +19,6 @@
19 * 19 *
20 */ 20 */
21 21
22#define _ATH5K_RESET
23
24/*****************************\ 22/*****************************\
25 Reset functions and helpers 23 Reset functions and helpers
26\*****************************/ 24\*****************************/
@@ -34,6 +32,27 @@
34#include "base.h" 32#include "base.h"
35#include "debug.h" 33#include "debug.h"
36 34
35/*
36 * Check if a register write has been completed
37 */
38int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
39 bool is_set)
40{
41 int i;
42 u32 data;
43
44 for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
45 data = ath5k_hw_reg_read(ah, reg);
46 if (is_set && (data & flag))
47 break;
48 else if ((data & flag) == val)
49 break;
50 udelay(15);
51 }
52
53 return (i <= 0) ? -EAGAIN : 0;
54}
55
37/** 56/**
38 * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212 57 * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
39 * 58 *
@@ -221,8 +240,8 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
221/* 240/*
222 * Sleep control 241 * Sleep control
223 */ 242 */
224int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, 243static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
225 bool set_chip, u16 sleep_duration) 244 bool set_chip, u16 sleep_duration)
226{ 245{
227 unsigned int i; 246 unsigned int i;
228 u32 staid, data; 247 u32 staid, data;
@@ -608,7 +627,6 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
608 627
609 AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1); 628 AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1);
610 } 629 }
611 return;
612} 630}
613 631
614/* TODO: Half/Quarter rate */ 632/* TODO: Half/Quarter rate */
@@ -864,8 +882,6 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
864 /* Heavy clipping -disable for now */ 882 /* Heavy clipping -disable for now */
865 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_1) 883 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_1)
866 ath5k_hw_reg_write(ah, 0, AR5K_PHY_HEAVY_CLIP_ENABLE); 884 ath5k_hw_reg_write(ah, 0, AR5K_PHY_HEAVY_CLIP_ENABLE);
867
868 return;
869} 885}
870 886
871/* 887/*
@@ -1017,11 +1033,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1017 if (ret) 1033 if (ret)
1018 return ret; 1034 return ret;
1019 1035
1020 /*
1021 * Initialize operating mode
1022 */
1023 ah->ah_op_mode = op_mode;
1024
1025 /* PHY access enable */ 1036 /* PHY access enable */
1026 if (ah->ah_mac_srev >= AR5K_SREV_AR5211) 1037 if (ah->ah_mac_srev >= AR5K_SREV_AR5211)
1027 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0)); 1038 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
@@ -1192,7 +1203,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1192 ath5k_hw_set_associd(ah); 1203 ath5k_hw_set_associd(ah);
1193 1204
1194 /* Set PCU config */ 1205 /* Set PCU config */
1195 ath5k_hw_set_opmode(ah); 1206 ath5k_hw_set_opmode(ah, op_mode);
1196 1207
1197 /* Clear any pending interrupts 1208 /* Clear any pending interrupts
1198 * PISR/SISR Not available on 5210 */ 1209 * PISR/SISR Not available on 5210 */
@@ -1378,7 +1389,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1378 * external 32KHz crystal when sleeping if one 1389 * external 32KHz crystal when sleeping if one
1379 * exists */ 1390 * exists */
1380 if (ah->ah_version == AR5K_AR5212 && 1391 if (ah->ah_version == AR5K_AR5212 &&
1381 ah->ah_op_mode != NL80211_IFTYPE_AP) 1392 op_mode != NL80211_IFTYPE_AP)
1382 ath5k_hw_set_sleep_clock(ah, true); 1393 ath5k_hw_set_sleep_clock(ah, true);
1383 1394
1384 /* 1395 /*
@@ -1388,5 +1399,3 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1388 ath5k_hw_reset_tsf(ah); 1399 ath5k_hw_reset_tsf(ah);
1389 return 0; 1400 return 0;
1390} 1401}
1391
1392#undef _ATH5K_RESET
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 5774cea23a3b..35f23bdc442f 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -32,3 +32,24 @@ config ATH9K_DEBUGFS
32 32
33 Also required for changing debug message flags at run time. 33 Also required for changing debug message flags at run time.
34 34
35config ATH9K_HTC
36 tristate "Atheros HTC based wireless cards support"
37 depends on USB && MAC80211
38 select ATH9K_HW
39 select MAC80211_LEDS
40 select LEDS_CLASS
41 select NEW_LEDS
42 select ATH9K_COMMON
43 ---help---
44 Support for Atheros HTC based cards.
45 Chipsets supported: AR9271
46
47 For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
48
49 The built module will be ath9k_htc.
50
51config ATH9K_HTC_DEBUGFS
52 bool "Atheros ath9k_htc debugging"
53 depends on ATH9K_HTC && DEBUG_FS
54 ---help---
55 Say Y, if you need access to ath9k_htc's statistics.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 6b50d5eb9ec3..dd112be218ab 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -13,18 +13,38 @@ ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
13 13
14obj-$(CONFIG_ATH9K) += ath9k.o 14obj-$(CONFIG_ATH9K) += ath9k.o
15 15
16ath9k_hw-y:= hw.o \ 16ath9k_hw-y:= \
17 ar9002_hw.o \
18 ar9003_hw.o \
19 hw.o \
20 ar9003_phy.o \
21 ar9002_phy.o \
22 ar5008_phy.o \
23 ar9002_calib.o \
24 ar9003_calib.o \
25 calib.o \
17 eeprom.o \ 26 eeprom.o \
18 eeprom_def.o \ 27 eeprom_def.o \
19 eeprom_4k.o \ 28 eeprom_4k.o \
20 eeprom_9287.o \ 29 eeprom_9287.o \
21 calib.o \
22 ani.o \ 30 ani.o \
23 phy.o \
24 btcoex.o \ 31 btcoex.o \
25 mac.o \ 32 mac.o \
33 ar9002_mac.o \
34 ar9003_mac.o \
35 ar9003_eeprom.o
26 36
27obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o 37obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
28 38
29obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o 39obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
30ath9k_common-y:= common.o 40ath9k_common-y:= common.o
41
42ath9k_htc-y += htc_hst.o \
43 hif_usb.o \
44 wmi.o \
45 htc_drv_txrx.o \
46 htc_drv_main.o \
47 htc_drv_beacon.o \
48 htc_drv_init.o
49
50obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index ca4994f13151..85fdd26039c8 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -47,6 +47,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
47} 47}
48 48
49static struct ath_bus_ops ath_ahb_bus_ops = { 49static struct ath_bus_ops ath_ahb_bus_ops = {
50 .ath_bus_type = ATH_AHB,
50 .read_cachesize = ath_ahb_read_cachesize, 51 .read_cachesize = ath_ahb_read_cachesize,
51 .eeprom_read = ath_ahb_eeprom_read, 52 .eeprom_read = ath_ahb_eeprom_read,
52}; 53};
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 2a0cd64c2bfb..ba8b20f01594 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include "hw.h" 17#include "hw.h"
18#include "hw-ops.h"
18 19
19static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah, 20static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
20 struct ath9k_channel *chan) 21 struct ath9k_channel *chan)
@@ -37,190 +38,6 @@ static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
37 return 0; 38 return 0;
38} 39}
39 40
40static bool ath9k_hw_ani_control(struct ath_hw *ah,
41 enum ath9k_ani_cmd cmd, int param)
42{
43 struct ar5416AniState *aniState = ah->curani;
44 struct ath_common *common = ath9k_hw_common(ah);
45
46 switch (cmd & ah->ani_function) {
47 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
48 u32 level = param;
49
50 if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
51 ath_print(common, ATH_DBG_ANI,
52 "level out of range (%u > %u)\n",
53 level,
54 (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
55 return false;
56 }
57
58 REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
59 AR_PHY_DESIRED_SZ_TOT_DES,
60 ah->totalSizeDesired[level]);
61 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
62 AR_PHY_AGC_CTL1_COARSE_LOW,
63 ah->coarse_low[level]);
64 REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
65 AR_PHY_AGC_CTL1_COARSE_HIGH,
66 ah->coarse_high[level]);
67 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
68 AR_PHY_FIND_SIG_FIRPWR,
69 ah->firpwr[level]);
70
71 if (level > aniState->noiseImmunityLevel)
72 ah->stats.ast_ani_niup++;
73 else if (level < aniState->noiseImmunityLevel)
74 ah->stats.ast_ani_nidown++;
75 aniState->noiseImmunityLevel = level;
76 break;
77 }
78 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
79 const int m1ThreshLow[] = { 127, 50 };
80 const int m2ThreshLow[] = { 127, 40 };
81 const int m1Thresh[] = { 127, 0x4d };
82 const int m2Thresh[] = { 127, 0x40 };
83 const int m2CountThr[] = { 31, 16 };
84 const int m2CountThrLow[] = { 63, 48 };
85 u32 on = param ? 1 : 0;
86
87 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
88 AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
89 m1ThreshLow[on]);
90 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
91 AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
92 m2ThreshLow[on]);
93 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
94 AR_PHY_SFCORR_M1_THRESH,
95 m1Thresh[on]);
96 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
97 AR_PHY_SFCORR_M2_THRESH,
98 m2Thresh[on]);
99 REG_RMW_FIELD(ah, AR_PHY_SFCORR,
100 AR_PHY_SFCORR_M2COUNT_THR,
101 m2CountThr[on]);
102 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
103 AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
104 m2CountThrLow[on]);
105
106 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
107 AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
108 m1ThreshLow[on]);
109 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
110 AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
111 m2ThreshLow[on]);
112 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
113 AR_PHY_SFCORR_EXT_M1_THRESH,
114 m1Thresh[on]);
115 REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
116 AR_PHY_SFCORR_EXT_M2_THRESH,
117 m2Thresh[on]);
118
119 if (on)
120 REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
121 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
122 else
123 REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
124 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
125
126 if (!on != aniState->ofdmWeakSigDetectOff) {
127 if (on)
128 ah->stats.ast_ani_ofdmon++;
129 else
130 ah->stats.ast_ani_ofdmoff++;
131 aniState->ofdmWeakSigDetectOff = !on;
132 }
133 break;
134 }
135 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
136 const int weakSigThrCck[] = { 8, 6 };
137 u32 high = param ? 1 : 0;
138
139 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
140 AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
141 weakSigThrCck[high]);
142 if (high != aniState->cckWeakSigThreshold) {
143 if (high)
144 ah->stats.ast_ani_cckhigh++;
145 else
146 ah->stats.ast_ani_ccklow++;
147 aniState->cckWeakSigThreshold = high;
148 }
149 break;
150 }
151 case ATH9K_ANI_FIRSTEP_LEVEL:{
152 const int firstep[] = { 0, 4, 8 };
153 u32 level = param;
154
155 if (level >= ARRAY_SIZE(firstep)) {
156 ath_print(common, ATH_DBG_ANI,
157 "level out of range (%u > %u)\n",
158 level,
159 (unsigned) ARRAY_SIZE(firstep));
160 return false;
161 }
162 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
163 AR_PHY_FIND_SIG_FIRSTEP,
164 firstep[level]);
165 if (level > aniState->firstepLevel)
166 ah->stats.ast_ani_stepup++;
167 else if (level < aniState->firstepLevel)
168 ah->stats.ast_ani_stepdown++;
169 aniState->firstepLevel = level;
170 break;
171 }
172 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
173 const int cycpwrThr1[] =
174 { 2, 4, 6, 8, 10, 12, 14, 16 };
175 u32 level = param;
176
177 if (level >= ARRAY_SIZE(cycpwrThr1)) {
178 ath_print(common, ATH_DBG_ANI,
179 "level out of range (%u > %u)\n",
180 level,
181 (unsigned) ARRAY_SIZE(cycpwrThr1));
182 return false;
183 }
184 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
185 AR_PHY_TIMING5_CYCPWR_THR1,
186 cycpwrThr1[level]);
187 if (level > aniState->spurImmunityLevel)
188 ah->stats.ast_ani_spurup++;
189 else if (level < aniState->spurImmunityLevel)
190 ah->stats.ast_ani_spurdown++;
191 aniState->spurImmunityLevel = level;
192 break;
193 }
194 case ATH9K_ANI_PRESENT:
195 break;
196 default:
197 ath_print(common, ATH_DBG_ANI,
198 "invalid cmd %u\n", cmd);
199 return false;
200 }
201
202 ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
203 ath_print(common, ATH_DBG_ANI,
204 "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
205 "ofdmWeakSigDetectOff=%d\n",
206 aniState->noiseImmunityLevel,
207 aniState->spurImmunityLevel,
208 !aniState->ofdmWeakSigDetectOff);
209 ath_print(common, ATH_DBG_ANI,
210 "cckWeakSigThreshold=%d, "
211 "firstepLevel=%d, listenTime=%d\n",
212 aniState->cckWeakSigThreshold,
213 aniState->firstepLevel,
214 aniState->listenTime);
215 ath_print(common, ATH_DBG_ANI,
216 "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
217 aniState->cycleCount,
218 aniState->ofdmPhyErrCount,
219 aniState->cckPhyErrCount);
220
221 return true;
222}
223
224static void ath9k_hw_update_mibstats(struct ath_hw *ah, 41static void ath9k_hw_update_mibstats(struct ath_hw *ah,
225 struct ath9k_mib_stats *stats) 42 struct ath9k_mib_stats *stats)
226{ 43{
@@ -262,11 +79,17 @@ static void ath9k_ani_restart(struct ath_hw *ah)
262 "Writing ofdmbase=%u cckbase=%u\n", 79 "Writing ofdmbase=%u cckbase=%u\n",
263 aniState->ofdmPhyErrBase, 80 aniState->ofdmPhyErrBase,
264 aniState->cckPhyErrBase); 81 aniState->cckPhyErrBase);
82
83 ENABLE_REGWRITE_BUFFER(ah);
84
265 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase); 85 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
266 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase); 86 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
267 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); 87 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
268 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 88 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
269 89
90 REGWRITE_BUFFER_FLUSH(ah);
91 DISABLE_REGWRITE_BUFFER(ah);
92
270 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 93 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
271 94
272 aniState->ofdmPhyErrCount = 0; 95 aniState->ofdmPhyErrCount = 0;
@@ -540,8 +363,14 @@ void ath9k_ani_reset(struct ath_hw *ah)
540 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) & 363 ath9k_hw_setrxfilter(ah, ath9k_hw_getrxfilter(ah) &
541 ~ATH9K_RX_FILTER_PHYERR); 364 ~ATH9K_RX_FILTER_PHYERR);
542 ath9k_ani_restart(ah); 365 ath9k_ani_restart(ah);
366
367 ENABLE_REGWRITE_BUFFER(ah);
368
543 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); 369 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
544 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 370 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
371
372 REGWRITE_BUFFER_FLUSH(ah);
373 DISABLE_REGWRITE_BUFFER(ah);
545} 374}
546 375
547void ath9k_hw_ani_monitor(struct ath_hw *ah, 376void ath9k_hw_ani_monitor(struct ath_hw *ah,
@@ -639,6 +468,8 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
639 468
640 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 469 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
641 470
471 ENABLE_REGWRITE_BUFFER(ah);
472
642 REG_WRITE(ah, AR_FILT_OFDM, 0); 473 REG_WRITE(ah, AR_FILT_OFDM, 0);
643 REG_WRITE(ah, AR_FILT_CCK, 0); 474 REG_WRITE(ah, AR_FILT_CCK, 0);
644 REG_WRITE(ah, AR_MIBC, 475 REG_WRITE(ah, AR_MIBC,
@@ -646,6 +477,9 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
646 & 0x0f); 477 & 0x0f);
647 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); 478 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
648 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING); 479 REG_WRITE(ah, AR_PHY_ERR_MASK_2, AR_PHY_ERR_CCK_TIMING);
480
481 REGWRITE_BUFFER_FLUSH(ah);
482 DISABLE_REGWRITE_BUFFER(ah);
649} 483}
650 484
651/* Freeze the MIB counters, get the stats and then clear them */ 485/* Freeze the MIB counters, get the stats and then clear them */
@@ -809,20 +643,17 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
809 ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n", 643 ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
810 ah->ani[0].cckPhyErrBase); 644 ah->ani[0].cckPhyErrBase);
811 645
646 ENABLE_REGWRITE_BUFFER(ah);
647
812 REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase); 648 REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase);
813 REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase); 649 REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase);
650
651 REGWRITE_BUFFER_FLUSH(ah);
652 DISABLE_REGWRITE_BUFFER(ah);
653
814 ath9k_enable_mib_counters(ah); 654 ath9k_enable_mib_counters(ah);
815 655
816 ah->aniperiod = ATH9K_ANI_PERIOD; 656 ah->aniperiod = ATH9K_ANI_PERIOD;
817 if (ah->config.enable_ani) 657 if (ah->config.enable_ani)
818 ah->proc_phyerr |= HAL_PROCESS_ANI; 658 ah->proc_phyerr |= HAL_PROCESS_ANI;
819} 659}
820
821void ath9k_hw_ani_disable(struct ath_hw *ah)
822{
823 ath_print(ath9k_hw_common(ah), ATH_DBG_ANI, "Disabling ANI\n");
824
825 ath9k_hw_disable_mib_counters(ah);
826 REG_WRITE(ah, AR_PHY_ERR_1, 0);
827 REG_WRITE(ah, AR_PHY_ERR_2, 0);
828}
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index 4e1ab94a5153..3356762ea384 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -118,6 +118,5 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah, u32 *rxc_pcnt,
118void ath9k_hw_procmibevent(struct ath_hw *ah); 118void ath9k_hw_procmibevent(struct ath_hw *ah);
119void ath9k_hw_ani_setup(struct ath_hw *ah); 119void ath9k_hw_ani_setup(struct ath_hw *ah);
120void ath9k_hw_ani_init(struct ath_hw *ah); 120void ath9k_hw_ani_init(struct ath_hw *ah);
121void ath9k_hw_ani_disable(struct ath_hw *ah);
122 121
123#endif /* ANI_H */ 122#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_initvals.h b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
new file mode 100644
index 000000000000..025c31ac6146
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_initvals.h
@@ -0,0 +1,742 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef INITVALS_AR5008_H
18#define INITVALS_AR5008_H
19
20static const u32 ar5416Modes[][6] = {
21 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
22 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
23 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
24 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
25 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
26 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
27 { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
28 { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
29 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
30 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
31 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
32 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
33 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
34 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
35 { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
36 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
37 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
38 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
39 { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
40 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
41 { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
42 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
43 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
44 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
45 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
46 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
47 { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
48 { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
49 { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
50 { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
51 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
52 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
53 { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
54 { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
55 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
56 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
57 { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
58 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
59 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
60 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
61 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
62 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
63 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
64 { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
65 { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
66 { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
67 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
68 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
69 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
70 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
71 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
72 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
73 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
74 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
75 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
76 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
77 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
78 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
79 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
80 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
81 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
82 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
83 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
84};
85
86static const u32 ar5416Common[][2] = {
87 { 0x0000000c, 0x00000000 },
88 { 0x00000030, 0x00020015 },
89 { 0x00000034, 0x00000005 },
90 { 0x00000040, 0x00000000 },
91 { 0x00000044, 0x00000008 },
92 { 0x00000048, 0x00000008 },
93 { 0x0000004c, 0x00000010 },
94 { 0x00000050, 0x00000000 },
95 { 0x00000054, 0x0000001f },
96 { 0x00000800, 0x00000000 },
97 { 0x00000804, 0x00000000 },
98 { 0x00000808, 0x00000000 },
99 { 0x0000080c, 0x00000000 },
100 { 0x00000810, 0x00000000 },
101 { 0x00000814, 0x00000000 },
102 { 0x00000818, 0x00000000 },
103 { 0x0000081c, 0x00000000 },
104 { 0x00000820, 0x00000000 },
105 { 0x00000824, 0x00000000 },
106 { 0x00001040, 0x002ffc0f },
107 { 0x00001044, 0x002ffc0f },
108 { 0x00001048, 0x002ffc0f },
109 { 0x0000104c, 0x002ffc0f },
110 { 0x00001050, 0x002ffc0f },
111 { 0x00001054, 0x002ffc0f },
112 { 0x00001058, 0x002ffc0f },
113 { 0x0000105c, 0x002ffc0f },
114 { 0x00001060, 0x002ffc0f },
115 { 0x00001064, 0x002ffc0f },
116 { 0x00001230, 0x00000000 },
117 { 0x00001270, 0x00000000 },
118 { 0x00001038, 0x00000000 },
119 { 0x00001078, 0x00000000 },
120 { 0x000010b8, 0x00000000 },
121 { 0x000010f8, 0x00000000 },
122 { 0x00001138, 0x00000000 },
123 { 0x00001178, 0x00000000 },
124 { 0x000011b8, 0x00000000 },
125 { 0x000011f8, 0x00000000 },
126 { 0x00001238, 0x00000000 },
127 { 0x00001278, 0x00000000 },
128 { 0x000012b8, 0x00000000 },
129 { 0x000012f8, 0x00000000 },
130 { 0x00001338, 0x00000000 },
131 { 0x00001378, 0x00000000 },
132 { 0x000013b8, 0x00000000 },
133 { 0x000013f8, 0x00000000 },
134 { 0x00001438, 0x00000000 },
135 { 0x00001478, 0x00000000 },
136 { 0x000014b8, 0x00000000 },
137 { 0x000014f8, 0x00000000 },
138 { 0x00001538, 0x00000000 },
139 { 0x00001578, 0x00000000 },
140 { 0x000015b8, 0x00000000 },
141 { 0x000015f8, 0x00000000 },
142 { 0x00001638, 0x00000000 },
143 { 0x00001678, 0x00000000 },
144 { 0x000016b8, 0x00000000 },
145 { 0x000016f8, 0x00000000 },
146 { 0x00001738, 0x00000000 },
147 { 0x00001778, 0x00000000 },
148 { 0x000017b8, 0x00000000 },
149 { 0x000017f8, 0x00000000 },
150 { 0x0000103c, 0x00000000 },
151 { 0x0000107c, 0x00000000 },
152 { 0x000010bc, 0x00000000 },
153 { 0x000010fc, 0x00000000 },
154 { 0x0000113c, 0x00000000 },
155 { 0x0000117c, 0x00000000 },
156 { 0x000011bc, 0x00000000 },
157 { 0x000011fc, 0x00000000 },
158 { 0x0000123c, 0x00000000 },
159 { 0x0000127c, 0x00000000 },
160 { 0x000012bc, 0x00000000 },
161 { 0x000012fc, 0x00000000 },
162 { 0x0000133c, 0x00000000 },
163 { 0x0000137c, 0x00000000 },
164 { 0x000013bc, 0x00000000 },
165 { 0x000013fc, 0x00000000 },
166 { 0x0000143c, 0x00000000 },
167 { 0x0000147c, 0x00000000 },
168 { 0x00004030, 0x00000002 },
169 { 0x0000403c, 0x00000002 },
170 { 0x00007010, 0x00000000 },
171 { 0x00007038, 0x000004c2 },
172 { 0x00008004, 0x00000000 },
173 { 0x00008008, 0x00000000 },
174 { 0x0000800c, 0x00000000 },
175 { 0x00008018, 0x00000700 },
176 { 0x00008020, 0x00000000 },
177 { 0x00008038, 0x00000000 },
178 { 0x0000803c, 0x00000000 },
179 { 0x00008048, 0x40000000 },
180 { 0x00008054, 0x00000000 },
181 { 0x00008058, 0x00000000 },
182 { 0x0000805c, 0x000fc78f },
183 { 0x00008060, 0x0000000f },
184 { 0x00008064, 0x00000000 },
185 { 0x000080c0, 0x2a82301a },
186 { 0x000080c4, 0x05dc01e0 },
187 { 0x000080c8, 0x1f402710 },
188 { 0x000080cc, 0x01f40000 },
189 { 0x000080d0, 0x00001e00 },
190 { 0x000080d4, 0x00000000 },
191 { 0x000080d8, 0x00400000 },
192 { 0x000080e0, 0xffffffff },
193 { 0x000080e4, 0x0000ffff },
194 { 0x000080e8, 0x003f3f3f },
195 { 0x000080ec, 0x00000000 },
196 { 0x000080f0, 0x00000000 },
197 { 0x000080f4, 0x00000000 },
198 { 0x000080f8, 0x00000000 },
199 { 0x000080fc, 0x00020000 },
200 { 0x00008100, 0x00020000 },
201 { 0x00008104, 0x00000001 },
202 { 0x00008108, 0x00000052 },
203 { 0x0000810c, 0x00000000 },
204 { 0x00008110, 0x00000168 },
205 { 0x00008118, 0x000100aa },
206 { 0x0000811c, 0x00003210 },
207 { 0x00008124, 0x00000000 },
208 { 0x00008128, 0x00000000 },
209 { 0x0000812c, 0x00000000 },
210 { 0x00008130, 0x00000000 },
211 { 0x00008134, 0x00000000 },
212 { 0x00008138, 0x00000000 },
213 { 0x0000813c, 0x00000000 },
214 { 0x00008144, 0xffffffff },
215 { 0x00008168, 0x00000000 },
216 { 0x0000816c, 0x00000000 },
217 { 0x00008170, 0x32143320 },
218 { 0x00008174, 0xfaa4fa50 },
219 { 0x00008178, 0x00000100 },
220 { 0x0000817c, 0x00000000 },
221 { 0x000081c4, 0x00000000 },
222 { 0x000081ec, 0x00000000 },
223 { 0x000081f0, 0x00000000 },
224 { 0x000081f4, 0x00000000 },
225 { 0x000081f8, 0x00000000 },
226 { 0x000081fc, 0x00000000 },
227 { 0x00008200, 0x00000000 },
228 { 0x00008204, 0x00000000 },
229 { 0x00008208, 0x00000000 },
230 { 0x0000820c, 0x00000000 },
231 { 0x00008210, 0x00000000 },
232 { 0x00008214, 0x00000000 },
233 { 0x00008218, 0x00000000 },
234 { 0x0000821c, 0x00000000 },
235 { 0x00008220, 0x00000000 },
236 { 0x00008224, 0x00000000 },
237 { 0x00008228, 0x00000000 },
238 { 0x0000822c, 0x00000000 },
239 { 0x00008230, 0x00000000 },
240 { 0x00008234, 0x00000000 },
241 { 0x00008238, 0x00000000 },
242 { 0x0000823c, 0x00000000 },
243 { 0x00008240, 0x00100000 },
244 { 0x00008244, 0x0010f400 },
245 { 0x00008248, 0x00000100 },
246 { 0x0000824c, 0x0001e800 },
247 { 0x00008250, 0x00000000 },
248 { 0x00008254, 0x00000000 },
249 { 0x00008258, 0x00000000 },
250 { 0x0000825c, 0x400000ff },
251 { 0x00008260, 0x00080922 },
252 { 0x00008264, 0x88000010 },
253 { 0x00008270, 0x00000000 },
254 { 0x00008274, 0x40000000 },
255 { 0x00008278, 0x003e4180 },
256 { 0x0000827c, 0x00000000 },
257 { 0x00008284, 0x0000002c },
258 { 0x00008288, 0x0000002c },
259 { 0x0000828c, 0x00000000 },
260 { 0x00008294, 0x00000000 },
261 { 0x00008298, 0x00000000 },
262 { 0x00008300, 0x00000000 },
263 { 0x00008304, 0x00000000 },
264 { 0x00008308, 0x00000000 },
265 { 0x0000830c, 0x00000000 },
266 { 0x00008310, 0x00000000 },
267 { 0x00008314, 0x00000000 },
268 { 0x00008318, 0x00000000 },
269 { 0x00008328, 0x00000000 },
270 { 0x0000832c, 0x00000007 },
271 { 0x00008330, 0x00000302 },
272 { 0x00008334, 0x00000e00 },
273 { 0x00008338, 0x00070000 },
274 { 0x0000833c, 0x00000000 },
275 { 0x00008340, 0x000107ff },
276 { 0x00009808, 0x00000000 },
277 { 0x0000980c, 0xad848e19 },
278 { 0x00009810, 0x7d14e000 },
279 { 0x00009814, 0x9c0a9f6b },
280 { 0x0000981c, 0x00000000 },
281 { 0x0000982c, 0x0000a000 },
282 { 0x00009830, 0x00000000 },
283 { 0x0000983c, 0x00200400 },
284 { 0x00009840, 0x206a002e },
285 { 0x0000984c, 0x1284233c },
286 { 0x00009854, 0x00000859 },
287 { 0x00009900, 0x00000000 },
288 { 0x00009904, 0x00000000 },
289 { 0x00009908, 0x00000000 },
290 { 0x0000990c, 0x00000000 },
291 { 0x0000991c, 0x10000fff },
292 { 0x00009920, 0x05100000 },
293 { 0x0000a920, 0x05100000 },
294 { 0x0000b920, 0x05100000 },
295 { 0x00009928, 0x00000001 },
296 { 0x0000992c, 0x00000004 },
297 { 0x00009934, 0x1e1f2022 },
298 { 0x00009938, 0x0a0b0c0d },
299 { 0x0000993c, 0x00000000 },
300 { 0x00009948, 0x9280b212 },
301 { 0x0000994c, 0x00020028 },
302 { 0x00009954, 0x5d50e188 },
303 { 0x00009958, 0x00081fff },
304 { 0x0000c95c, 0x004b6a8e },
305 { 0x0000c968, 0x000003ce },
306 { 0x00009970, 0x190fb515 },
307 { 0x00009974, 0x00000000 },
308 { 0x00009978, 0x00000001 },
309 { 0x0000997c, 0x00000000 },
310 { 0x00009980, 0x00000000 },
311 { 0x00009984, 0x00000000 },
312 { 0x00009988, 0x00000000 },
313 { 0x0000998c, 0x00000000 },
314 { 0x00009990, 0x00000000 },
315 { 0x00009994, 0x00000000 },
316 { 0x00009998, 0x00000000 },
317 { 0x0000999c, 0x00000000 },
318 { 0x000099a0, 0x00000000 },
319 { 0x000099a4, 0x00000001 },
320 { 0x000099a8, 0x001fff00 },
321 { 0x000099ac, 0x00000000 },
322 { 0x000099b0, 0x03051000 },
323 { 0x000099dc, 0x00000000 },
324 { 0x000099e0, 0x00000200 },
325 { 0x000099e4, 0xaaaaaaaa },
326 { 0x000099e8, 0x3c466478 },
327 { 0x000099ec, 0x000000aa },
328 { 0x000099fc, 0x00001042 },
329 { 0x00009b00, 0x00000000 },
330 { 0x00009b04, 0x00000001 },
331 { 0x00009b08, 0x00000002 },
332 { 0x00009b0c, 0x00000003 },
333 { 0x00009b10, 0x00000004 },
334 { 0x00009b14, 0x00000005 },
335 { 0x00009b18, 0x00000008 },
336 { 0x00009b1c, 0x00000009 },
337 { 0x00009b20, 0x0000000a },
338 { 0x00009b24, 0x0000000b },
339 { 0x00009b28, 0x0000000c },
340 { 0x00009b2c, 0x0000000d },
341 { 0x00009b30, 0x00000010 },
342 { 0x00009b34, 0x00000011 },
343 { 0x00009b38, 0x00000012 },
344 { 0x00009b3c, 0x00000013 },
345 { 0x00009b40, 0x00000014 },
346 { 0x00009b44, 0x00000015 },
347 { 0x00009b48, 0x00000018 },
348 { 0x00009b4c, 0x00000019 },
349 { 0x00009b50, 0x0000001a },
350 { 0x00009b54, 0x0000001b },
351 { 0x00009b58, 0x0000001c },
352 { 0x00009b5c, 0x0000001d },
353 { 0x00009b60, 0x00000020 },
354 { 0x00009b64, 0x00000021 },
355 { 0x00009b68, 0x00000022 },
356 { 0x00009b6c, 0x00000023 },
357 { 0x00009b70, 0x00000024 },
358 { 0x00009b74, 0x00000025 },
359 { 0x00009b78, 0x00000028 },
360 { 0x00009b7c, 0x00000029 },
361 { 0x00009b80, 0x0000002a },
362 { 0x00009b84, 0x0000002b },
363 { 0x00009b88, 0x0000002c },
364 { 0x00009b8c, 0x0000002d },
365 { 0x00009b90, 0x00000030 },
366 { 0x00009b94, 0x00000031 },
367 { 0x00009b98, 0x00000032 },
368 { 0x00009b9c, 0x00000033 },
369 { 0x00009ba0, 0x00000034 },
370 { 0x00009ba4, 0x00000035 },
371 { 0x00009ba8, 0x00000035 },
372 { 0x00009bac, 0x00000035 },
373 { 0x00009bb0, 0x00000035 },
374 { 0x00009bb4, 0x00000035 },
375 { 0x00009bb8, 0x00000035 },
376 { 0x00009bbc, 0x00000035 },
377 { 0x00009bc0, 0x00000035 },
378 { 0x00009bc4, 0x00000035 },
379 { 0x00009bc8, 0x00000035 },
380 { 0x00009bcc, 0x00000035 },
381 { 0x00009bd0, 0x00000035 },
382 { 0x00009bd4, 0x00000035 },
383 { 0x00009bd8, 0x00000035 },
384 { 0x00009bdc, 0x00000035 },
385 { 0x00009be0, 0x00000035 },
386 { 0x00009be4, 0x00000035 },
387 { 0x00009be8, 0x00000035 },
388 { 0x00009bec, 0x00000035 },
389 { 0x00009bf0, 0x00000035 },
390 { 0x00009bf4, 0x00000035 },
391 { 0x00009bf8, 0x00000010 },
392 { 0x00009bfc, 0x0000001a },
393 { 0x0000a210, 0x40806333 },
394 { 0x0000a214, 0x00106c10 },
395 { 0x0000a218, 0x009c4060 },
396 { 0x0000a220, 0x018830c6 },
397 { 0x0000a224, 0x00000400 },
398 { 0x0000a228, 0x00000bb5 },
399 { 0x0000a22c, 0x00000011 },
400 { 0x0000a234, 0x20202020 },
401 { 0x0000a238, 0x20202020 },
402 { 0x0000a23c, 0x13c889af },
403 { 0x0000a240, 0x38490a20 },
404 { 0x0000a244, 0x00007bb6 },
405 { 0x0000a248, 0x0fff3ffc },
406 { 0x0000a24c, 0x00000001 },
407 { 0x0000a250, 0x0000a000 },
408 { 0x0000a254, 0x00000000 },
409 { 0x0000a258, 0x0cc75380 },
410 { 0x0000a25c, 0x0f0f0f01 },
411 { 0x0000a260, 0xdfa91f01 },
412 { 0x0000a268, 0x00000000 },
413 { 0x0000a26c, 0x0e79e5c6 },
414 { 0x0000b26c, 0x0e79e5c6 },
415 { 0x0000c26c, 0x0e79e5c6 },
416 { 0x0000d270, 0x00820820 },
417 { 0x0000a278, 0x1ce739ce },
418 { 0x0000a27c, 0x051701ce },
419 { 0x0000a338, 0x00000000 },
420 { 0x0000a33c, 0x00000000 },
421 { 0x0000a340, 0x00000000 },
422 { 0x0000a344, 0x00000000 },
423 { 0x0000a348, 0x3fffffff },
424 { 0x0000a34c, 0x3fffffff },
425 { 0x0000a350, 0x3fffffff },
426 { 0x0000a354, 0x0003ffff },
427 { 0x0000a358, 0x79a8aa1f },
428 { 0x0000d35c, 0x07ffffef },
429 { 0x0000d360, 0x0fffffe7 },
430 { 0x0000d364, 0x17ffffe5 },
431 { 0x0000d368, 0x1fffffe4 },
432 { 0x0000d36c, 0x37ffffe3 },
433 { 0x0000d370, 0x3fffffe3 },
434 { 0x0000d374, 0x57ffffe3 },
435 { 0x0000d378, 0x5fffffe2 },
436 { 0x0000d37c, 0x7fffffe2 },
437 { 0x0000d380, 0x7f3c7bba },
438 { 0x0000d384, 0xf3307ff0 },
439 { 0x0000a388, 0x08000000 },
440 { 0x0000a38c, 0x20202020 },
441 { 0x0000a390, 0x20202020 },
442 { 0x0000a394, 0x1ce739ce },
443 { 0x0000a398, 0x000001ce },
444 { 0x0000a39c, 0x00000001 },
445 { 0x0000a3a0, 0x00000000 },
446 { 0x0000a3a4, 0x00000000 },
447 { 0x0000a3a8, 0x00000000 },
448 { 0x0000a3ac, 0x00000000 },
449 { 0x0000a3b0, 0x00000000 },
450 { 0x0000a3b4, 0x00000000 },
451 { 0x0000a3b8, 0x00000000 },
452 { 0x0000a3bc, 0x00000000 },
453 { 0x0000a3c0, 0x00000000 },
454 { 0x0000a3c4, 0x00000000 },
455 { 0x0000a3c8, 0x00000246 },
456 { 0x0000a3cc, 0x20202020 },
457 { 0x0000a3d0, 0x20202020 },
458 { 0x0000a3d4, 0x20202020 },
459 { 0x0000a3dc, 0x1ce739ce },
460 { 0x0000a3e0, 0x000001ce },
461};
462
463static const u32 ar5416Bank0[][2] = {
464 { 0x000098b0, 0x1e5795e5 },
465 { 0x000098e0, 0x02008020 },
466};
467
468static const u32 ar5416BB_RfGain[][3] = {
469 { 0x00009a00, 0x00000000, 0x00000000 },
470 { 0x00009a04, 0x00000040, 0x00000040 },
471 { 0x00009a08, 0x00000080, 0x00000080 },
472 { 0x00009a0c, 0x000001a1, 0x00000141 },
473 { 0x00009a10, 0x000001e1, 0x00000181 },
474 { 0x00009a14, 0x00000021, 0x000001c1 },
475 { 0x00009a18, 0x00000061, 0x00000001 },
476 { 0x00009a1c, 0x00000168, 0x00000041 },
477 { 0x00009a20, 0x000001a8, 0x000001a8 },
478 { 0x00009a24, 0x000001e8, 0x000001e8 },
479 { 0x00009a28, 0x00000028, 0x00000028 },
480 { 0x00009a2c, 0x00000068, 0x00000068 },
481 { 0x00009a30, 0x00000189, 0x000000a8 },
482 { 0x00009a34, 0x000001c9, 0x00000169 },
483 { 0x00009a38, 0x00000009, 0x000001a9 },
484 { 0x00009a3c, 0x00000049, 0x000001e9 },
485 { 0x00009a40, 0x00000089, 0x00000029 },
486 { 0x00009a44, 0x00000170, 0x00000069 },
487 { 0x00009a48, 0x000001b0, 0x00000190 },
488 { 0x00009a4c, 0x000001f0, 0x000001d0 },
489 { 0x00009a50, 0x00000030, 0x00000010 },
490 { 0x00009a54, 0x00000070, 0x00000050 },
491 { 0x00009a58, 0x00000191, 0x00000090 },
492 { 0x00009a5c, 0x000001d1, 0x00000151 },
493 { 0x00009a60, 0x00000011, 0x00000191 },
494 { 0x00009a64, 0x00000051, 0x000001d1 },
495 { 0x00009a68, 0x00000091, 0x00000011 },
496 { 0x00009a6c, 0x000001b8, 0x00000051 },
497 { 0x00009a70, 0x000001f8, 0x00000198 },
498 { 0x00009a74, 0x00000038, 0x000001d8 },
499 { 0x00009a78, 0x00000078, 0x00000018 },
500 { 0x00009a7c, 0x00000199, 0x00000058 },
501 { 0x00009a80, 0x000001d9, 0x00000098 },
502 { 0x00009a84, 0x00000019, 0x00000159 },
503 { 0x00009a88, 0x00000059, 0x00000199 },
504 { 0x00009a8c, 0x00000099, 0x000001d9 },
505 { 0x00009a90, 0x000000d9, 0x00000019 },
506 { 0x00009a94, 0x000000f9, 0x00000059 },
507 { 0x00009a98, 0x000000f9, 0x00000099 },
508 { 0x00009a9c, 0x000000f9, 0x000000d9 },
509 { 0x00009aa0, 0x000000f9, 0x000000f9 },
510 { 0x00009aa4, 0x000000f9, 0x000000f9 },
511 { 0x00009aa8, 0x000000f9, 0x000000f9 },
512 { 0x00009aac, 0x000000f9, 0x000000f9 },
513 { 0x00009ab0, 0x000000f9, 0x000000f9 },
514 { 0x00009ab4, 0x000000f9, 0x000000f9 },
515 { 0x00009ab8, 0x000000f9, 0x000000f9 },
516 { 0x00009abc, 0x000000f9, 0x000000f9 },
517 { 0x00009ac0, 0x000000f9, 0x000000f9 },
518 { 0x00009ac4, 0x000000f9, 0x000000f9 },
519 { 0x00009ac8, 0x000000f9, 0x000000f9 },
520 { 0x00009acc, 0x000000f9, 0x000000f9 },
521 { 0x00009ad0, 0x000000f9, 0x000000f9 },
522 { 0x00009ad4, 0x000000f9, 0x000000f9 },
523 { 0x00009ad8, 0x000000f9, 0x000000f9 },
524 { 0x00009adc, 0x000000f9, 0x000000f9 },
525 { 0x00009ae0, 0x000000f9, 0x000000f9 },
526 { 0x00009ae4, 0x000000f9, 0x000000f9 },
527 { 0x00009ae8, 0x000000f9, 0x000000f9 },
528 { 0x00009aec, 0x000000f9, 0x000000f9 },
529 { 0x00009af0, 0x000000f9, 0x000000f9 },
530 { 0x00009af4, 0x000000f9, 0x000000f9 },
531 { 0x00009af8, 0x000000f9, 0x000000f9 },
532 { 0x00009afc, 0x000000f9, 0x000000f9 },
533};
534
535static const u32 ar5416Bank1[][2] = {
536 { 0x000098b0, 0x02108421 },
537 { 0x000098ec, 0x00000008 },
538};
539
540static const u32 ar5416Bank2[][2] = {
541 { 0x000098b0, 0x0e73ff17 },
542 { 0x000098e0, 0x00000420 },
543};
544
545static const u32 ar5416Bank3[][3] = {
546 { 0x000098f0, 0x01400018, 0x01c00018 },
547};
548
/*
 * RF Bank 6 image: { register, value per mode column }.
 * NOTE(review): the repeated writes to 0x989c look like a serial shift-in of
 * the bank data, with the final 0x98d0 write presumably latching it — confirm
 * against the AR5416 programming notes.  This bank is also patched at runtime
 * (ob/db fields, rf_pwd_icsyndiv) via ar5008_hw_phy_modify_rx_buffer().
 */
static const u32 ar5416Bank6[][3] = {

	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00e00000, 0x00e00000 },
	{ 0x0000989c, 0x005e0000, 0x005e0000 },
	{ 0x0000989c, 0x00120000, 0x00120000 },
	{ 0x0000989c, 0x00620000, 0x00620000 },
	{ 0x0000989c, 0x00020000, 0x00020000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x40ff0000, 0x40ff0000 },
	{ 0x0000989c, 0x005f0000, 0x005f0000 },
	{ 0x0000989c, 0x00870000, 0x00870000 },
	{ 0x0000989c, 0x00f90000, 0x00f90000 },
	{ 0x0000989c, 0x007b0000, 0x007b0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00f50000, 0x00f50000 },
	{ 0x0000989c, 0x00dc0000, 0x00dc0000 },
	{ 0x0000989c, 0x00110000, 0x00110000 },
	{ 0x0000989c, 0x006100a8, 0x006100a8 },
	{ 0x0000989c, 0x004210a2, 0x004210a2 },
	{ 0x0000989c, 0x0014008f, 0x0014008f },
	{ 0x0000989c, 0x00c40003, 0x00c40003 },
	{ 0x0000989c, 0x003000f2, 0x003000f2 },
	{ 0x0000989c, 0x00440016, 0x00440016 },
	{ 0x0000989c, 0x00410040, 0x00410040 },
	{ 0x0000989c, 0x0001805e, 0x0001805e },
	{ 0x0000989c, 0x0000c0ab, 0x0000c0ab },
	{ 0x0000989c, 0x000000f1, 0x000000f1 },
	{ 0x0000989c, 0x00002081, 0x00002081 },
	{ 0x0000989c, 0x000000d4, 0x000000d4 },
	{ 0x000098d0, 0x0000000f, 0x0010000f },
};
585
/*
 * RF Bank 6 image, TPC (transmit power control) variant.  Differs from
 * ar5416Bank6 only in a few rows near the end.  ar5008_hw_set_rf_regs()
 * fills ah->analogBank6Data from this table (not from ar5416Bank6).
 */
static const u32 ar5416Bank6TPC[][3] = {
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00000000, 0x00000000 },
	{ 0x0000989c, 0x00e00000, 0x00e00000 },
	{ 0x0000989c, 0x005e0000, 0x005e0000 },
	{ 0x0000989c, 0x00120000, 0x00120000 },
	{ 0x0000989c, 0x00620000, 0x00620000 },
	{ 0x0000989c, 0x00020000, 0x00020000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x40ff0000, 0x40ff0000 },
	{ 0x0000989c, 0x005f0000, 0x005f0000 },
	{ 0x0000989c, 0x00870000, 0x00870000 },
	{ 0x0000989c, 0x00f90000, 0x00f90000 },
	{ 0x0000989c, 0x007b0000, 0x007b0000 },
	{ 0x0000989c, 0x00ff0000, 0x00ff0000 },
	{ 0x0000989c, 0x00f50000, 0x00f50000 },
	{ 0x0000989c, 0x00dc0000, 0x00dc0000 },
	{ 0x0000989c, 0x00110000, 0x00110000 },
	{ 0x0000989c, 0x006100a8, 0x006100a8 },
	{ 0x0000989c, 0x00423022, 0x00423022 },
	{ 0x0000989c, 0x201400df, 0x201400df },
	{ 0x0000989c, 0x00c40002, 0x00c40002 },
	{ 0x0000989c, 0x003000f2, 0x003000f2 },
	{ 0x0000989c, 0x00440016, 0x00440016 },
	{ 0x0000989c, 0x00410040, 0x00410040 },
	{ 0x0000989c, 0x0001805e, 0x0001805e },
	{ 0x0000989c, 0x0000c0ab, 0x0000c0ab },
	{ 0x0000989c, 0x000000e1, 0x000000e1 },
	{ 0x0000989c, 0x00007081, 0x00007081 },
	{ 0x0000989c, 0x000000d4, 0x000000d4 },
	{ 0x000098d0, 0x0000000f, 0x0010000f },
};
621
/* RF Bank 7 image for the external AR2133/AR5133 radio: { register, value }. */
static const u32 ar5416Bank7[][2] = {
	{ 0x0000989c, 0x00000500 },
	{ 0x0000989c, 0x00000800 },
	{ 0x000098cc, 0x0000000e },
};
627
/*
 * ADDAC shift data: { register, value }, written by ar5008_hw_process_ini().
 * Row 31 (value 0x58) is the CLKDRV setting; on AR5416 2.0/2.1 process_ini
 * copies this table and overrides that row's value to 0 before writing.
 */
static const u32 ar5416Addac[][2] = {
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000003 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x0000000c },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000030 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000060 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000058 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x0000989c, 0x00000000 },
	{0x000098cc, 0x00000000 },
};
667
/*
 * Mode-dependent initvals for AR9100: { register, 5 per-mode value columns }.
 * ar5008_hw_process_ini() indexes columns 1-4 via modesIndex
 * (1 = A/HT20, 2 = A-HT40, 3 = G-HT40, 4 = G/G-HT20/B); the 5th value
 * column is not selected by the visible code — presumably a turbo mode,
 * confirm against the initvals generator.
 */
static const u32 ar5416Modes_9100[][6] = {
	{ 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
	{ 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
	{ 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
	{ 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
	{ 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
	{ 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
	{ 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
	{ 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
	{ 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
	{ 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
	{ 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
	{ 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
	{ 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
	{ 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
	{ 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
	{ 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
	{ 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
	{ 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
	{ 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
	{ 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
	{ 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
	{ 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
	{ 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
	{ 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
	{ 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
	{ 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
	{ 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
	{ 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
	{ 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
	{ 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
#ifdef TB243
	{ 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
	{ 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
	{ 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
	{ 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
#else
	{ 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
	{ 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
	{ 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
	{ 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
#endif
	{ 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
	{ 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
	{ 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
	{ 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
	{ 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
	{ 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
	{ 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
	{ 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
	{ 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
	{ 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
	{ 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
	{ 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
	{ 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
	{ 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
	{ 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
	{ 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
	{ 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
	{ 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
	{ 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
	{ 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
	{ 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
	{ 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
	{ 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
	{ 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
	{ 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
	{ 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
741
742#endif /* INITVALS_AR5008_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
new file mode 100644
index 000000000000..b2c17c98bb38
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -0,0 +1,1374 @@
1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "hw-ops.h"
19#include "../regd.h"
20#include "ar9002_phy.h"
21
22/* All code below is for non single-chip solutions */
23
/**
 * ar5008_hw_phy_modify_rx_buffer() - perform analog swizzling of parameters
 * @rfBuf: cached RF bank image to patch in place
 * @reg32: field value to insert (taken from the low @numBits bits)
 * @numBits: width of the field in bits
 * @firstBit: 1-based bit position of the field within the serialized bank
 * @column: byte lane (0-3) of each 32-bit word that carries this bank
 *
 * Performs analog "swizzling" of parameters into their location.
 * Used on external AR2133/AR5133 radios.
 */
static void ar5008_hw_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
					   u32 numBits, u32 firstBit,
					   u32 column)
{
	u32 tmp32, mask, arrayEntry, lastBit;
	int32_t bitPosition, bitsLeft;

	/* Pre-reverse the field to match the hardware's shift-in order. */
	tmp32 = ath9k_hw_reverse_bits(reg32, numBits);

	/* Each array entry holds 8 bank bits in the selected byte lane. */
	arrayEntry = (firstBit - 1) / 8;
	bitPosition = (firstBit - 1) % 8;
	bitsLeft = numBits;
	while (bitsLeft > 0) {
		/* Insert up to (8 - bitPosition) bits into this entry. */
		lastBit = (bitPosition + bitsLeft > 8) ?
			8 : bitPosition + bitsLeft;
		mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
			(column * 8);
		rfBuf[arrayEntry] &= ~mask;
		rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
				      (column * 8)) & mask;
		/* Consume the bits just written and move to the next entry. */
		bitsLeft -= 8 - bitPosition;
		tmp32 = tmp32 >> (8 - bitPosition);
		bitPosition = 0;
		arrayEntry++;
	}
}
60
61/*
62 * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
63 * rf_pwd_icsyndiv.
64 *
65 * Theoretical Rules:
66 * if 2 GHz band
67 * if forceBiasAuto
68 * if synth_freq < 2412
69 * bias = 0
70 * else if 2412 <= synth_freq <= 2422
71 * bias = 1
72 * else // synth_freq > 2422
73 * bias = 2
74 * else if forceBias > 0
75 * bias = forceBias & 7
76 * else
77 * no change, use value from ini file
78 * else
79 * no change, invalid band
80 *
81 * 1st Mod:
82 * 2422 also uses value of 2
83 * <approved>
84 *
85 * 2nd Mod:
86 * Less than 2412 uses value of 0, 2412 and above uses value of 2
87 */
88static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
89{
90 struct ath_common *common = ath9k_hw_common(ah);
91 u32 tmp_reg;
92 int reg_writes = 0;
93 u32 new_bias = 0;
94
95 if (!AR_SREV_5416(ah) || synth_freq >= 3000)
96 return;
97
98 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
99
100 if (synth_freq < 2412)
101 new_bias = 0;
102 else if (synth_freq < 2422)
103 new_bias = 1;
104 else
105 new_bias = 2;
106
107 /* pre-reverse this field */
108 tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
109
110 ath_print(common, ATH_DBG_CONFIG,
111 "Force rf_pwd_icsyndiv to %1d on %4d\n",
112 new_bias, synth_freq);
113
114 /* swizzle rf_pwd_icsyndiv */
115 ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
116
117 /* write Bank 6 with new params */
118 REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
119}
120
/**
 * ar5008_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
 * @ah: atheros hardware structure
 * @chan: channel to tune the synthesizer to
 *
 * For the external AR2133/AR5133 radios, takes the MHz channel value and set
 * the channel value. Assumes writes enabled to analog bus and bank6 register
 * cache in ah->analogBank6Data.
 *
 * Returns 0 on success, -EINVAL if the synth center frequency does not map
 * to a known channel encoding.
 */
static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u32 channelSel = 0;
	u32 bModeSynth = 0;
	u32 aModeRefSel = 0;
	u32 reg32 = 0;
	u16 freq;
	struct chan_centers centers;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = centers.synth_center;

	if (freq < 4800) {
		/* 2 GHz band */
		u32 txctl;

		if (((freq - 2192) % 5) == 0) {
			channelSel = ((freq - 672) * 2 - 3040) / 10;
			bModeSynth = 0;
		} else if (((freq - 2224) % 5) == 0) {
			channelSel = ((freq - 704) * 2 - 3040) / 10;
			bModeSynth = 1;
		} else {
			ath_print(common, ATH_DBG_FATAL,
				  "Invalid channel %u MHz\n", freq);
			return -EINVAL;
		}

		/* Hardware expects the channel field bit-reversed. */
		channelSel = (channelSel << 2) & 0xff;
		channelSel = ath9k_hw_reverse_bits(channelSel, 8);

		txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
		if (freq == 2484) {
			/* Channel 14: enable the Japan CCK TX filter. */

			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
				  txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
		} else {
			REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
				  txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
		}

	} else if ((freq % 20) == 0 && freq >= 5120) {
		/* 5 GHz, 20 MHz spaced channel */
		channelSel =
			ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
		aModeRefSel = ath9k_hw_reverse_bits(1, 2);
	} else if ((freq % 10) == 0) {
		/* 5 GHz, 10 MHz spaced channel */
		channelSel =
			ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
		if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
			aModeRefSel = ath9k_hw_reverse_bits(2, 2);
		else
			aModeRefSel = ath9k_hw_reverse_bits(1, 2);
	} else if ((freq % 5) == 0) {
		/* 5 GHz, 5 MHz spaced channel */
		channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
		aModeRefSel = ath9k_hw_reverse_bits(1, 2);
	} else {
		ath_print(common, ATH_DBG_FATAL,
			  "Invalid channel %u MHz\n", freq);
		return -EINVAL;
	}

	/* 2.4 GHz orientation-sensitivity bias workaround (AR5416 only). */
	ar5008_hw_force_bias(ah, freq);

	/* Compose and write the synthesizer programming word. */
	reg32 =
	    (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
	    (1 << 5) | 0x1;

	REG_WRITE(ah, AR_PHY(0x37), reg32);

	ah->curchan = chan;
	ah->curchan_rad_index = -1;

	return 0;
}
204
/**
 * ar5008_hw_spur_mitigate - convert baseband spur frequency for external radios
 * @ah: atheros hardware structure
 * @chan: channel being configured
 *
 * For non single-chip solutions. Converts to baseband spur frequency given the
 * input channel frequency and compute register settings below.
 */
static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
				    struct ath9k_channel *chan)
{
	int bb_spur = AR_NO_SPUR;
	int bin, cur_bin;
	int spur_freq_sd;
	int spur_delta_phase;
	int denominator;
	int upper, lower, cur_vit_mask;
	int tmp, new;
	int i;
	int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
		AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
	};
	int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
		AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
	};
	int inc[4] = { 0, 100, 0, 0 };

	int8_t mask_m[123];
	int8_t mask_p[123];
	int8_t mask_amt;
	int tmp_mask;
	int cur_bb_spur;
	bool is2GHz = IS_CHAN_2GHZ(chan);

	memset(&mask_m, 0, sizeof(int8_t) * 123);
	memset(&mask_p, 0, sizeof(int8_t) * 123);

	/*
	 * Find the first EEPROM spur that lands within +/-95 units of the
	 * channel (values appear to be in 100 kHz steps, i.e. +/-9.5 MHz —
	 * confirm against the EEPROM spur-channel format).
	 */
	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
		if (AR_NO_SPUR == cur_bb_spur)
			break;
		cur_bb_spur = cur_bb_spur - (chan->channel * 10);
		if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
			bb_spur = cur_bb_spur;
			break;
		}
	}

	/* No spur near this channel: nothing to program. */
	if (AR_NO_SPUR == bb_spur)
		return;

	bin = bb_spur * 32;

	/* Enable spur RSSI measurement, filtering and chan/pilot masking. */
	tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
	new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
		     AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
		     AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
		     AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);

	REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);

	new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
	       AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
	       AR_PHY_SPUR_REG_MASK_RATE_SELECT |
	       AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
	       SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
	REG_WRITE(ah, AR_PHY_SPUR_REG, new);

	/* Program the spur frequency into the self-correlator. */
	spur_delta_phase = ((bb_spur * 524288) / 100) &
		AR_PHY_TIMING11_SPUR_DELTA_PHASE;

	denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
	spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;

	new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
	       SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
	       SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
	REG_WRITE(ah, AR_PHY_TIMING11, new);

	/*
	 * Build the pilot/channel mask registers: mark every 100-unit bin
	 * that falls within +/-100 of the spur bin.
	 */
	cur_bin = -6000;
	upper = bin + 100;
	lower = bin - 100;

	for (i = 0; i < 4; i++) {
		int pilot_mask = 0;
		int chan_mask = 0;
		int bp = 0;
		for (bp = 0; bp < 30; bp++) {
			if ((cur_bin > lower) && (cur_bin < upper)) {
				pilot_mask = pilot_mask | 0x1 << bp;
				chan_mask = chan_mask | 0x1 << bp;
			}
			cur_bin += 100;
		}
		cur_bin += inc[i];
		REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
		REG_WRITE(ah, chan_mask_reg[i], chan_mask);
	}

	/* Build the Viterbi decoder masks around the spur bin. */
	cur_vit_mask = 6100;
	upper = bin + 120;
	lower = bin - 120;

	for (i = 0; i < 123; i++) {
		if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {

			/* workaround for gcc bug #37014 */
			volatile int tmp_v = abs(cur_vit_mask - bin);

			if (tmp_v < 75)
				mask_amt = 1;
			else
				mask_amt = 0;
			if (cur_vit_mask < 0)
				mask_m[abs(cur_vit_mask / 100)] = mask_amt;
			else
				mask_p[cur_vit_mask / 100] = mask_amt;
		}
		cur_vit_mask -= 100;
	}

	/* Pack mask_m[46..61] (negative-side bins) into the registers. */
	tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
		| (mask_m[48] << 26) | (mask_m[49] << 24)
		| (mask_m[50] << 22) | (mask_m[51] << 20)
		| (mask_m[52] << 18) | (mask_m[53] << 16)
		| (mask_m[54] << 14) | (mask_m[55] << 12)
		| (mask_m[56] << 10) | (mask_m[57] << 8)
		| (mask_m[58] << 6) | (mask_m[59] << 4)
		| (mask_m[60] << 2) | (mask_m[61] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
	REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);

	/*
	 * Pack mask_m[31..45].
	 * NOTE(review): mask_m[48] below breaks the ascending 31..45
	 * pattern — neighbours suggest mask_m[38].  Do not change without
	 * confirming against the vendor HAL this was derived from.
	 */
	tmp_mask = (mask_m[31] << 28)
		| (mask_m[32] << 26) | (mask_m[33] << 24)
		| (mask_m[34] << 22) | (mask_m[35] << 20)
		| (mask_m[36] << 18) | (mask_m[37] << 16)
		| (mask_m[48] << 14) | (mask_m[39] << 12)
		| (mask_m[40] << 10) | (mask_m[41] << 8)
		| (mask_m[42] << 6) | (mask_m[43] << 4)
		| (mask_m[44] << 2) | (mask_m[45] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);

	/*
	 * Pack mask_m[16..30].
	 * NOTE(review): indices 16, 18, 20, 22 and 24 are used twice while
	 * 17, 19, 21 and 23 are never read — looks like a transcription
	 * quirk inherited from the vendor HAL; confirm before changing.
	 */
	tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
		| (mask_m[18] << 26) | (mask_m[18] << 24)
		| (mask_m[20] << 22) | (mask_m[20] << 20)
		| (mask_m[22] << 18) | (mask_m[22] << 16)
		| (mask_m[24] << 14) | (mask_m[24] << 12)
		| (mask_m[25] << 10) | (mask_m[26] << 8)
		| (mask_m[27] << 6) | (mask_m[28] << 4)
		| (mask_m[29] << 2) | (mask_m[30] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);

	/* Pack mask_m[0..15]. */
	tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
		| (mask_m[2] << 26) | (mask_m[3] << 24)
		| (mask_m[4] << 22) | (mask_m[5] << 20)
		| (mask_m[6] << 18) | (mask_m[7] << 16)
		| (mask_m[8] << 14) | (mask_m[9] << 12)
		| (mask_m[10] << 10) | (mask_m[11] << 8)
		| (mask_m[12] << 6) | (mask_m[13] << 4)
		| (mask_m[14] << 2) | (mask_m[15] << 0);
	REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);

	/* Positive-side bins: mask_p[1..15]. */
	tmp_mask = (mask_p[15] << 28)
		| (mask_p[14] << 26) | (mask_p[13] << 24)
		| (mask_p[12] << 22) | (mask_p[11] << 20)
		| (mask_p[10] << 18) | (mask_p[9] << 16)
		| (mask_p[8] << 14) | (mask_p[7] << 12)
		| (mask_p[6] << 10) | (mask_p[5] << 8)
		| (mask_p[4] << 6) | (mask_p[3] << 4)
		| (mask_p[2] << 2) | (mask_p[1] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);

	/* mask_p[16..30]. */
	tmp_mask = (mask_p[30] << 28)
		| (mask_p[29] << 26) | (mask_p[28] << 24)
		| (mask_p[27] << 22) | (mask_p[26] << 20)
		| (mask_p[25] << 18) | (mask_p[24] << 16)
		| (mask_p[23] << 14) | (mask_p[22] << 12)
		| (mask_p[21] << 10) | (mask_p[20] << 8)
		| (mask_p[19] << 6) | (mask_p[18] << 4)
		| (mask_p[17] << 2) | (mask_p[16] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);

	/* mask_p[31..45]. */
	tmp_mask = (mask_p[45] << 28)
		| (mask_p[44] << 26) | (mask_p[43] << 24)
		| (mask_p[42] << 22) | (mask_p[41] << 20)
		| (mask_p[40] << 18) | (mask_p[39] << 16)
		| (mask_p[38] << 14) | (mask_p[37] << 12)
		| (mask_p[36] << 10) | (mask_p[35] << 8)
		| (mask_p[34] << 6) | (mask_p[33] << 4)
		| (mask_p[32] << 2) | (mask_p[31] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);

	/* mask_p[46..61]. */
	tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
		| (mask_p[59] << 26) | (mask_p[58] << 24)
		| (mask_p[57] << 22) | (mask_p[56] << 20)
		| (mask_p[55] << 18) | (mask_p[54] << 16)
		| (mask_p[53] << 14) | (mask_p[52] << 12)
		| (mask_p[51] << 10) | (mask_p[50] << 8)
		| (mask_p[49] << 6) | (mask_p[48] << 4)
		| (mask_p[47] << 2) | (mask_p[46] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
}
414
415/**
416 * ar5008_hw_rf_alloc_ext_banks - allocates banks for external radio programming
417 * @ah: atheros hardware structure
418 *
419 * Only required for older devices with external AR2133/AR5133 radios.
420 */
421static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
422{
423#define ATH_ALLOC_BANK(bank, size) do { \
424 bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
425 if (!bank) { \
426 ath_print(common, ATH_DBG_FATAL, \
427 "Cannot allocate RF banks\n"); \
428 return -ENOMEM; \
429 } \
430 } while (0);
431
432 struct ath_common *common = ath9k_hw_common(ah);
433
434 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
435
436 ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
437 ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
438 ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
439 ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
440 ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
441 ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
442 ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
443 ATH_ALLOC_BANK(ah->addac5416_21,
444 ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
445 ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
446
447 return 0;
448#undef ATH_ALLOC_BANK
449}
450
451
452/**
453 * ar5008_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
454 * @ah: atheros hardware struture
455 * For the external AR2133/AR5133 radios banks.
456 */
457static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
458{
459#define ATH_FREE_BANK(bank) do { \
460 kfree(bank); \
461 bank = NULL; \
462 } while (0);
463
464 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
465
466 ATH_FREE_BANK(ah->analogBank0Data);
467 ATH_FREE_BANK(ah->analogBank1Data);
468 ATH_FREE_BANK(ah->analogBank2Data);
469 ATH_FREE_BANK(ah->analogBank3Data);
470 ATH_FREE_BANK(ah->analogBank6Data);
471 ATH_FREE_BANK(ah->analogBank6TPCData);
472 ATH_FREE_BANK(ah->analogBank7Data);
473 ATH_FREE_BANK(ah->addac5416_21);
474 ATH_FREE_BANK(ah->bank6Temp);
475
476#undef ATH_FREE_BANK
477}
478
/**
 * ar5008_hw_set_rf_regs - programs rf registers based on EEPROM
 * @ah: atheros hardware structure
 * @chan: channel being configured (selects 2 vs 5 GHz ob/db values)
 * @modesIndex: mode column of the ini tables to use
 *
 * Used for the external AR2133/AR5133 radios.
 *
 * Reads the EEPROM header info from the device structure and programs
 * all rf registers. This routine requires access to the analog
 * rf device. This is not required for single-chip devices.
 */
static bool ar5008_hw_set_rf_regs(struct ath_hw *ah,
				  struct ath9k_channel *chan,
				  u16 modesIndex)
{
	u32 eepMinorRev;
	u32 ob5GHz = 0, db5GHz = 0;
	u32 ob2GHz = 0, db2GHz = 0;
	int regWrites = 0;

	/*
	 * Software does not need to program bank data
	 * for single chip devices, that is AR9280 or anything
	 * after that.
	 */
	if (AR_SREV_9280_10_OR_LATER(ah))
		return true;

	/* Setup rf parameters */
	eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);

	/* Setup Bank 0 Write */
	RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);

	/* Setup Bank 1 Write */
	RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);

	/* Setup Bank 2 Write */
	RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);

	/* Setup Bank 3 Write (mode dependent) */
	RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
		      modesIndex);
	{
		int i;
		/*
		 * The Bank 6 cache is deliberately filled from the TPC
		 * variant of the table (iniBank6TPC), not iniBank6.
		 */
		for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
			ah->analogBank6Data[i] =
				INI_RA(&ah->iniBank6TPC, i, modesIndex);
		}
	}

	/* Only the 5 or 2 GHz OB/DB need to be set for a mode */
	if (eepMinorRev >= 2) {
		if (IS_CHAN_2GHZ(chan)) {
			ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
			db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
			/* Patch output/driver bias into the Bank 6 image. */
			ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
						       ob2GHz, 3, 197, 0);
			ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
						       db2GHz, 3, 194, 0);
		} else {
			ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
			db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
			ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
						       ob5GHz, 3, 203, 0);
			ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data,
						       db5GHz, 3, 200, 0);
		}
	}

	/* Setup Bank 7 Setup */
	RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);

	/* Write Analog registers */
	REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
			   regWrites);
	REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
			   regWrites);
	REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
			   regWrites);
	REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
			   regWrites);
	REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
			   regWrites);
	REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
			   regWrites);

	return true;
}
569
570static void ar5008_hw_init_bb(struct ath_hw *ah,
571 struct ath9k_channel *chan)
572{
573 u32 synthDelay;
574
575 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
576 if (IS_CHAN_B(chan))
577 synthDelay = (4 * synthDelay) / 22;
578 else
579 synthDelay /= 10;
580
581 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
582
583 udelay(synthDelay + BASE_ACTIVATE_DELAY);
584}
585
/* Program the RX/TX chain masks, with chip-specific swap workarounds. */
static void ar5008_hw_init_chain_masks(struct ath_hw *ah)
{
	int rx_chainmask, tx_chainmask;

	rx_chainmask = ah->rxchainmask;
	tx_chainmask = ah->txchainmask;

	ENABLE_REGWRITE_BUFFER(ah);

	switch (rx_chainmask) {
	case 0x5:
		/* Chains 0+2: select the alternate chain mapping. */
		DISABLE_REGWRITE_BUFFER(ah);
		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
			    AR_PHY_SWAP_ALT_CHAIN);
		ENABLE_REGWRITE_BUFFER(ah);
		/* fall through */
	case 0x3:
		if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
			/* AR5416 1.0 workaround: force all three chains. */
			REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
			REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
			break;
		}
		/* fall through */
	case 0x1:
	case 0x2:
	case 0x7:
		REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
		REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
		break;
	default:
		break;
	}

	REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);

	REGWRITE_BUFFER_FLUSH(ah);
	DISABLE_REGWRITE_BUFFER(ah);

	if (tx_chainmask == 0x5) {
		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
			    AR_PHY_SWAP_ALT_CHAIN);
	}
	if (AR_SREV_9100(ah))
		REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
			  REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
}
630
/* Apply post-ini register overrides and chip-revision workarounds. */
static void ar5008_hw_override_ini(struct ath_hw *ah,
				   struct ath9k_channel *chan)
{
	u32 val;

	/*
	 * Set the RX_ABORT and RX_DIS and clear if off only after
	 * RXE is set for MAC. This prevents frames with corrupted
	 * descriptor status.
	 */
	REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

	if (AR_SREV_9280_10_OR_LATER(ah)) {
		/* Clear hardware-workaround bits that don't apply here. */
		val = REG_READ(ah, AR_PCU_MISC_MODE2);

		if (!AR_SREV_9271(ah))
			val &= ~AR_PCU_MISC_MODE2_HWWAR1;

		if (AR_SREV_9287_10_OR_LATER(ah))
			val = val & (~AR_PCU_MISC_MODE2_HWWAR2);

		REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
	}

	/* The remaining overrides apply to AR5416 2.0+ pre-AR9280 only. */
	if (!AR_SREV_5416_20_OR_LATER(ah) ||
	    AR_SREV_9280_10_OR_LATER(ah))
		return;
	/*
	 * Disable BB clock gating
	 * Necessary to avoid issues on AR5416 2.0
	 */
	REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);

	/*
	 * Disable RIFS search on some chips to avoid baseband
	 * hang issues.
	 */
	if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
		val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
		val &= ~AR_PHY_RIFS_INIT_DELAY;
		REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
	}
}
674
/* Program the PHY turbo/HT40 mode word and MAC timeout limits for @chan. */
static void ar5008_hw_set_channel_regs(struct ath_hw *ah,
				       struct ath9k_channel *chan)
{
	u32 phymode;
	u32 enableDacFifo = 0;

	/* Preserve the DAC FIFO enable bit on chips that have it. */
	if (AR_SREV_9285_10_OR_LATER(ah))
		enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
				 AR_PHY_FC_ENABLE_DAC_FIFO);

	phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
		| AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;

	if (IS_CHAN_HT40(chan)) {
		phymode |= AR_PHY_FC_DYN2040_EN;

		/* HT40+: primary channel is the lower 20 MHz half. */
		if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
		    (chan->chanmode == CHANNEL_G_HT40PLUS))
			phymode |= AR_PHY_FC_DYN2040_PRI_CH;

	}
	REG_WRITE(ah, AR_PHY_TURBO, phymode);

	/* Keep the MAC's 20/40 setting in sync with the PHY. */
	ath9k_hw_set11nmac2040(ah);

	ENABLE_REGWRITE_BUFFER(ah);

	/* Global TX and carrier-sense timeout limits. */
	REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
	REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);

	REGWRITE_BUFFER_FLUSH(ah);
	DISABLE_REGWRITE_BUFFER(ah);
}
708
709
/*
 * Write the full initialization-value (INI) register set for @chan.
 *
 * Selects the mode column (modesIndex) and frequency column (freqIndex)
 * from the chip's INI tables, writes the ADDAC, per-mode, common, gain
 * and RF tables in the required order, then finishes channel setup
 * (override fixes, channel regs, chain masks, open-loop calibration,
 * TX power and analog RF registers).
 *
 * Returns 0 on success, -EINVAL for an unknown channel mode, or -EIO if
 * the analog RF register write fails.
 */
static int ar5008_hw_process_ini(struct ath_hw *ah,
				 struct ath9k_channel *chan)
{
	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
	int i, regWrites = 0;
	struct ieee80211_channel *channel = chan->chan;
	u32 modesIndex, freqIndex;

	/*
	 * Map the channel mode onto the INI table columns:
	 * modesIndex selects the per-mode value column in iniModes,
	 * freqIndex selects the band column (1 = 5 GHz, 2 = 2.4 GHz).
	 */
	switch (chan->chanmode) {
	case CHANNEL_A:
	case CHANNEL_A_HT20:
		modesIndex = 1;
		freqIndex = 1;
		break;
	case CHANNEL_A_HT40PLUS:
	case CHANNEL_A_HT40MINUS:
		modesIndex = 2;
		freqIndex = 1;
		break;
	case CHANNEL_G:
	case CHANNEL_G_HT20:
	case CHANNEL_B:
		modesIndex = 4;
		freqIndex = 2;
		break;
	case CHANNEL_G_HT40PLUS:
	case CHANNEL_G_HT40MINUS:
		modesIndex = 3;
		freqIndex = 2;
		break;

	default:
		return -EINVAL;
	}

	if (AR_SREV_9287_12_OR_LATER(ah)) {
		/* Enable ASYNC FIFO */
		REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
			    AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
		REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
		/* Pulse the soft-reset bit (clear, then set). */
		REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
			    AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
		REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
			    AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
	}

	/*
	 * Set correct baseband to analog shift setting to
	 * access analog chips.
	 */
	REG_WRITE(ah, AR_PHY(0), 0x00000007);

	/* Write ADDAC shifts */
	REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
	ah->eep_ops->set_addac(ah, chan);

	if (AR_SREV_5416_22_OR_LATER(ah)) {
		REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
	} else {
		/*
		 * Older AR5416 (2.0/2.1): write a patched copy of the
		 * ADDAC table with the CLKDRV entry forced to zero.
		 */
		struct ar5416IniArray temp;
		u32 addacSize =
			sizeof(u32) * ah->iniAddac.ia_rows *
			ah->iniAddac.ia_columns;

		/* For AR5416 2.0/2.1 */
		memcpy(ah->addac5416_21,
		       ah->iniAddac.ia_array, addacSize);

		/* override CLKDRV value at [row, column] = [31, 1] */
		(ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;

		temp.ia_array = ah->addac5416_21;
		temp.ia_columns = ah->iniAddac.ia_columns;
		temp.ia_rows = ah->iniAddac.ia_rows;
		REG_WRITE_ARRAY(&temp, 1, regWrites);
	}

	/* Switch the serial control back to the internal ADDAC. */
	REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);

	ENABLE_REGWRITE_BUFFER(ah);

	/* Write the per-mode INI values for the selected mode column. */
	for (i = 0; i < ah->iniModes.ia_rows; i++) {
		u32 reg = INI_RA(&ah->iniModes, i, 0);
		u32 val = INI_RA(&ah->iniModes, i, modesIndex);

		/* Strip PWDCLKIND from AR_AN_TOP2 when the fixup is set. */
		if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup)
			val &= ~AR_AN_TOP2_PWDCLKIND;

		REG_WRITE(ah, reg, val);

		/* Analog shift-register range needs extra settle time. */
		if (reg >= 0x7800 && reg < 0x78a0
		    && ah->config.analog_shiftreg) {
			udelay(100);
		}

		DO_DELAY(regWrites);
	}

	REGWRITE_BUFFER_FLUSH(ah);
	DISABLE_REGWRITE_BUFFER(ah);

	/* Chip-specific RX/TX gain tables. */
	if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah))
		REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);

	if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
	    AR_SREV_9287_10_OR_LATER(ah))
		REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);

	/* Extra table applied only on AR9271 rev 1.0. */
	if (AR_SREV_9271_10(ah))
		REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
				modesIndex, regWrites);

	ENABLE_REGWRITE_BUFFER(ah);

	/* Write common array parameters */
	for (i = 0; i < ah->iniCommon.ia_rows; i++) {
		u32 reg = INI_RA(&ah->iniCommon, i, 0);
		u32 val = INI_RA(&ah->iniCommon, i, 1);

		REG_WRITE(ah, reg, val);

		/* Same analog settle-time rule as for the modes table. */
		if (reg >= 0x7800 && reg < 0x78a0
		    && ah->config.analog_shiftreg) {
			udelay(100);
		}

		DO_DELAY(regWrites);
	}

	REGWRITE_BUFFER_FLUSH(ah);
	DISABLE_REGWRITE_BUFFER(ah);

	/* AR9271: pick the TX gain table per the EEPROM's gain type. */
	if (AR_SREV_9271(ah)) {
		if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 1)
			REG_WRITE_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
					modesIndex, regWrites);
		else
			REG_WRITE_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
					modesIndex, regWrites);
	}

	/* Band-dependent baseband RF gain table. */
	REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);

	/* Additional values for 5 GHz fast-clock channels. */
	if (IS_CHAN_A_FAST_CLOCK(ah, chan)) {
		REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
				regWrites);
	}

	/* Post-INI fixes, channel registers, chain masks, OLC. */
	ar5008_hw_override_ini(ah, chan);
	ar5008_hw_set_channel_regs(ah, chan);
	ar5008_hw_init_chain_masks(ah);
	ath9k_olc_init(ah);

	/* Set TX power */
	ah->eep_ops->set_txpower(ah, chan,
				 ath9k_regd_get_ctl(regulatory, chan),
				 channel->max_antenna_gain * 2,
				 channel->max_power * 2,
				 min((u32) MAX_RATE_POWER,
				     (u32) regulatory->power_limit));

	/* Write analog registers */
	if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
		ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
			  "ar5416SetRfRegs failed\n");
		return -EIO;
	}

	return 0;
}
880
881static void ar5008_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
882{
883 u32 rfMode = 0;
884
885 if (chan == NULL)
886 return;
887
888 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
889 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
890
891 if (!AR_SREV_9280_10_OR_LATER(ah))
892 rfMode |= (IS_CHAN_5GHZ(chan)) ?
893 AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
894
895 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
896 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
897
898 REG_WRITE(ah, AR_PHY_MODE, rfMode);
899}
900
/* Take the baseband out of the active state (AR_PHY_ACTIVE <- disable). */
static void ar5008_hw_mark_phy_inactive(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
}
905
906static void ar5008_hw_set_delta_slope(struct ath_hw *ah,
907 struct ath9k_channel *chan)
908{
909 u32 coef_scaled, ds_coef_exp, ds_coef_man;
910 u32 clockMhzScaled = 0x64000000;
911 struct chan_centers centers;
912
913 if (IS_CHAN_HALF_RATE(chan))
914 clockMhzScaled = clockMhzScaled >> 1;
915 else if (IS_CHAN_QUARTER_RATE(chan))
916 clockMhzScaled = clockMhzScaled >> 2;
917
918 ath9k_hw_get_channel_centers(ah, chan, &centers);
919 coef_scaled = clockMhzScaled / centers.synth_center;
920
921 ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
922 &ds_coef_exp);
923
924 REG_RMW_FIELD(ah, AR_PHY_TIMING3,
925 AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
926 REG_RMW_FIELD(ah, AR_PHY_TIMING3,
927 AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
928
929 coef_scaled = (9 * coef_scaled) / 10;
930
931 ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
932 &ds_coef_exp);
933
934 REG_RMW_FIELD(ah, AR_PHY_HALFGI,
935 AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
936 REG_RMW_FIELD(ah, AR_PHY_HALFGI,
937 AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
938}
939
/*
 * Request ownership of the RF bus and poll (up to AH_WAIT_TIMEOUT) for
 * the grant bit; returns the result of that poll.
 */
static bool ar5008_hw_rfbus_req(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
	return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
			     AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
}
946
947static void ar5008_hw_rfbus_done(struct ath_hw *ah)
948{
949 u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
950 if (IS_CHAN_B(ah->curchan))
951 synthDelay = (4 * synthDelay) / 22;
952 else
953 synthDelay /= 10;
954
955 udelay(synthDelay + BASE_ACTIVATE_DELAY);
956
957 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
958}
959
/*
 * Wire up the external rfkill (RF-silent) input: enable the baseband's
 * rfsilent input, clear the MUX2 rfsilent selection, configure the
 * rfkill GPIO pin as an input, and enable the rfsilent_bb test bit.
 */
static void ar5008_hw_enable_rfkill(struct ath_hw *ah)
{
	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
		    AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);

	REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
		    AR_GPIO_INPUT_MUX2_RFSILENT);

	/* ah->rfkill_gpio holds the board's rfkill pin number. */
	ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
	REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
}
971
972static void ar5008_restore_chainmask(struct ath_hw *ah)
973{
974 int rx_chainmask = ah->rxchainmask;
975
976 if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
977 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
978 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
979 }
980}
981
982static void ar5008_set_diversity(struct ath_hw *ah, bool value)
983{
984 u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
985 if (value)
986 v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
987 else
988 v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
989 REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
990}
991
992static u32 ar9100_hw_compute_pll_control(struct ath_hw *ah,
993 struct ath9k_channel *chan)
994{
995 if (chan && IS_CHAN_5GHZ(chan))
996 return 0x1450;
997 return 0x1458;
998}
999
1000static u32 ar9160_hw_compute_pll_control(struct ath_hw *ah,
1001 struct ath9k_channel *chan)
1002{
1003 u32 pll;
1004
1005 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
1006
1007 if (chan && IS_CHAN_HALF_RATE(chan))
1008 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
1009 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1010 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
1011
1012 if (chan && IS_CHAN_5GHZ(chan))
1013 pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
1014 else
1015 pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
1016
1017 return pll;
1018}
1019
1020static u32 ar5008_hw_compute_pll_control(struct ath_hw *ah,
1021 struct ath9k_channel *chan)
1022{
1023 u32 pll;
1024
1025 pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
1026
1027 if (chan && IS_CHAN_HALF_RATE(chan))
1028 pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
1029 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1030 pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
1031
1032 if (chan && IS_CHAN_5GHZ(chan))
1033 pll |= SM(0xa, AR_RTC_PLL_DIV);
1034 else
1035 pll |= SM(0xb, AR_RTC_PLL_DIV);
1036
1037 return pll;
1038}
1039
/*
 * Apply one Adaptive Noise Immunity (ANI) control command.
 *
 * @cmd is masked against ah->ani_function, so commands that are not
 * enabled for this chip fall through to the default branch. Each case
 * programs the corresponding baseband thresholds, updates the ANI
 * statistics counters, and records the new level in ah->curani.
 *
 * Returns false for an out-of-range level or an unknown/disabled
 * command, true otherwise.
 */
static bool ar5008_hw_ani_control(struct ath_hw *ah,
				  enum ath9k_ani_cmd cmd, int param)
{
	struct ar5416AniState *aniState = ah->curani;
	struct ath_common *common = ath9k_hw_common(ah);

	switch (cmd & ah->ani_function) {
	case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
		/* Per-level AGC settings come from tables in ath_hw. */
		u32 level = param;

		if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
			ath_print(common, ATH_DBG_ANI,
				  "level out of range (%u > %u)\n",
				  level,
				  (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
			return false;
		}

		REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
			      AR_PHY_DESIRED_SZ_TOT_DES,
			      ah->totalSizeDesired[level]);
		REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
			      AR_PHY_AGC_CTL1_COARSE_LOW,
			      ah->coarse_low[level]);
		REG_RMW_FIELD(ah, AR_PHY_AGC_CTL1,
			      AR_PHY_AGC_CTL1_COARSE_HIGH,
			      ah->coarse_high[level]);
		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
			      AR_PHY_FIND_SIG_FIRPWR,
			      ah->firpwr[level]);

		/* Track level movement in the ANI stats. */
		if (level > aniState->noiseImmunityLevel)
			ah->stats.ast_ani_niup++;
		else if (level < aniState->noiseImmunityLevel)
			ah->stats.ast_ani_nidown++;
		aniState->noiseImmunityLevel = level;
		break;
	}
	case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
		/* Threshold pairs indexed by [off, on]. */
		const int m1ThreshLow[] = { 127, 50 };
		const int m2ThreshLow[] = { 127, 40 };
		const int m1Thresh[] = { 127, 0x4d };
		const int m2Thresh[] = { 127, 0x40 };
		const int m2CountThr[] = { 31, 16 };
		const int m2CountThrLow[] = { 63, 48 };
		u32 on = param ? 1 : 0;

		/* Primary-channel self-correlator thresholds. */
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
			      AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
			      m1ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
			      AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
			      m2ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
			      AR_PHY_SFCORR_M1_THRESH,
			      m1Thresh[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
			      AR_PHY_SFCORR_M2_THRESH,
			      m2Thresh[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
			      AR_PHY_SFCORR_M2COUNT_THR,
			      m2CountThr[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
			      AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
			      m2CountThrLow[on]);

		/* Mirror the thresholds on the extension channel. */
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M1_THRESH_LOW,
			      m1ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M2_THRESH_LOW,
			      m2ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M1_THRESH,
			      m1Thresh[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M2_THRESH,
			      m2Thresh[on]);

		if (on)
			REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
		else
			REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);

		/* Note: state stores the inverted flag ("DetectOff"). */
		if (!on != aniState->ofdmWeakSigDetectOff) {
			if (on)
				ah->stats.ast_ani_ofdmon++;
			else
				ah->stats.ast_ani_ofdmoff++;
			aniState->ofdmWeakSigDetectOff = !on;
		}
		break;
	}
	case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
		/* CCK weak-signal threshold, indexed by [low, high]. */
		const int weakSigThrCck[] = { 8, 6 };
		u32 high = param ? 1 : 0;

		REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
			      AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
			      weakSigThrCck[high]);
		if (high != aniState->cckWeakSigThreshold) {
			if (high)
				ah->stats.ast_ani_cckhigh++;
			else
				ah->stats.ast_ani_ccklow++;
			aniState->cckWeakSigThreshold = high;
		}
		break;
	}
	case ATH9K_ANI_FIRSTEP_LEVEL:{
		/* FIR step thresholds per level. */
		const int firstep[] = { 0, 4, 8 };
		u32 level = param;

		if (level >= ARRAY_SIZE(firstep)) {
			ath_print(common, ATH_DBG_ANI,
				  "level out of range (%u > %u)\n",
				  level,
				  (unsigned) ARRAY_SIZE(firstep));
			return false;
		}
		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
			      AR_PHY_FIND_SIG_FIRSTEP,
			      firstep[level]);
		if (level > aniState->firstepLevel)
			ah->stats.ast_ani_stepup++;
		else if (level < aniState->firstepLevel)
			ah->stats.ast_ani_stepdown++;
		aniState->firstepLevel = level;
		break;
	}
	case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
		/* Cycle-power threshold per spur-immunity level. */
		const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
		u32 level = param;

		if (level >= ARRAY_SIZE(cycpwrThr1)) {
			ath_print(common, ATH_DBG_ANI,
				  "level out of range (%u > %u)\n",
				  level,
				  (unsigned) ARRAY_SIZE(cycpwrThr1));
			return false;
		}
		REG_RMW_FIELD(ah, AR_PHY_TIMING5,
			      AR_PHY_TIMING5_CYCPWR_THR1,
			      cycpwrThr1[level]);
		if (level > aniState->spurImmunityLevel)
			ah->stats.ast_ani_spurup++;
		else if (level < aniState->spurImmunityLevel)
			ah->stats.ast_ani_spurdown++;
		aniState->spurImmunityLevel = level;
		break;
	}
	case ATH9K_ANI_PRESENT:
		break;
	default:
		ath_print(common, ATH_DBG_ANI,
			  "invalid cmd %u\n", cmd);
		return false;
	}

	/* Dump the resulting ANI state for debugging. */
	ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
	ath_print(common, ATH_DBG_ANI,
		  "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
		  "ofdmWeakSigDetectOff=%d\n",
		  aniState->noiseImmunityLevel,
		  aniState->spurImmunityLevel,
		  !aniState->ofdmWeakSigDetectOff);
	ath_print(common, ATH_DBG_ANI,
		  "cckWeakSigThreshold=%d, "
		  "firstepLevel=%d, listenTime=%d\n",
		  aniState->cckWeakSigThreshold,
		  aniState->firstepLevel,
		  aniState->listenTime);
	ath_print(common, ATH_DBG_ANI,
		  "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
		  aniState->cycleCount,
		  aniState->ofdmPhyErrCount,
		  aniState->cckPhyErrCount);

	return true;
}
1222
1223static void ar5008_hw_do_getnf(struct ath_hw *ah,
1224 int16_t nfarray[NUM_NF_READINGS])
1225{
1226 struct ath_common *common = ath9k_hw_common(ah);
1227 int16_t nf;
1228
1229 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
1230 if (nf & 0x100)
1231 nf = 0 - ((nf ^ 0x1ff) + 1);
1232 ath_print(common, ATH_DBG_CALIBRATE,
1233 "NF calibrated [ctl] [chain 0] is %d\n", nf);
1234 nfarray[0] = nf;
1235
1236 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
1237 if (nf & 0x100)
1238 nf = 0 - ((nf ^ 0x1ff) + 1);
1239 ath_print(common, ATH_DBG_CALIBRATE,
1240 "NF calibrated [ctl] [chain 1] is %d\n", nf);
1241 nfarray[1] = nf;
1242
1243 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
1244 if (nf & 0x100)
1245 nf = 0 - ((nf ^ 0x1ff) + 1);
1246 ath_print(common, ATH_DBG_CALIBRATE,
1247 "NF calibrated [ctl] [chain 2] is %d\n", nf);
1248 nfarray[2] = nf;
1249
1250 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
1251 if (nf & 0x100)
1252 nf = 0 - ((nf ^ 0x1ff) + 1);
1253 ath_print(common, ATH_DBG_CALIBRATE,
1254 "NF calibrated [ext] [chain 0] is %d\n", nf);
1255 nfarray[3] = nf;
1256
1257 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
1258 if (nf & 0x100)
1259 nf = 0 - ((nf ^ 0x1ff) + 1);
1260 ath_print(common, ATH_DBG_CALIBRATE,
1261 "NF calibrated [ext] [chain 1] is %d\n", nf);
1262 nfarray[4] = nf;
1263
1264 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
1265 if (nf & 0x100)
1266 nf = 0 - ((nf ^ 0x1ff) + 1);
1267 ath_print(common, ATH_DBG_CALIBRATE,
1268 "NF calibrated [ext] [chain 2] is %d\n", nf);
1269 nfarray[5] = nf;
1270}
1271
/*
 * Load the stored noise-floor history into the baseband.
 *
 * Writes each chain's cached NF (h[i].privNF) into the 9-bit NF field
 * of the corresponding CCA register, triggers an NF update cycle, waits
 * briefly for it to complete, then rewrites the NF fields with -50.
 * NOTE(review): the final -50 write presumably restores a nominal cap
 * so later hardware NF readings are not pinned by the loaded value —
 * confirm against the AR5416 programming guide.
 */
static void ar5008_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath9k_nfcal_hist *h;
	int i, j;
	int32_t val;
	/* CCA registers holding the NF field, ctl chains 0-2 then ext. */
	const u32 ar5416_cca_regs[6] = {
		AR_PHY_CCA,
		AR_PHY_CH1_CCA,
		AR_PHY_CH2_CCA,
		AR_PHY_EXT_CCA,
		AR_PHY_CH1_EXT_CCA,
		AR_PHY_CH2_EXT_CCA
	};
	u8 chainmask, rx_chain_status;

	/* Derive which of the 6 readings to touch from the RX chainmask. */
	rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
	if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
		chainmask = 0x9;
	else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
		if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
			chainmask = 0x1B;
		else
			chainmask = 0x09;
	} else {
		if (rx_chain_status & 0x4)
			chainmask = 0x3F;
		else if (rx_chain_status & 0x2)
			chainmask = 0x1B;
		else
			chainmask = 0x09;
	}

	h = ah->nfCalHist;

	/* Write the cached per-chain NF into the 9-bit CCA NF fields. */
	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			val = REG_READ(ah, ar5416_cca_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
			REG_WRITE(ah, ar5416_cca_regs[i], val);
		}
	}

	/* Trigger a noise-floor update cycle in the AGC. */
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_ENABLE_NF);
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);

	/*
	 * Wait for the NF bit to self-clear.
	 * NOTE(review): this polls at most 5 * 50us = 250us and does not
	 * report a timeout — confirm this is long enough on all parts.
	 */
	for (j = 0; j < 5; j++) {
		if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
		     AR_PHY_AGC_CONTROL_NF) == 0)
			break;
		udelay(50);
	}

	ENABLE_REGWRITE_BUFFER(ah);

	/* Rewrite the NF fields with -50 (9-bit, shifted into place). */
	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			val = REG_READ(ah, ar5416_cca_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) (-50) << 1) & 0x1ff);
			REG_WRITE(ah, ar5416_cca_regs[i], val);
		}
	}

	REGWRITE_BUFFER_FLUSH(ah);
	DISABLE_REGWRITE_BUFFER(ah);
}
1342
/*
 * Install the AR5008-family PHY callbacks into the hardware private
 * ops table. The PLL-control callback is chosen by chip revision
 * (AR9100, AR9160+, or the base AR5008 variant).
 */
void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);

	priv_ops->rf_set_freq = ar5008_hw_set_channel;
	priv_ops->spur_mitigate_freq = ar5008_hw_spur_mitigate;

	priv_ops->rf_alloc_ext_banks = ar5008_hw_rf_alloc_ext_banks;
	priv_ops->rf_free_ext_banks = ar5008_hw_rf_free_ext_banks;
	priv_ops->set_rf_regs = ar5008_hw_set_rf_regs;
	priv_ops->set_channel_regs = ar5008_hw_set_channel_regs;
	priv_ops->init_bb = ar5008_hw_init_bb;
	priv_ops->process_ini = ar5008_hw_process_ini;
	priv_ops->set_rfmode = ar5008_hw_set_rfmode;
	priv_ops->mark_phy_inactive = ar5008_hw_mark_phy_inactive;
	priv_ops->set_delta_slope = ar5008_hw_set_delta_slope;
	priv_ops->rfbus_req = ar5008_hw_rfbus_req;
	priv_ops->rfbus_done = ar5008_hw_rfbus_done;
	priv_ops->enable_rfkill = ar5008_hw_enable_rfkill;
	priv_ops->restore_chainmask = ar5008_restore_chainmask;
	priv_ops->set_diversity = ar5008_set_diversity;
	priv_ops->ani_control = ar5008_hw_ani_control;
	priv_ops->do_getnf = ar5008_hw_do_getnf;
	priv_ops->loadnf = ar5008_hw_loadnf;

	/* Revision-specific PLL control computation. */
	if (AR_SREV_9100(ah))
		priv_ops->compute_pll_control = ar9100_hw_compute_pll_control;
	else if (AR_SREV_9160_10_OR_LATER(ah))
		priv_ops->compute_pll_control = ar9160_hw_compute_pll_control;
	else
		priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9001_initvals.h b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
new file mode 100644
index 000000000000..0b94bd385b0a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9001_initvals.h
@@ -0,0 +1,1254 @@
1
/* AR9100 common initial register values: { address, value } pairs. */
static const u32 ar5416Common_9100[][2] = {
	{ 0x0000000c, 0x00000000 },
	{ 0x00000030, 0x00020015 },
	{ 0x00000034, 0x00000005 },
	{ 0x00000040, 0x00000000 },
	{ 0x00000044, 0x00000008 },
	{ 0x00000048, 0x00000008 },
	{ 0x0000004c, 0x00000010 },
	{ 0x00000050, 0x00000000 },
	{ 0x00000054, 0x0000001f },
	{ 0x00000800, 0x00000000 },
	{ 0x00000804, 0x00000000 },
	{ 0x00000808, 0x00000000 },
	{ 0x0000080c, 0x00000000 },
	{ 0x00000810, 0x00000000 },
	{ 0x00000814, 0x00000000 },
	{ 0x00000818, 0x00000000 },
	{ 0x0000081c, 0x00000000 },
	{ 0x00000820, 0x00000000 },
	{ 0x00000824, 0x00000000 },
	{ 0x00001040, 0x002ffc0f },
	{ 0x00001044, 0x002ffc0f },
	{ 0x00001048, 0x002ffc0f },
	{ 0x0000104c, 0x002ffc0f },
	{ 0x00001050, 0x002ffc0f },
	{ 0x00001054, 0x002ffc0f },
	{ 0x00001058, 0x002ffc0f },
	{ 0x0000105c, 0x002ffc0f },
	{ 0x00001060, 0x002ffc0f },
	{ 0x00001064, 0x002ffc0f },
	{ 0x00001230, 0x00000000 },
	{ 0x00001270, 0x00000000 },
	{ 0x00001038, 0x00000000 },
	{ 0x00001078, 0x00000000 },
	{ 0x000010b8, 0x00000000 },
	{ 0x000010f8, 0x00000000 },
	{ 0x00001138, 0x00000000 },
	{ 0x00001178, 0x00000000 },
	{ 0x000011b8, 0x00000000 },
	{ 0x000011f8, 0x00000000 },
	{ 0x00001238, 0x00000000 },
	{ 0x00001278, 0x00000000 },
	{ 0x000012b8, 0x00000000 },
	{ 0x000012f8, 0x00000000 },
	{ 0x00001338, 0x00000000 },
	{ 0x00001378, 0x00000000 },
	{ 0x000013b8, 0x00000000 },
	{ 0x000013f8, 0x00000000 },
	{ 0x00001438, 0x00000000 },
	{ 0x00001478, 0x00000000 },
	{ 0x000014b8, 0x00000000 },
	{ 0x000014f8, 0x00000000 },
	{ 0x00001538, 0x00000000 },
	{ 0x00001578, 0x00000000 },
	{ 0x000015b8, 0x00000000 },
	{ 0x000015f8, 0x00000000 },
	{ 0x00001638, 0x00000000 },
	{ 0x00001678, 0x00000000 },
	{ 0x000016b8, 0x00000000 },
	{ 0x000016f8, 0x00000000 },
	{ 0x00001738, 0x00000000 },
	{ 0x00001778, 0x00000000 },
	{ 0x000017b8, 0x00000000 },
	{ 0x000017f8, 0x00000000 },
	{ 0x0000103c, 0x00000000 },
	{ 0x0000107c, 0x00000000 },
	{ 0x000010bc, 0x00000000 },
	{ 0x000010fc, 0x00000000 },
	{ 0x0000113c, 0x00000000 },
	{ 0x0000117c, 0x00000000 },
	{ 0x000011bc, 0x00000000 },
	{ 0x000011fc, 0x00000000 },
	{ 0x0000123c, 0x00000000 },
	{ 0x0000127c, 0x00000000 },
	{ 0x000012bc, 0x00000000 },
	{ 0x000012fc, 0x00000000 },
	{ 0x0000133c, 0x00000000 },
	{ 0x0000137c, 0x00000000 },
	{ 0x000013bc, 0x00000000 },
	{ 0x000013fc, 0x00000000 },
	{ 0x0000143c, 0x00000000 },
	{ 0x0000147c, 0x00000000 },
	{ 0x00020010, 0x00000003 },
	{ 0x00020038, 0x000004c2 },
	{ 0x00008004, 0x00000000 },
	{ 0x00008008, 0x00000000 },
	{ 0x0000800c, 0x00000000 },
	{ 0x00008018, 0x00000700 },
	{ 0x00008020, 0x00000000 },
	{ 0x00008038, 0x00000000 },
	{ 0x0000803c, 0x00000000 },
	{ 0x00008048, 0x40000000 },
	{ 0x00008054, 0x00004000 },
	{ 0x00008058, 0x00000000 },
	{ 0x0000805c, 0x000fc78f },
	{ 0x00008060, 0x0000000f },
	{ 0x00008064, 0x00000000 },
	{ 0x000080c0, 0x2a82301a },
	{ 0x000080c4, 0x05dc01e0 },
	{ 0x000080c8, 0x1f402710 },
	{ 0x000080cc, 0x01f40000 },
	{ 0x000080d0, 0x00001e00 },
	{ 0x000080d4, 0x00000000 },
	{ 0x000080d8, 0x00400000 },
	{ 0x000080e0, 0xffffffff },
	{ 0x000080e4, 0x0000ffff },
	{ 0x000080e8, 0x003f3f3f },
	{ 0x000080ec, 0x00000000 },
	{ 0x000080f0, 0x00000000 },
	{ 0x000080f4, 0x00000000 },
	{ 0x000080f8, 0x00000000 },
	{ 0x000080fc, 0x00020000 },
	{ 0x00008100, 0x00020000 },
	{ 0x00008104, 0x00000001 },
	{ 0x00008108, 0x00000052 },
	{ 0x0000810c, 0x00000000 },
	{ 0x00008110, 0x00000168 },
	{ 0x00008118, 0x000100aa },
	{ 0x0000811c, 0x00003210 },
	{ 0x00008120, 0x08f04800 },
	{ 0x00008124, 0x00000000 },
	{ 0x00008128, 0x00000000 },
	{ 0x0000812c, 0x00000000 },
	{ 0x00008130, 0x00000000 },
	{ 0x00008134, 0x00000000 },
	{ 0x00008138, 0x00000000 },
	{ 0x0000813c, 0x00000000 },
	{ 0x00008144, 0x00000000 },
	{ 0x00008168, 0x00000000 },
	{ 0x0000816c, 0x00000000 },
	{ 0x00008170, 0x32143320 },
	{ 0x00008174, 0xfaa4fa50 },
	{ 0x00008178, 0x00000100 },
	{ 0x0000817c, 0x00000000 },
	{ 0x000081c4, 0x00000000 },
	{ 0x000081d0, 0x00003210 },
	{ 0x000081ec, 0x00000000 },
	{ 0x000081f0, 0x00000000 },
	{ 0x000081f4, 0x00000000 },
	{ 0x000081f8, 0x00000000 },
	{ 0x000081fc, 0x00000000 },
	{ 0x00008200, 0x00000000 },
	{ 0x00008204, 0x00000000 },
	{ 0x00008208, 0x00000000 },
	{ 0x0000820c, 0x00000000 },
	{ 0x00008210, 0x00000000 },
	{ 0x00008214, 0x00000000 },
	{ 0x00008218, 0x00000000 },
	{ 0x0000821c, 0x00000000 },
	{ 0x00008220, 0x00000000 },
	{ 0x00008224, 0x00000000 },
	{ 0x00008228, 0x00000000 },
	{ 0x0000822c, 0x00000000 },
	{ 0x00008230, 0x00000000 },
	{ 0x00008234, 0x00000000 },
	{ 0x00008238, 0x00000000 },
	{ 0x0000823c, 0x00000000 },
	{ 0x00008240, 0x00100000 },
	{ 0x00008244, 0x0010f400 },
	{ 0x00008248, 0x00000100 },
	{ 0x0000824c, 0x0001e800 },
	{ 0x00008250, 0x00000000 },
	{ 0x00008254, 0x00000000 },
	{ 0x00008258, 0x00000000 },
	{ 0x0000825c, 0x400000ff },
	{ 0x00008260, 0x00080922 },
	{ 0x00008270, 0x00000000 },
	{ 0x00008274, 0x40000000 },
	{ 0x00008278, 0x003e4180 },
	{ 0x0000827c, 0x00000000 },
	{ 0x00008284, 0x0000002c },
	{ 0x00008288, 0x0000002c },
	{ 0x0000828c, 0x00000000 },
	{ 0x00008294, 0x00000000 },
	{ 0x00008298, 0x00000000 },
	{ 0x00008300, 0x00000000 },
	{ 0x00008304, 0x00000000 },
	{ 0x00008308, 0x00000000 },
	{ 0x0000830c, 0x00000000 },
	{ 0x00008310, 0x00000000 },
	{ 0x00008314, 0x00000000 },
	{ 0x00008318, 0x00000000 },
	{ 0x00008328, 0x00000000 },
	{ 0x0000832c, 0x00000007 },
	{ 0x00008330, 0x00000302 },
	{ 0x00008334, 0x00000e00 },
	{ 0x00008338, 0x00000000 },
	{ 0x0000833c, 0x00000000 },
	{ 0x00008340, 0x000107ff },
	{ 0x00009808, 0x00000000 },
	{ 0x0000980c, 0xad848e19 },
	{ 0x00009810, 0x7d14e000 },
	{ 0x00009814, 0x9c0a9f6b },
	{ 0x0000981c, 0x00000000 },
	{ 0x0000982c, 0x0000a000 },
	{ 0x00009830, 0x00000000 },
	{ 0x0000983c, 0x00200400 },
	{ 0x00009840, 0x206a01ae },
	{ 0x0000984c, 0x1284233c },
	{ 0x00009854, 0x00000859 },
	{ 0x00009900, 0x00000000 },
	{ 0x00009904, 0x00000000 },
	{ 0x00009908, 0x00000000 },
	{ 0x0000990c, 0x00000000 },
	{ 0x0000991c, 0x10000fff },
	{ 0x00009920, 0x05100000 },
	{ 0x0000a920, 0x05100000 },
	{ 0x0000b920, 0x05100000 },
	{ 0x00009928, 0x00000001 },
	{ 0x0000992c, 0x00000004 },
	{ 0x00009934, 0x1e1f2022 },
	{ 0x00009938, 0x0a0b0c0d },
	{ 0x0000993c, 0x00000000 },
	{ 0x00009948, 0x9280b212 },
	{ 0x0000994c, 0x00020028 },
	{ 0x0000c95c, 0x004b6a8e },
	{ 0x0000c968, 0x000003ce },
	{ 0x00009970, 0x190fb515 },
	{ 0x00009974, 0x00000000 },
	{ 0x00009978, 0x00000001 },
	{ 0x0000997c, 0x00000000 },
	{ 0x00009980, 0x00000000 },
	{ 0x00009984, 0x00000000 },
	{ 0x00009988, 0x00000000 },
	{ 0x0000998c, 0x00000000 },
	{ 0x00009990, 0x00000000 },
	{ 0x00009994, 0x00000000 },
	{ 0x00009998, 0x00000000 },
	{ 0x0000999c, 0x00000000 },
	{ 0x000099a0, 0x00000000 },
	{ 0x000099a4, 0x00000001 },
	{ 0x000099a8, 0x201fff00 },
	{ 0x000099ac, 0x006f0000 },
	{ 0x000099b0, 0x03051000 },
	{ 0x000099dc, 0x00000000 },
	{ 0x000099e0, 0x00000200 },
	{ 0x000099e4, 0xaaaaaaaa },
	{ 0x000099e8, 0x3c466478 },
	{ 0x000099ec, 0x0cc80caa },
	{ 0x000099fc, 0x00001042 },
	{ 0x00009b00, 0x00000000 },
	{ 0x00009b04, 0x00000001 },
	{ 0x00009b08, 0x00000002 },
	{ 0x00009b0c, 0x00000003 },
	{ 0x00009b10, 0x00000004 },
	{ 0x00009b14, 0x00000005 },
	{ 0x00009b18, 0x00000008 },
	{ 0x00009b1c, 0x00000009 },
	{ 0x00009b20, 0x0000000a },
	{ 0x00009b24, 0x0000000b },
	{ 0x00009b28, 0x0000000c },
	{ 0x00009b2c, 0x0000000d },
	{ 0x00009b30, 0x00000010 },
	{ 0x00009b34, 0x00000011 },
	{ 0x00009b38, 0x00000012 },
	{ 0x00009b3c, 0x00000013 },
	{ 0x00009b40, 0x00000014 },
	{ 0x00009b44, 0x00000015 },
	{ 0x00009b48, 0x00000018 },
	{ 0x00009b4c, 0x00000019 },
	{ 0x00009b50, 0x0000001a },
	{ 0x00009b54, 0x0000001b },
	{ 0x00009b58, 0x0000001c },
	{ 0x00009b5c, 0x0000001d },
	{ 0x00009b60, 0x00000020 },
	{ 0x00009b64, 0x00000021 },
	{ 0x00009b68, 0x00000022 },
	{ 0x00009b6c, 0x00000023 },
	{ 0x00009b70, 0x00000024 },
	{ 0x00009b74, 0x00000025 },
	{ 0x00009b78, 0x00000028 },
	{ 0x00009b7c, 0x00000029 },
	{ 0x00009b80, 0x0000002a },
	{ 0x00009b84, 0x0000002b },
	{ 0x00009b88, 0x0000002c },
	{ 0x00009b8c, 0x0000002d },
	{ 0x00009b90, 0x00000030 },
	{ 0x00009b94, 0x00000031 },
	{ 0x00009b98, 0x00000032 },
	{ 0x00009b9c, 0x00000033 },
	{ 0x00009ba0, 0x00000034 },
	{ 0x00009ba4, 0x00000035 },
	{ 0x00009ba8, 0x00000035 },
	{ 0x00009bac, 0x00000035 },
	{ 0x00009bb0, 0x00000035 },
	{ 0x00009bb4, 0x00000035 },
	{ 0x00009bb8, 0x00000035 },
	{ 0x00009bbc, 0x00000035 },
	{ 0x00009bc0, 0x00000035 },
	{ 0x00009bc4, 0x00000035 },
	{ 0x00009bc8, 0x00000035 },
	{ 0x00009bcc, 0x00000035 },
	{ 0x00009bd0, 0x00000035 },
	{ 0x00009bd4, 0x00000035 },
	{ 0x00009bd8, 0x00000035 },
	{ 0x00009bdc, 0x00000035 },
	{ 0x00009be0, 0x00000035 },
	{ 0x00009be4, 0x00000035 },
	{ 0x00009be8, 0x00000035 },
	{ 0x00009bec, 0x00000035 },
	{ 0x00009bf0, 0x00000035 },
	{ 0x00009bf4, 0x00000035 },
	{ 0x00009bf8, 0x00000010 },
	{ 0x00009bfc, 0x0000001a },
	{ 0x0000a210, 0x40806333 },
	{ 0x0000a214, 0x00106c10 },
	{ 0x0000a218, 0x009c4060 },
	{ 0x0000a220, 0x018830c6 },
	{ 0x0000a224, 0x00000400 },
	{ 0x0000a228, 0x001a0bb5 },
	{ 0x0000a22c, 0x00000000 },
	{ 0x0000a234, 0x20202020 },
	{ 0x0000a238, 0x20202020 },
	{ 0x0000a23c, 0x13c889ae },
	{ 0x0000a240, 0x38490a20 },
	{ 0x0000a244, 0x00007bb6 },
	{ 0x0000a248, 0x0fff3ffc },
	{ 0x0000a24c, 0x00000001 },
	{ 0x0000a250, 0x0000a000 },
	{ 0x0000a254, 0x00000000 },
	{ 0x0000a258, 0x0cc75380 },
	{ 0x0000a25c, 0x0f0f0f01 },
	{ 0x0000a260, 0xdfa91f01 },
	{ 0x0000a268, 0x00000001 },
	{ 0x0000a26c, 0x0ebae9c6 },
	{ 0x0000b26c, 0x0ebae9c6 },
	{ 0x0000c26c, 0x0ebae9c6 },
	{ 0x0000d270, 0x00820820 },
	{ 0x0000a278, 0x1ce739ce },
	{ 0x0000a27c, 0x050701ce },
	{ 0x0000a338, 0x00000000 },
	{ 0x0000a33c, 0x00000000 },
	{ 0x0000a340, 0x00000000 },
	{ 0x0000a344, 0x00000000 },
	{ 0x0000a348, 0x3fffffff },
	{ 0x0000a34c, 0x3fffffff },
	{ 0x0000a350, 0x3fffffff },
	{ 0x0000a354, 0x0003ffff },
	{ 0x0000a358, 0x79a8aa33 },
	{ 0x0000d35c, 0x07ffffef },
	{ 0x0000d360, 0x0fffffe7 },
	{ 0x0000d364, 0x17ffffe5 },
	{ 0x0000d368, 0x1fffffe4 },
	{ 0x0000d36c, 0x37ffffe3 },
	{ 0x0000d370, 0x3fffffe3 },
	{ 0x0000d374, 0x57ffffe3 },
	{ 0x0000d378, 0x5fffffe2 },
	{ 0x0000d37c, 0x7fffffe2 },
	{ 0x0000d380, 0x7f3c7bba },
	{ 0x0000d384, 0xf3307ff0 },
	{ 0x0000a388, 0x0c000000 },
	{ 0x0000a38c, 0x20202020 },
	{ 0x0000a390, 0x20202020 },
	{ 0x0000a394, 0x1ce739ce },
	{ 0x0000a398, 0x000001ce },
	{ 0x0000a39c, 0x00000001 },
	{ 0x0000a3a0, 0x00000000 },
	{ 0x0000a3a4, 0x00000000 },
	{ 0x0000a3a8, 0x00000000 },
	{ 0x0000a3ac, 0x00000000 },
	{ 0x0000a3b0, 0x00000000 },
	{ 0x0000a3b4, 0x00000000 },
	{ 0x0000a3b8, 0x00000000 },
	{ 0x0000a3bc, 0x00000000 },
	{ 0x0000a3c0, 0x00000000 },
	{ 0x0000a3c4, 0x00000000 },
	{ 0x0000a3c8, 0x00000246 },
	{ 0x0000a3cc, 0x20202020 },
	{ 0x0000a3d0, 0x20202020 },
	{ 0x0000a3d4, 0x20202020 },
	{ 0x0000a3dc, 0x1ce739ce },
	{ 0x0000a3e0, 0x000001ce },
};
375
/* AR9100 RF bank 0 initial values: { address, value } pairs. */
static const u32 ar5416Bank0_9100[][2] = {
	{ 0x000098b0, 0x1e5795e5 },
	{ 0x000098e0, 0x02008020 },
};
380
/*
 * AR9100 baseband RF gain table. Columns: { address, column-1 value,
 * column-2 value }; the column is picked by freqIndex in
 * ar5008_hw_process_ini() (1 for 5 GHz modes, 2 for 2.4 GHz modes).
 */
static const u32 ar5416BB_RfGain_9100[][3] = {
	{ 0x00009a00, 0x00000000, 0x00000000 },
	{ 0x00009a04, 0x00000040, 0x00000040 },
	{ 0x00009a08, 0x00000080, 0x00000080 },
	{ 0x00009a0c, 0x000001a1, 0x00000141 },
	{ 0x00009a10, 0x000001e1, 0x00000181 },
	{ 0x00009a14, 0x00000021, 0x000001c1 },
	{ 0x00009a18, 0x00000061, 0x00000001 },
	{ 0x00009a1c, 0x00000168, 0x00000041 },
	{ 0x00009a20, 0x000001a8, 0x000001a8 },
	{ 0x00009a24, 0x000001e8, 0x000001e8 },
	{ 0x00009a28, 0x00000028, 0x00000028 },
	{ 0x00009a2c, 0x00000068, 0x00000068 },
	{ 0x00009a30, 0x00000189, 0x000000a8 },
	{ 0x00009a34, 0x000001c9, 0x00000169 },
	{ 0x00009a38, 0x00000009, 0x000001a9 },
	{ 0x00009a3c, 0x00000049, 0x000001e9 },
	{ 0x00009a40, 0x00000089, 0x00000029 },
	{ 0x00009a44, 0x00000170, 0x00000069 },
	{ 0x00009a48, 0x000001b0, 0x00000190 },
	{ 0x00009a4c, 0x000001f0, 0x000001d0 },
	{ 0x00009a50, 0x00000030, 0x00000010 },
	{ 0x00009a54, 0x00000070, 0x00000050 },
	{ 0x00009a58, 0x00000191, 0x00000090 },
	{ 0x00009a5c, 0x000001d1, 0x00000151 },
	{ 0x00009a60, 0x00000011, 0x00000191 },
	{ 0x00009a64, 0x00000051, 0x000001d1 },
	{ 0x00009a68, 0x00000091, 0x00000011 },
	{ 0x00009a6c, 0x000001b8, 0x00000051 },
	{ 0x00009a70, 0x000001f8, 0x00000198 },
	{ 0x00009a74, 0x00000038, 0x000001d8 },
	{ 0x00009a78, 0x00000078, 0x00000018 },
	{ 0x00009a7c, 0x00000199, 0x00000058 },
	{ 0x00009a80, 0x000001d9, 0x00000098 },
	{ 0x00009a84, 0x00000019, 0x00000159 },
	{ 0x00009a88, 0x00000059, 0x00000199 },
	{ 0x00009a8c, 0x00000099, 0x000001d9 },
	{ 0x00009a90, 0x000000d9, 0x00000019 },
	{ 0x00009a94, 0x000000f9, 0x00000059 },
	{ 0x00009a98, 0x000000f9, 0x00000099 },
	{ 0x00009a9c, 0x000000f9, 0x000000d9 },
	{ 0x00009aa0, 0x000000f9, 0x000000f9 },
	{ 0x00009aa4, 0x000000f9, 0x000000f9 },
	{ 0x00009aa8, 0x000000f9, 0x000000f9 },
	{ 0x00009aac, 0x000000f9, 0x000000f9 },
	{ 0x00009ab0, 0x000000f9, 0x000000f9 },
	{ 0x00009ab4, 0x000000f9, 0x000000f9 },
	{ 0x00009ab8, 0x000000f9, 0x000000f9 },
	{ 0x00009abc, 0x000000f9, 0x000000f9 },
	{ 0x00009ac0, 0x000000f9, 0x000000f9 },
	{ 0x00009ac4, 0x000000f9, 0x000000f9 },
	{ 0x00009ac8, 0x000000f9, 0x000000f9 },
	{ 0x00009acc, 0x000000f9, 0x000000f9 },
	{ 0x00009ad0, 0x000000f9, 0x000000f9 },
	{ 0x00009ad4, 0x000000f9, 0x000000f9 },
	{ 0x00009ad8, 0x000000f9, 0x000000f9 },
	{ 0x00009adc, 0x000000f9, 0x000000f9 },
	{ 0x00009ae0, 0x000000f9, 0x000000f9 },
	{ 0x00009ae4, 0x000000f9, 0x000000f9 },
	{ 0x00009ae8, 0x000000f9, 0x000000f9 },
	{ 0x00009aec, 0x000000f9, 0x000000f9 },
	{ 0x00009af0, 0x000000f9, 0x000000f9 },
	{ 0x00009af4, 0x000000f9, 0x000000f9 },
	{ 0x00009af8, 0x000000f9, 0x000000f9 },
	{ 0x00009afc, 0x000000f9, 0x000000f9 },
};
447
448static const u32 ar5416Bank1_9100[][2] = {
449 { 0x000098b0, 0x02108421},
450 { 0x000098ec, 0x00000008},
451};
452
453static const u32 ar5416Bank2_9100[][2] = {
454 { 0x000098b0, 0x0e73ff17},
455 { 0x000098e0, 0x00000420},
456};
457
458static const u32 ar5416Bank3_9100[][3] = {
459 { 0x000098f0, 0x01400018, 0x01c00018 },
460};
461
462static const u32 ar5416Bank6_9100[][3] = {
463
464 { 0x0000989c, 0x00000000, 0x00000000 },
465 { 0x0000989c, 0x00000000, 0x00000000 },
466 { 0x0000989c, 0x00000000, 0x00000000 },
467 { 0x0000989c, 0x00e00000, 0x00e00000 },
468 { 0x0000989c, 0x005e0000, 0x005e0000 },
469 { 0x0000989c, 0x00120000, 0x00120000 },
470 { 0x0000989c, 0x00620000, 0x00620000 },
471 { 0x0000989c, 0x00020000, 0x00020000 },
472 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
473 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
474 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
475 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
476 { 0x0000989c, 0x005f0000, 0x005f0000 },
477 { 0x0000989c, 0x00870000, 0x00870000 },
478 { 0x0000989c, 0x00f90000, 0x00f90000 },
479 { 0x0000989c, 0x007b0000, 0x007b0000 },
480 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
481 { 0x0000989c, 0x00f50000, 0x00f50000 },
482 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
483 { 0x0000989c, 0x00110000, 0x00110000 },
484 { 0x0000989c, 0x006100a8, 0x006100a8 },
485 { 0x0000989c, 0x004210a2, 0x004210a2 },
486 { 0x0000989c, 0x0014000f, 0x0014000f },
487 { 0x0000989c, 0x00c40002, 0x00c40002 },
488 { 0x0000989c, 0x003000f2, 0x003000f2 },
489 { 0x0000989c, 0x00440016, 0x00440016 },
490 { 0x0000989c, 0x00410040, 0x00410040 },
491 { 0x0000989c, 0x000180d6, 0x000180d6 },
492 { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
493 { 0x0000989c, 0x000000b1, 0x000000b1 },
494 { 0x0000989c, 0x00002000, 0x00002000 },
495 { 0x0000989c, 0x000000d4, 0x000000d4 },
496 { 0x000098d0, 0x0000000f, 0x0010000f },
497};
498
499
500static const u32 ar5416Bank6TPC_9100[][3] = {
501
502 { 0x0000989c, 0x00000000, 0x00000000 },
503 { 0x0000989c, 0x00000000, 0x00000000 },
504 { 0x0000989c, 0x00000000, 0x00000000 },
505 { 0x0000989c, 0x00e00000, 0x00e00000 },
506 { 0x0000989c, 0x005e0000, 0x005e0000 },
507 { 0x0000989c, 0x00120000, 0x00120000 },
508 { 0x0000989c, 0x00620000, 0x00620000 },
509 { 0x0000989c, 0x00020000, 0x00020000 },
510 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
511 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
512 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
513 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
514 { 0x0000989c, 0x005f0000, 0x005f0000 },
515 { 0x0000989c, 0x00870000, 0x00870000 },
516 { 0x0000989c, 0x00f90000, 0x00f90000 },
517 { 0x0000989c, 0x007b0000, 0x007b0000 },
518 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
519 { 0x0000989c, 0x00f50000, 0x00f50000 },
520 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
521 { 0x0000989c, 0x00110000, 0x00110000 },
522 { 0x0000989c, 0x006100a8, 0x006100a8 },
523 { 0x0000989c, 0x00423022, 0x00423022 },
524 { 0x0000989c, 0x2014008f, 0x2014008f },
525 { 0x0000989c, 0x00c40002, 0x00c40002 },
526 { 0x0000989c, 0x003000f2, 0x003000f2 },
527 { 0x0000989c, 0x00440016, 0x00440016 },
528 { 0x0000989c, 0x00410040, 0x00410040 },
529 { 0x0000989c, 0x0001805e, 0x0001805e },
530 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
531 { 0x0000989c, 0x000000e1, 0x000000e1 },
532 { 0x0000989c, 0x00007080, 0x00007080 },
533 { 0x0000989c, 0x000000d4, 0x000000d4 },
534 { 0x000098d0, 0x0000000f, 0x0010000f },
535};
536
537static const u32 ar5416Bank7_9100[][2] = {
538 { 0x0000989c, 0x00000500 },
539 { 0x0000989c, 0x00000800 },
540 { 0x000098cc, 0x0000000e },
541};
542
543static const u32 ar5416Addac_9100[][2] = {
544 {0x0000989c, 0x00000000 },
545 {0x0000989c, 0x00000000 },
546 {0x0000989c, 0x00000000 },
547 {0x0000989c, 0x00000000 },
548 {0x0000989c, 0x00000000 },
549 {0x0000989c, 0x00000000 },
550 {0x0000989c, 0x00000000 },
551 {0x0000989c, 0x00000010 },
552 {0x0000989c, 0x00000000 },
553 {0x0000989c, 0x00000000 },
554 {0x0000989c, 0x00000000 },
555 {0x0000989c, 0x00000000 },
556 {0x0000989c, 0x00000000 },
557 {0x0000989c, 0x00000000 },
558 {0x0000989c, 0x00000000 },
559 {0x0000989c, 0x00000000 },
560 {0x0000989c, 0x00000000 },
561 {0x0000989c, 0x00000000 },
562 {0x0000989c, 0x00000000 },
563 {0x0000989c, 0x00000000 },
564 {0x0000989c, 0x00000000 },
565 {0x0000989c, 0x000000c0 },
566 {0x0000989c, 0x00000015 },
567 {0x0000989c, 0x00000000 },
568 {0x0000989c, 0x00000000 },
569 {0x0000989c, 0x00000000 },
570 {0x0000989c, 0x00000000 },
571 {0x0000989c, 0x00000000 },
572 {0x0000989c, 0x00000000 },
573 {0x0000989c, 0x00000000 },
574 {0x0000989c, 0x00000000 },
575 {0x000098cc, 0x00000000 },
576};
577
578static const u32 ar5416Modes_9160[][6] = {
579 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
580 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
581 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
582 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
583 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
584 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
585 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
586 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
587 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
588 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
589 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
590 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
591 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
592 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
593 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
594 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
595 { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
596 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
597 { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
598 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
599 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
600 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
601 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
602 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
603 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
604 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
605 { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
606 { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
607 { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
608 { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
609 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
610 { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
611 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
612 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
613 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
614 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
615 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
616 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
617 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
618 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
619 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
620 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
621 { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
622 { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
623 { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
624 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
625 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
626 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
627 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
628 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
629 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
630 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
631 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
632 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
633 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
634 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
635 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
636 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
637 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
638 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
639 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
640 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
641};
642
643static const u32 ar5416Common_9160[][2] = {
644 { 0x0000000c, 0x00000000 },
645 { 0x00000030, 0x00020015 },
646 { 0x00000034, 0x00000005 },
647 { 0x00000040, 0x00000000 },
648 { 0x00000044, 0x00000008 },
649 { 0x00000048, 0x00000008 },
650 { 0x0000004c, 0x00000010 },
651 { 0x00000050, 0x00000000 },
652 { 0x00000054, 0x0000001f },
653 { 0x00000800, 0x00000000 },
654 { 0x00000804, 0x00000000 },
655 { 0x00000808, 0x00000000 },
656 { 0x0000080c, 0x00000000 },
657 { 0x00000810, 0x00000000 },
658 { 0x00000814, 0x00000000 },
659 { 0x00000818, 0x00000000 },
660 { 0x0000081c, 0x00000000 },
661 { 0x00000820, 0x00000000 },
662 { 0x00000824, 0x00000000 },
663 { 0x00001040, 0x002ffc0f },
664 { 0x00001044, 0x002ffc0f },
665 { 0x00001048, 0x002ffc0f },
666 { 0x0000104c, 0x002ffc0f },
667 { 0x00001050, 0x002ffc0f },
668 { 0x00001054, 0x002ffc0f },
669 { 0x00001058, 0x002ffc0f },
670 { 0x0000105c, 0x002ffc0f },
671 { 0x00001060, 0x002ffc0f },
672 { 0x00001064, 0x002ffc0f },
673 { 0x00001230, 0x00000000 },
674 { 0x00001270, 0x00000000 },
675 { 0x00001038, 0x00000000 },
676 { 0x00001078, 0x00000000 },
677 { 0x000010b8, 0x00000000 },
678 { 0x000010f8, 0x00000000 },
679 { 0x00001138, 0x00000000 },
680 { 0x00001178, 0x00000000 },
681 { 0x000011b8, 0x00000000 },
682 { 0x000011f8, 0x00000000 },
683 { 0x00001238, 0x00000000 },
684 { 0x00001278, 0x00000000 },
685 { 0x000012b8, 0x00000000 },
686 { 0x000012f8, 0x00000000 },
687 { 0x00001338, 0x00000000 },
688 { 0x00001378, 0x00000000 },
689 { 0x000013b8, 0x00000000 },
690 { 0x000013f8, 0x00000000 },
691 { 0x00001438, 0x00000000 },
692 { 0x00001478, 0x00000000 },
693 { 0x000014b8, 0x00000000 },
694 { 0x000014f8, 0x00000000 },
695 { 0x00001538, 0x00000000 },
696 { 0x00001578, 0x00000000 },
697 { 0x000015b8, 0x00000000 },
698 { 0x000015f8, 0x00000000 },
699 { 0x00001638, 0x00000000 },
700 { 0x00001678, 0x00000000 },
701 { 0x000016b8, 0x00000000 },
702 { 0x000016f8, 0x00000000 },
703 { 0x00001738, 0x00000000 },
704 { 0x00001778, 0x00000000 },
705 { 0x000017b8, 0x00000000 },
706 { 0x000017f8, 0x00000000 },
707 { 0x0000103c, 0x00000000 },
708 { 0x0000107c, 0x00000000 },
709 { 0x000010bc, 0x00000000 },
710 { 0x000010fc, 0x00000000 },
711 { 0x0000113c, 0x00000000 },
712 { 0x0000117c, 0x00000000 },
713 { 0x000011bc, 0x00000000 },
714 { 0x000011fc, 0x00000000 },
715 { 0x0000123c, 0x00000000 },
716 { 0x0000127c, 0x00000000 },
717 { 0x000012bc, 0x00000000 },
718 { 0x000012fc, 0x00000000 },
719 { 0x0000133c, 0x00000000 },
720 { 0x0000137c, 0x00000000 },
721 { 0x000013bc, 0x00000000 },
722 { 0x000013fc, 0x00000000 },
723 { 0x0000143c, 0x00000000 },
724 { 0x0000147c, 0x00000000 },
725 { 0x00004030, 0x00000002 },
726 { 0x0000403c, 0x00000002 },
727 { 0x00007010, 0x00000020 },
728 { 0x00007038, 0x000004c2 },
729 { 0x00008004, 0x00000000 },
730 { 0x00008008, 0x00000000 },
731 { 0x0000800c, 0x00000000 },
732 { 0x00008018, 0x00000700 },
733 { 0x00008020, 0x00000000 },
734 { 0x00008038, 0x00000000 },
735 { 0x0000803c, 0x00000000 },
736 { 0x00008048, 0x40000000 },
737 { 0x00008054, 0x00000000 },
738 { 0x00008058, 0x00000000 },
739 { 0x0000805c, 0x000fc78f },
740 { 0x00008060, 0x0000000f },
741 { 0x00008064, 0x00000000 },
742 { 0x000080c0, 0x2a82301a },
743 { 0x000080c4, 0x05dc01e0 },
744 { 0x000080c8, 0x1f402710 },
745 { 0x000080cc, 0x01f40000 },
746 { 0x000080d0, 0x00001e00 },
747 { 0x000080d4, 0x00000000 },
748 { 0x000080d8, 0x00400000 },
749 { 0x000080e0, 0xffffffff },
750 { 0x000080e4, 0x0000ffff },
751 { 0x000080e8, 0x003f3f3f },
752 { 0x000080ec, 0x00000000 },
753 { 0x000080f0, 0x00000000 },
754 { 0x000080f4, 0x00000000 },
755 { 0x000080f8, 0x00000000 },
756 { 0x000080fc, 0x00020000 },
757 { 0x00008100, 0x00020000 },
758 { 0x00008104, 0x00000001 },
759 { 0x00008108, 0x00000052 },
760 { 0x0000810c, 0x00000000 },
761 { 0x00008110, 0x00000168 },
762 { 0x00008118, 0x000100aa },
763 { 0x0000811c, 0x00003210 },
764 { 0x00008120, 0x08f04800 },
765 { 0x00008124, 0x00000000 },
766 { 0x00008128, 0x00000000 },
767 { 0x0000812c, 0x00000000 },
768 { 0x00008130, 0x00000000 },
769 { 0x00008134, 0x00000000 },
770 { 0x00008138, 0x00000000 },
771 { 0x0000813c, 0x00000000 },
772 { 0x00008144, 0xffffffff },
773 { 0x00008168, 0x00000000 },
774 { 0x0000816c, 0x00000000 },
775 { 0x00008170, 0x32143320 },
776 { 0x00008174, 0xfaa4fa50 },
777 { 0x00008178, 0x00000100 },
778 { 0x0000817c, 0x00000000 },
779 { 0x000081c4, 0x00000000 },
780 { 0x000081d0, 0x00003210 },
781 { 0x000081ec, 0x00000000 },
782 { 0x000081f0, 0x00000000 },
783 { 0x000081f4, 0x00000000 },
784 { 0x000081f8, 0x00000000 },
785 { 0x000081fc, 0x00000000 },
786 { 0x00008200, 0x00000000 },
787 { 0x00008204, 0x00000000 },
788 { 0x00008208, 0x00000000 },
789 { 0x0000820c, 0x00000000 },
790 { 0x00008210, 0x00000000 },
791 { 0x00008214, 0x00000000 },
792 { 0x00008218, 0x00000000 },
793 { 0x0000821c, 0x00000000 },
794 { 0x00008220, 0x00000000 },
795 { 0x00008224, 0x00000000 },
796 { 0x00008228, 0x00000000 },
797 { 0x0000822c, 0x00000000 },
798 { 0x00008230, 0x00000000 },
799 { 0x00008234, 0x00000000 },
800 { 0x00008238, 0x00000000 },
801 { 0x0000823c, 0x00000000 },
802 { 0x00008240, 0x00100000 },
803 { 0x00008244, 0x0010f400 },
804 { 0x00008248, 0x00000100 },
805 { 0x0000824c, 0x0001e800 },
806 { 0x00008250, 0x00000000 },
807 { 0x00008254, 0x00000000 },
808 { 0x00008258, 0x00000000 },
809 { 0x0000825c, 0x400000ff },
810 { 0x00008260, 0x00080922 },
811 { 0x00008270, 0x00000000 },
812 { 0x00008274, 0x40000000 },
813 { 0x00008278, 0x003e4180 },
814 { 0x0000827c, 0x00000000 },
815 { 0x00008284, 0x0000002c },
816 { 0x00008288, 0x0000002c },
817 { 0x0000828c, 0x00000000 },
818 { 0x00008294, 0x00000000 },
819 { 0x00008298, 0x00000000 },
820 { 0x00008300, 0x00000000 },
821 { 0x00008304, 0x00000000 },
822 { 0x00008308, 0x00000000 },
823 { 0x0000830c, 0x00000000 },
824 { 0x00008310, 0x00000000 },
825 { 0x00008314, 0x00000000 },
826 { 0x00008318, 0x00000000 },
827 { 0x00008328, 0x00000000 },
828 { 0x0000832c, 0x00000007 },
829 { 0x00008330, 0x00000302 },
830 { 0x00008334, 0x00000e00 },
831 { 0x00008338, 0x00ff0000 },
832 { 0x0000833c, 0x00000000 },
833 { 0x00008340, 0x000107ff },
834 { 0x00009808, 0x00000000 },
835 { 0x0000980c, 0xad848e19 },
836 { 0x00009810, 0x7d14e000 },
837 { 0x00009814, 0x9c0a9f6b },
838 { 0x0000981c, 0x00000000 },
839 { 0x0000982c, 0x0000a000 },
840 { 0x00009830, 0x00000000 },
841 { 0x0000983c, 0x00200400 },
842 { 0x00009840, 0x206a01ae },
843 { 0x0000984c, 0x1284233c },
844 { 0x00009854, 0x00000859 },
845 { 0x00009900, 0x00000000 },
846 { 0x00009904, 0x00000000 },
847 { 0x00009908, 0x00000000 },
848 { 0x0000990c, 0x00000000 },
849 { 0x0000991c, 0x10000fff },
850 { 0x00009920, 0x05100000 },
851 { 0x0000a920, 0x05100000 },
852 { 0x0000b920, 0x05100000 },
853 { 0x00009928, 0x00000001 },
854 { 0x0000992c, 0x00000004 },
855 { 0x00009934, 0x1e1f2022 },
856 { 0x00009938, 0x0a0b0c0d },
857 { 0x0000993c, 0x00000000 },
858 { 0x00009948, 0x9280b212 },
859 { 0x0000994c, 0x00020028 },
860 { 0x00009954, 0x5f3ca3de },
861 { 0x00009958, 0x2108ecff },
862 { 0x00009940, 0x00750604 },
863 { 0x0000c95c, 0x004b6a8e },
864 { 0x00009970, 0x190fb515 },
865 { 0x00009974, 0x00000000 },
866 { 0x00009978, 0x00000001 },
867 { 0x0000997c, 0x00000000 },
868 { 0x00009980, 0x00000000 },
869 { 0x00009984, 0x00000000 },
870 { 0x00009988, 0x00000000 },
871 { 0x0000998c, 0x00000000 },
872 { 0x00009990, 0x00000000 },
873 { 0x00009994, 0x00000000 },
874 { 0x00009998, 0x00000000 },
875 { 0x0000999c, 0x00000000 },
876 { 0x000099a0, 0x00000000 },
877 { 0x000099a4, 0x00000001 },
878 { 0x000099a8, 0x201fff00 },
879 { 0x000099ac, 0x006f0000 },
880 { 0x000099b0, 0x03051000 },
881 { 0x000099dc, 0x00000000 },
882 { 0x000099e0, 0x00000200 },
883 { 0x000099e4, 0xaaaaaaaa },
884 { 0x000099e8, 0x3c466478 },
885 { 0x000099ec, 0x0cc80caa },
886 { 0x000099fc, 0x00001042 },
887 { 0x00009b00, 0x00000000 },
888 { 0x00009b04, 0x00000001 },
889 { 0x00009b08, 0x00000002 },
890 { 0x00009b0c, 0x00000003 },
891 { 0x00009b10, 0x00000004 },
892 { 0x00009b14, 0x00000005 },
893 { 0x00009b18, 0x00000008 },
894 { 0x00009b1c, 0x00000009 },
895 { 0x00009b20, 0x0000000a },
896 { 0x00009b24, 0x0000000b },
897 { 0x00009b28, 0x0000000c },
898 { 0x00009b2c, 0x0000000d },
899 { 0x00009b30, 0x00000010 },
900 { 0x00009b34, 0x00000011 },
901 { 0x00009b38, 0x00000012 },
902 { 0x00009b3c, 0x00000013 },
903 { 0x00009b40, 0x00000014 },
904 { 0x00009b44, 0x00000015 },
905 { 0x00009b48, 0x00000018 },
906 { 0x00009b4c, 0x00000019 },
907 { 0x00009b50, 0x0000001a },
908 { 0x00009b54, 0x0000001b },
909 { 0x00009b58, 0x0000001c },
910 { 0x00009b5c, 0x0000001d },
911 { 0x00009b60, 0x00000020 },
912 { 0x00009b64, 0x00000021 },
913 { 0x00009b68, 0x00000022 },
914 { 0x00009b6c, 0x00000023 },
915 { 0x00009b70, 0x00000024 },
916 { 0x00009b74, 0x00000025 },
917 { 0x00009b78, 0x00000028 },
918 { 0x00009b7c, 0x00000029 },
919 { 0x00009b80, 0x0000002a },
920 { 0x00009b84, 0x0000002b },
921 { 0x00009b88, 0x0000002c },
922 { 0x00009b8c, 0x0000002d },
923 { 0x00009b90, 0x00000030 },
924 { 0x00009b94, 0x00000031 },
925 { 0x00009b98, 0x00000032 },
926 { 0x00009b9c, 0x00000033 },
927 { 0x00009ba0, 0x00000034 },
928 { 0x00009ba4, 0x00000035 },
929 { 0x00009ba8, 0x00000035 },
930 { 0x00009bac, 0x00000035 },
931 { 0x00009bb0, 0x00000035 },
932 { 0x00009bb4, 0x00000035 },
933 { 0x00009bb8, 0x00000035 },
934 { 0x00009bbc, 0x00000035 },
935 { 0x00009bc0, 0x00000035 },
936 { 0x00009bc4, 0x00000035 },
937 { 0x00009bc8, 0x00000035 },
938 { 0x00009bcc, 0x00000035 },
939 { 0x00009bd0, 0x00000035 },
940 { 0x00009bd4, 0x00000035 },
941 { 0x00009bd8, 0x00000035 },
942 { 0x00009bdc, 0x00000035 },
943 { 0x00009be0, 0x00000035 },
944 { 0x00009be4, 0x00000035 },
945 { 0x00009be8, 0x00000035 },
946 { 0x00009bec, 0x00000035 },
947 { 0x00009bf0, 0x00000035 },
948 { 0x00009bf4, 0x00000035 },
949 { 0x00009bf8, 0x00000010 },
950 { 0x00009bfc, 0x0000001a },
951 { 0x0000a210, 0x40806333 },
952 { 0x0000a214, 0x00106c10 },
953 { 0x0000a218, 0x009c4060 },
954 { 0x0000a220, 0x018830c6 },
955 { 0x0000a224, 0x00000400 },
956 { 0x0000a228, 0x001a0bb5 },
957 { 0x0000a22c, 0x00000000 },
958 { 0x0000a234, 0x20202020 },
959 { 0x0000a238, 0x20202020 },
960 { 0x0000a23c, 0x13c889af },
961 { 0x0000a240, 0x38490a20 },
962 { 0x0000a244, 0x00007bb6 },
963 { 0x0000a248, 0x0fff3ffc },
964 { 0x0000a24c, 0x00000001 },
965 { 0x0000a250, 0x0000e000 },
966 { 0x0000a254, 0x00000000 },
967 { 0x0000a258, 0x0cc75380 },
968 { 0x0000a25c, 0x0f0f0f01 },
969 { 0x0000a260, 0xdfa91f01 },
970 { 0x0000a268, 0x00000001 },
971 { 0x0000a26c, 0x0ebae9c6 },
972 { 0x0000b26c, 0x0ebae9c6 },
973 { 0x0000c26c, 0x0ebae9c6 },
974 { 0x0000d270, 0x00820820 },
975 { 0x0000a278, 0x1ce739ce },
976 { 0x0000a27c, 0x050701ce },
977 { 0x0000a338, 0x00000000 },
978 { 0x0000a33c, 0x00000000 },
979 { 0x0000a340, 0x00000000 },
980 { 0x0000a344, 0x00000000 },
981 { 0x0000a348, 0x3fffffff },
982 { 0x0000a34c, 0x3fffffff },
983 { 0x0000a350, 0x3fffffff },
984 { 0x0000a354, 0x0003ffff },
985 { 0x0000a358, 0x79bfaa03 },
986 { 0x0000d35c, 0x07ffffef },
987 { 0x0000d360, 0x0fffffe7 },
988 { 0x0000d364, 0x17ffffe5 },
989 { 0x0000d368, 0x1fffffe4 },
990 { 0x0000d36c, 0x37ffffe3 },
991 { 0x0000d370, 0x3fffffe3 },
992 { 0x0000d374, 0x57ffffe3 },
993 { 0x0000d378, 0x5fffffe2 },
994 { 0x0000d37c, 0x7fffffe2 },
995 { 0x0000d380, 0x7f3c7bba },
996 { 0x0000d384, 0xf3307ff0 },
997 { 0x0000a388, 0x0c000000 },
998 { 0x0000a38c, 0x20202020 },
999 { 0x0000a390, 0x20202020 },
1000 { 0x0000a394, 0x1ce739ce },
1001 { 0x0000a398, 0x000001ce },
1002 { 0x0000a39c, 0x00000001 },
1003 { 0x0000a3a0, 0x00000000 },
1004 { 0x0000a3a4, 0x00000000 },
1005 { 0x0000a3a8, 0x00000000 },
1006 { 0x0000a3ac, 0x00000000 },
1007 { 0x0000a3b0, 0x00000000 },
1008 { 0x0000a3b4, 0x00000000 },
1009 { 0x0000a3b8, 0x00000000 },
1010 { 0x0000a3bc, 0x00000000 },
1011 { 0x0000a3c0, 0x00000000 },
1012 { 0x0000a3c4, 0x00000000 },
1013 { 0x0000a3c8, 0x00000246 },
1014 { 0x0000a3cc, 0x20202020 },
1015 { 0x0000a3d0, 0x20202020 },
1016 { 0x0000a3d4, 0x20202020 },
1017 { 0x0000a3dc, 0x1ce739ce },
1018 { 0x0000a3e0, 0x000001ce },
1019};
1020
1021static const u32 ar5416Bank0_9160[][2] = {
1022 { 0x000098b0, 0x1e5795e5 },
1023 { 0x000098e0, 0x02008020 },
1024};
1025
1026static const u32 ar5416BB_RfGain_9160[][3] = {
1027 { 0x00009a00, 0x00000000, 0x00000000 },
1028 { 0x00009a04, 0x00000040, 0x00000040 },
1029 { 0x00009a08, 0x00000080, 0x00000080 },
1030 { 0x00009a0c, 0x000001a1, 0x00000141 },
1031 { 0x00009a10, 0x000001e1, 0x00000181 },
1032 { 0x00009a14, 0x00000021, 0x000001c1 },
1033 { 0x00009a18, 0x00000061, 0x00000001 },
1034 { 0x00009a1c, 0x00000168, 0x00000041 },
1035 { 0x00009a20, 0x000001a8, 0x000001a8 },
1036 { 0x00009a24, 0x000001e8, 0x000001e8 },
1037 { 0x00009a28, 0x00000028, 0x00000028 },
1038 { 0x00009a2c, 0x00000068, 0x00000068 },
1039 { 0x00009a30, 0x00000189, 0x000000a8 },
1040 { 0x00009a34, 0x000001c9, 0x00000169 },
1041 { 0x00009a38, 0x00000009, 0x000001a9 },
1042 { 0x00009a3c, 0x00000049, 0x000001e9 },
1043 { 0x00009a40, 0x00000089, 0x00000029 },
1044 { 0x00009a44, 0x00000170, 0x00000069 },
1045 { 0x00009a48, 0x000001b0, 0x00000190 },
1046 { 0x00009a4c, 0x000001f0, 0x000001d0 },
1047 { 0x00009a50, 0x00000030, 0x00000010 },
1048 { 0x00009a54, 0x00000070, 0x00000050 },
1049 { 0x00009a58, 0x00000191, 0x00000090 },
1050 { 0x00009a5c, 0x000001d1, 0x00000151 },
1051 { 0x00009a60, 0x00000011, 0x00000191 },
1052 { 0x00009a64, 0x00000051, 0x000001d1 },
1053 { 0x00009a68, 0x00000091, 0x00000011 },
1054 { 0x00009a6c, 0x000001b8, 0x00000051 },
1055 { 0x00009a70, 0x000001f8, 0x00000198 },
1056 { 0x00009a74, 0x00000038, 0x000001d8 },
1057 { 0x00009a78, 0x00000078, 0x00000018 },
1058 { 0x00009a7c, 0x00000199, 0x00000058 },
1059 { 0x00009a80, 0x000001d9, 0x00000098 },
1060 { 0x00009a84, 0x00000019, 0x00000159 },
1061 { 0x00009a88, 0x00000059, 0x00000199 },
1062 { 0x00009a8c, 0x00000099, 0x000001d9 },
1063 { 0x00009a90, 0x000000d9, 0x00000019 },
1064 { 0x00009a94, 0x000000f9, 0x00000059 },
1065 { 0x00009a98, 0x000000f9, 0x00000099 },
1066 { 0x00009a9c, 0x000000f9, 0x000000d9 },
1067 { 0x00009aa0, 0x000000f9, 0x000000f9 },
1068 { 0x00009aa4, 0x000000f9, 0x000000f9 },
1069 { 0x00009aa8, 0x000000f9, 0x000000f9 },
1070 { 0x00009aac, 0x000000f9, 0x000000f9 },
1071 { 0x00009ab0, 0x000000f9, 0x000000f9 },
1072 { 0x00009ab4, 0x000000f9, 0x000000f9 },
1073 { 0x00009ab8, 0x000000f9, 0x000000f9 },
1074 { 0x00009abc, 0x000000f9, 0x000000f9 },
1075 { 0x00009ac0, 0x000000f9, 0x000000f9 },
1076 { 0x00009ac4, 0x000000f9, 0x000000f9 },
1077 { 0x00009ac8, 0x000000f9, 0x000000f9 },
1078 { 0x00009acc, 0x000000f9, 0x000000f9 },
1079 { 0x00009ad0, 0x000000f9, 0x000000f9 },
1080 { 0x00009ad4, 0x000000f9, 0x000000f9 },
1081 { 0x00009ad8, 0x000000f9, 0x000000f9 },
1082 { 0x00009adc, 0x000000f9, 0x000000f9 },
1083 { 0x00009ae0, 0x000000f9, 0x000000f9 },
1084 { 0x00009ae4, 0x000000f9, 0x000000f9 },
1085 { 0x00009ae8, 0x000000f9, 0x000000f9 },
1086 { 0x00009aec, 0x000000f9, 0x000000f9 },
1087 { 0x00009af0, 0x000000f9, 0x000000f9 },
1088 { 0x00009af4, 0x000000f9, 0x000000f9 },
1089 { 0x00009af8, 0x000000f9, 0x000000f9 },
1090 { 0x00009afc, 0x000000f9, 0x000000f9 },
1091};
1092
1093static const u32 ar5416Bank1_9160[][2] = {
1094 { 0x000098b0, 0x02108421 },
1095 { 0x000098ec, 0x00000008 },
1096};
1097
1098static const u32 ar5416Bank2_9160[][2] = {
1099 { 0x000098b0, 0x0e73ff17 },
1100 { 0x000098e0, 0x00000420 },
1101};
1102
1103static const u32 ar5416Bank3_9160[][3] = {
1104 { 0x000098f0, 0x01400018, 0x01c00018 },
1105};
1106
1107static const u32 ar5416Bank6_9160[][3] = {
1108 { 0x0000989c, 0x00000000, 0x00000000 },
1109 { 0x0000989c, 0x00000000, 0x00000000 },
1110 { 0x0000989c, 0x00000000, 0x00000000 },
1111 { 0x0000989c, 0x00e00000, 0x00e00000 },
1112 { 0x0000989c, 0x005e0000, 0x005e0000 },
1113 { 0x0000989c, 0x00120000, 0x00120000 },
1114 { 0x0000989c, 0x00620000, 0x00620000 },
1115 { 0x0000989c, 0x00020000, 0x00020000 },
1116 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1117 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1118 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1119 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1120 { 0x0000989c, 0x005f0000, 0x005f0000 },
1121 { 0x0000989c, 0x00870000, 0x00870000 },
1122 { 0x0000989c, 0x00f90000, 0x00f90000 },
1123 { 0x0000989c, 0x007b0000, 0x007b0000 },
1124 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1125 { 0x0000989c, 0x00f50000, 0x00f50000 },
1126 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1127 { 0x0000989c, 0x00110000, 0x00110000 },
1128 { 0x0000989c, 0x006100a8, 0x006100a8 },
1129 { 0x0000989c, 0x004210a2, 0x004210a2 },
1130 { 0x0000989c, 0x0014008f, 0x0014008f },
1131 { 0x0000989c, 0x00c40003, 0x00c40003 },
1132 { 0x0000989c, 0x003000f2, 0x003000f2 },
1133 { 0x0000989c, 0x00440016, 0x00440016 },
1134 { 0x0000989c, 0x00410040, 0x00410040 },
1135 { 0x0000989c, 0x0001805e, 0x0001805e },
1136 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1137 { 0x0000989c, 0x000000f1, 0x000000f1 },
1138 { 0x0000989c, 0x00002081, 0x00002081 },
1139 { 0x0000989c, 0x000000d4, 0x000000d4 },
1140 { 0x000098d0, 0x0000000f, 0x0010000f },
1141};
1142
1143static const u32 ar5416Bank6TPC_9160[][3] = {
1144 { 0x0000989c, 0x00000000, 0x00000000 },
1145 { 0x0000989c, 0x00000000, 0x00000000 },
1146 { 0x0000989c, 0x00000000, 0x00000000 },
1147 { 0x0000989c, 0x00e00000, 0x00e00000 },
1148 { 0x0000989c, 0x005e0000, 0x005e0000 },
1149 { 0x0000989c, 0x00120000, 0x00120000 },
1150 { 0x0000989c, 0x00620000, 0x00620000 },
1151 { 0x0000989c, 0x00020000, 0x00020000 },
1152 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1153 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1154 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1155 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1156 { 0x0000989c, 0x005f0000, 0x005f0000 },
1157 { 0x0000989c, 0x00870000, 0x00870000 },
1158 { 0x0000989c, 0x00f90000, 0x00f90000 },
1159 { 0x0000989c, 0x007b0000, 0x007b0000 },
1160 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1161 { 0x0000989c, 0x00f50000, 0x00f50000 },
1162 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1163 { 0x0000989c, 0x00110000, 0x00110000 },
1164 { 0x0000989c, 0x006100a8, 0x006100a8 },
1165 { 0x0000989c, 0x00423022, 0x00423022 },
1166 { 0x0000989c, 0x2014008f, 0x2014008f },
1167 { 0x0000989c, 0x00c40002, 0x00c40002 },
1168 { 0x0000989c, 0x003000f2, 0x003000f2 },
1169 { 0x0000989c, 0x00440016, 0x00440016 },
1170 { 0x0000989c, 0x00410040, 0x00410040 },
1171 { 0x0000989c, 0x0001805e, 0x0001805e },
1172 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1173 { 0x0000989c, 0x000000e1, 0x000000e1 },
1174 { 0x0000989c, 0x00007080, 0x00007080 },
1175 { 0x0000989c, 0x000000d4, 0x000000d4 },
1176 { 0x000098d0, 0x0000000f, 0x0010000f },
1177};
1178
1179static const u32 ar5416Bank7_9160[][2] = {
1180 { 0x0000989c, 0x00000500 },
1181 { 0x0000989c, 0x00000800 },
1182 { 0x000098cc, 0x0000000e },
1183};
1184
1185static const u32 ar5416Addac_9160[][2] = {
1186 {0x0000989c, 0x00000000 },
1187 {0x0000989c, 0x00000000 },
1188 {0x0000989c, 0x00000000 },
1189 {0x0000989c, 0x00000000 },
1190 {0x0000989c, 0x00000000 },
1191 {0x0000989c, 0x00000000 },
1192 {0x0000989c, 0x000000c0 },
1193 {0x0000989c, 0x00000018 },
1194 {0x0000989c, 0x00000004 },
1195 {0x0000989c, 0x00000000 },
1196 {0x0000989c, 0x00000000 },
1197 {0x0000989c, 0x00000000 },
1198 {0x0000989c, 0x00000000 },
1199 {0x0000989c, 0x00000000 },
1200 {0x0000989c, 0x00000000 },
1201 {0x0000989c, 0x00000000 },
1202 {0x0000989c, 0x00000000 },
1203 {0x0000989c, 0x00000000 },
1204 {0x0000989c, 0x00000000 },
1205 {0x0000989c, 0x00000000 },
1206 {0x0000989c, 0x00000000 },
1207 {0x0000989c, 0x000000c0 },
1208 {0x0000989c, 0x00000019 },
1209 {0x0000989c, 0x00000004 },
1210 {0x0000989c, 0x00000000 },
1211 {0x0000989c, 0x00000000 },
1212 {0x0000989c, 0x00000000 },
1213 {0x0000989c, 0x00000004 },
1214 {0x0000989c, 0x00000003 },
1215 {0x0000989c, 0x00000008 },
1216 {0x0000989c, 0x00000000 },
1217 {0x000098cc, 0x00000000 },
1218};
1219
1220static const u32 ar5416Addac_91601_1[][2] = {
1221 {0x0000989c, 0x00000000 },
1222 {0x0000989c, 0x00000000 },
1223 {0x0000989c, 0x00000000 },
1224 {0x0000989c, 0x00000000 },
1225 {0x0000989c, 0x00000000 },
1226 {0x0000989c, 0x00000000 },
1227 {0x0000989c, 0x000000c0 },
1228 {0x0000989c, 0x00000018 },
1229 {0x0000989c, 0x00000004 },
1230 {0x0000989c, 0x00000000 },
1231 {0x0000989c, 0x00000000 },
1232 {0x0000989c, 0x00000000 },
1233 {0x0000989c, 0x00000000 },
1234 {0x0000989c, 0x00000000 },
1235 {0x0000989c, 0x00000000 },
1236 {0x0000989c, 0x00000000 },
1237 {0x0000989c, 0x00000000 },
1238 {0x0000989c, 0x00000000 },
1239 {0x0000989c, 0x00000000 },
1240 {0x0000989c, 0x00000000 },
1241 {0x0000989c, 0x00000000 },
1242 {0x0000989c, 0x000000c0 },
1243 {0x0000989c, 0x00000019 },
1244 {0x0000989c, 0x00000004 },
1245 {0x0000989c, 0x00000000 },
1246 {0x0000989c, 0x00000000 },
1247 {0x0000989c, 0x00000000 },
1248 {0x0000989c, 0x00000000 },
1249 {0x0000989c, 0x00000000 },
1250 {0x0000989c, 0x00000000 },
1251 {0x0000989c, 0x00000000 },
1252 {0x000098cc, 0x00000000 },
1253};
1254
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
new file mode 100644
index 000000000000..5fdbb53b47e0
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -0,0 +1,1000 @@
1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "hw-ops.h"
19#include "ar9002_phy.h"
20
21#define AR9285_CLCAL_REDO_THRESH 1
22
/*
 * Program the baseband for one run of the given periodic calibration and
 * start it: write the log2 sample count, select the calibration mode for
 * the cal type, then set DO_CAL to kick off the hardware engine.
 */
static void ar9002_hw_setup_calibration(struct ath_hw *ah,
                                        struct ath9k_cal_list *currCal)
{
        struct ath_common *common = ath9k_hw_common(ah);

        /* Number of samples HW collects per run: 2^(calCountMax + ...) */
        REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
                      AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
                      currCal->calData->calCountMax);

        switch (currCal->calData->calType) {
        case IQ_MISMATCH_CAL:
                REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
                ath_print(common, ATH_DBG_CALIBRATE,
                          "starting IQ Mismatch Calibration\n");
                break;
        case ADC_GAIN_CAL:
                REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
                ath_print(common, ATH_DBG_CALIBRATE,
                          "starting ADC Gain Calibration\n");
                break;
        case ADC_DC_CAL:
                REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
                ath_print(common, ATH_DBG_CALIBRATE,
                          "starting ADC DC Calibration\n");
                break;
        case ADC_DC_INIT_CAL:
                REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
                ath_print(common, ATH_DBG_CALIBRATE,
                          "starting Init ADC DC Calibration\n");
                break;
        case TEMP_COMP_CAL:
                break; /* Not supported */
        }

        /* Arm the calibration engine; DO_CAL self-clears when done */
        REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
                    AR_PHY_TIMING_CTRL4_DO_CAL);
}
60
/*
 * Advance the periodic calibration state machine for @currCal by one step.
 *
 * CAL_RUNNING: once the hardware has cleared DO_CAL, harvest one sample
 * run via calCollect(); after calNumSamples runs, post-process over the
 * active RX chains, mark the cal type valid on @ichan and complete.
 * Otherwise re-arm the hardware for another sample run.
 *
 * Not running: if the cal result is stale for this channel, restart it.
 *
 * Returns true when the calibration has fully completed.
 */
static bool ar9002_hw_per_calibration(struct ath_hw *ah,
                                      struct ath9k_channel *ichan,
                                      u8 rxchainmask,
                                      struct ath9k_cal_list *currCal)
{
        bool iscaldone = false;

        if (currCal->calState == CAL_RUNNING) {
                /* DO_CAL clears when the hardware finished sampling */
                if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
                      AR_PHY_TIMING_CTRL4_DO_CAL)) {

                        currCal->calData->calCollect(ah);
                        ah->cal_samples++;

                        if (ah->cal_samples >=
                            currCal->calData->calNumSamples) {
                                int i, numChains = 0;
                                /* Count RX chains enabled in the mask */
                                for (i = 0; i < AR5416_MAX_CHAINS; i++) {
                                        if (rxchainmask & (1 << i))
                                                numChains++;
                                }

                                currCal->calData->calPostProc(ah, numChains);
                                ichan->CalValid |= currCal->calData->calType;
                                currCal->calState = CAL_DONE;
                                iscaldone = true;
                        } else {
                                ar9002_hw_setup_calibration(ah, currCal);
                        }
                }
        } else if (!(ichan->CalValid & currCal->calData->calType)) {
                /* Stale result for this channel: start the cal over */
                ath9k_hw_reset_calibration(ah, currCal);
        }

        return iscaldone;
}
97
98/* Assumes you are talking about the currently configured channel */
99static bool ar9002_hw_iscal_supported(struct ath_hw *ah,
100 enum ath9k_cal_types calType)
101{
102 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
103
104 switch (calType & ah->supp_cals) {
105 case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
106 return true;
107 case ADC_GAIN_CAL:
108 case ADC_DC_CAL:
109 if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
110 conf_is_ht20(conf)))
111 return true;
112 break;
113 }
114 return false;
115}
116
/*
 * Accumulate one sample run of IQ mismatch measurements for every chain:
 * I power, Q power and the (signed) I/Q correlation.  The sums are
 * averaged later by the post-process hook (ar9002_hw_iqcalibrate).
 */
static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
{
        int i;

        for (i = 0; i < AR5416_MAX_CHAINS; i++) {
                ah->totalPowerMeasI[i] +=
                        REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
                ah->totalPowerMeasQ[i] +=
                        REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
                ah->totalIqCorrMeas[i] +=
                        (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
                ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
                          "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
                          ah->cal_samples, i, ah->totalPowerMeasI[i],
                          ah->totalPowerMeasQ[i],
                          ah->totalIqCorrMeas[i]);
        }
}
135
/*
 * Accumulate one sample run of ADC gain measurements for every chain:
 * odd/even-phase power for both the I and Q ADCs.  Averaged later by
 * ar9002_hw_adc_gaincal_calibrate().
 */
static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah)
{
        int i;

        for (i = 0; i < AR5416_MAX_CHAINS; i++) {
                ah->totalAdcIOddPhase[i] +=
                        REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
                ah->totalAdcIEvenPhase[i] +=
                        REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
                ah->totalAdcQOddPhase[i] +=
                        REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
                ah->totalAdcQEvenPhase[i] +=
                        REG_READ(ah, AR_PHY_CAL_MEAS_3(i));

                ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
                          "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
                          "oddq=0x%08x; evenq=0x%08x;\n",
                          ah->cal_samples, i,
                          ah->totalAdcIOddPhase[i],
                          ah->totalAdcIEvenPhase[i],
                          ah->totalAdcQOddPhase[i],
                          ah->totalAdcQEvenPhase[i]);
        }
}
160
/*
 * Accumulate one sample run of ADC DC-offset measurements for every
 * chain.  Unlike the gain-cal collector, readings are sign-extended
 * (int32_t cast) because DC offsets may be negative.  Averaged later by
 * ar9002_hw_adc_dccal_calibrate().
 */
static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah)
{
        int i;

        for (i = 0; i < AR5416_MAX_CHAINS; i++) {
                ah->totalAdcDcOffsetIOddPhase[i] +=
                        (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
                ah->totalAdcDcOffsetIEvenPhase[i] +=
                        (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
                ah->totalAdcDcOffsetQOddPhase[i] +=
                        (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
                ah->totalAdcDcOffsetQEvenPhase[i] +=
                        (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));

                ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
                          "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
                          "oddq=0x%08x; evenq=0x%08x;\n",
                          ah->cal_samples, i,
                          ah->totalAdcDcOffsetIOddPhase[i],
                          ah->totalAdcDcOffsetIEvenPhase[i],
                          ah->totalAdcDcOffsetQOddPhase[i],
                          ah->totalAdcDcOffsetQEvenPhase[i]);
        }
}
185
186static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
187{
188 struct ath_common *common = ath9k_hw_common(ah);
189 u32 powerMeasQ, powerMeasI, iqCorrMeas;
190 u32 qCoffDenom, iCoffDenom;
191 int32_t qCoff, iCoff;
192 int iqCorrNeg, i;
193
194 for (i = 0; i < numChains; i++) {
195 powerMeasI = ah->totalPowerMeasI[i];
196 powerMeasQ = ah->totalPowerMeasQ[i];
197 iqCorrMeas = ah->totalIqCorrMeas[i];
198
199 ath_print(common, ATH_DBG_CALIBRATE,
200 "Starting IQ Cal and Correction for Chain %d\n",
201 i);
202
203 ath_print(common, ATH_DBG_CALIBRATE,
204 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
205 i, ah->totalIqCorrMeas[i]);
206
207 iqCorrNeg = 0;
208
209 if (iqCorrMeas > 0x80000000) {
210 iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
211 iqCorrNeg = 1;
212 }
213
214 ath_print(common, ATH_DBG_CALIBRATE,
215 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
216 ath_print(common, ATH_DBG_CALIBRATE,
217 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
218 ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
219 iqCorrNeg);
220
221 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
222 qCoffDenom = powerMeasQ / 64;
223
224 if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
225 (qCoffDenom != 0)) {
226 iCoff = iqCorrMeas / iCoffDenom;
227 qCoff = powerMeasI / qCoffDenom - 64;
228 ath_print(common, ATH_DBG_CALIBRATE,
229 "Chn %d iCoff = 0x%08x\n", i, iCoff);
230 ath_print(common, ATH_DBG_CALIBRATE,
231 "Chn %d qCoff = 0x%08x\n", i, qCoff);
232
233 iCoff = iCoff & 0x3f;
234 ath_print(common, ATH_DBG_CALIBRATE,
235 "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
236 if (iqCorrNeg == 0x0)
237 iCoff = 0x40 - iCoff;
238
239 if (qCoff > 15)
240 qCoff = 15;
241 else if (qCoff <= -16)
242 qCoff = 16;
243
244 ath_print(common, ATH_DBG_CALIBRATE,
245 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
246 i, iCoff, qCoff);
247
248 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
249 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
250 iCoff);
251 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
252 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
253 qCoff);
254 ath_print(common, ATH_DBG_CALIBRATE,
255 "IQ Cal and Correction done for Chain %d\n",
256 i);
257 }
258 }
259
260 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
261 AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
262}
263
/*
 * Post-process the accumulated ADC gain measurements: compute the
 * odd/even gain mismatch for the I and Q ADCs of each chain, program the
 * 6-bit correction fields, then enable ADC gain correction globally.
 */
static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
{
        struct ath_common *common = ath9k_hw_common(ah);
        u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
        u32 qGainMismatch, iGainMismatch, val, i;

        for (i = 0; i < numChains; i++) {
                iOddMeasOffset = ah->totalAdcIOddPhase[i];
                iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
                qOddMeasOffset = ah->totalAdcQOddPhase[i];
                qEvenMeasOffset = ah->totalAdcQEvenPhase[i];

                ath_print(common, ATH_DBG_CALIBRATE,
                          "Starting ADC Gain Cal for Chain %d\n", i);

                ath_print(common, ATH_DBG_CALIBRATE,
                          "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
                          iOddMeasOffset);
                ath_print(common, ATH_DBG_CALIBRATE,
                          "Chn %d pwr_meas_even_i = 0x%08x\n", i,
                          iEvenMeasOffset);
                ath_print(common, ATH_DBG_CALIBRATE,
                          "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
                          qOddMeasOffset);
                ath_print(common, ATH_DBG_CALIBRATE,
                          "Chn %d pwr_meas_even_q = 0x%08x\n", i,
                          qEvenMeasOffset);

                /* Guard the divisors; skip the chain on zero readings */
                if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
                        /* Gain ratio scaled by 32, kept to 6 bits */
                        iGainMismatch =
                                ((iEvenMeasOffset * 32) /
                                 iOddMeasOffset) & 0x3f;
                        qGainMismatch =
                                ((qOddMeasOffset * 32) /
                                 qEvenMeasOffset) & 0x3f;

                        ath_print(common, ATH_DBG_CALIBRATE,
                                  "Chn %d gain_mismatch_i = 0x%08x\n", i,
                                  iGainMismatch);
                        ath_print(common, ATH_DBG_CALIBRATE,
                                  "Chn %d gain_mismatch_q = 0x%08x\n", i,
                                  qGainMismatch);

                        /* Q mismatch in bits 0-5, I mismatch in bits 6-11 */
                        val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
                        val &= 0xfffff000;
                        val |= (qGainMismatch) | (iGainMismatch << 6);
                        REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);

                        ath_print(common, ATH_DBG_CALIBRATE,
                                  "ADC Gain Cal done for Chain %d\n", i);
                }
        }

        REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
                  REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
                  AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
}
321
/*
 * Post-process the accumulated ADC DC-offset measurements: average the
 * odd/even phase difference over the total number of hardware samples,
 * program the 9-bit per-chain DC mismatch fields, then enable DC offset
 * correction globally.
 */
static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
{
        struct ath_common *common = ath9k_hw_common(ah);
        u32 iOddMeasOffset, iEvenMeasOffset, val, i;
        int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
        const struct ath9k_percal_data *calData =
                ah->cal_list_curr->calData;
        /* Total HW samples: 2^(calCountMax + 5) per run, calNumSamples runs */
        u32 numSamples =
                (1 << (calData->calCountMax + 5)) * calData->calNumSamples;

        for (i = 0; i < numChains; i++) {
                iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
                iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
                qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
                qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];

                ath_print(common, ATH_DBG_CALIBRATE,
                          "Starting ADC DC Offset Cal for Chain %d\n", i);

                ath_print(common, ATH_DBG_CALIBRATE,
                          "Chn %d pwr_meas_odd_i = %d\n", i,
                          iOddMeasOffset);
                ath_print(common, ATH_DBG_CALIBRATE,
                          "Chn %d pwr_meas_even_i = %d\n", i,
                          iEvenMeasOffset);
                ath_print(common, ATH_DBG_CALIBRATE,
                          "Chn %d pwr_meas_odd_q = %d\n", i,
                          qOddMeasOffset);
                ath_print(common, ATH_DBG_CALIBRATE,
                          "Chn %d pwr_meas_even_q = %d\n", i,
                          qEvenMeasOffset);

                /* 9-bit two's-complement mismatch fields.
                 * NOTE(review): the I difference is computed in unsigned
                 * arithmetic (iOddMeasOffset/iEvenMeasOffset are u32) while
                 * the Q one is signed — presumably the masking to 0x1ff makes
                 * both equivalent; confirm against the register spec. */
                iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
                               numSamples) & 0x1ff;
                qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
                               numSamples) & 0x1ff;

                ath_print(common, ATH_DBG_CALIBRATE,
                          "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
                          iDcMismatch);
                ath_print(common, ATH_DBG_CALIBRATE,
                          "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
                          qDcMismatch);

                /* Q mismatch in bits 12-20, I mismatch in bits 21-29 */
                val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
                val &= 0xc0000fff;
                val |= (qDcMismatch << 12) | (iDcMismatch << 21);
                REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);

                ath_print(common, ATH_DBG_CALIBRATE,
                          "ADC DC Offset Cal done for Chain %d\n", i);
        }

        REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
                  REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
                  AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
}
379
380static void ar9287_hw_olc_temp_compensation(struct ath_hw *ah)
381{
382 u32 rddata;
383 int32_t delta, currPDADC, slope;
384
385 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
386 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
387
388 if (ah->initPDADC == 0 || currPDADC == 0) {
389 /*
390 * Zero value indicates that no frames have been transmitted
391 * yet, can't do temperature compensation until frames are
392 * transmitted.
393 */
394 return;
395 } else {
396 slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
397
398 if (slope == 0) { /* to avoid divide by zero case */
399 delta = 0;
400 } else {
401 delta = ((currPDADC - ah->initPDADC)*4) / slope;
402 }
403 REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
404 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
405 REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
406 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
407 }
408}
409
/*
 * Open-loop power-control temperature compensation for AR9280.
 * Computes a PDADC delta (divisor depends on the 5 GHz DAC high-power
 * EEPROM flag) and, when it changed, rewrites the whole TX gain table
 * with the adjusted gains.
 */
static void ar9280_hw_olc_temp_compensation(struct ath_hw *ah)
{
        u32 rddata, i;
        int delta, currPDADC, regval;

        rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
        currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);

        /* Zero means no frames transmitted yet: nothing to compensate */
        if (ah->initPDADC == 0 || currPDADC == 0)
                return;

        if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
                delta = (currPDADC - ah->initPDADC + 4) / 8;
        else
                delta = (currPDADC - ah->initPDADC + 5) / 10;

        /* Only touch the gain table when the delta actually changed */
        if (delta != ah->PDADCdelta) {
                ah->PDADCdelta = delta;
                for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
                        regval = ah->originalGain[i] - delta;
                        if (regval < 0)
                                regval = 0;

                        REG_RMW_FIELD(ah,
                                      AR_PHY_TX_GAIN_TBL1 + i * 4,
                                      AR_PHY_TX_GAIN, regval);
                }
        }
}
439
/*
 * PA offset calibration for AR9271.
 *
 * Saves a set of analog registers, puts the analog front end into the
 * calibration configuration, performs a bit-by-bit successive search for
 * the 6-bit PA offset, updates the skip-count bookkeeping, and finally
 * restores the saved registers.  The register write order is part of the
 * hardware procedure — do not reorder.
 */
static void ar9271_hw_pa_cal(struct ath_hw *ah, bool is_reset)
{
        u32 regVal;
        unsigned int i;
        /* Registers clobbered by the cal sequence; saved and restored */
        u32 regList[][2] = {
                { 0x786c, 0 },
                { 0x7854, 0 },
                { 0x7820, 0 },
                { 0x7824, 0 },
                { 0x7868, 0 },
                { 0x783c, 0 },
                { 0x7838, 0 } ,
                { 0x7828, 0 } ,
        };

        for (i = 0; i < ARRAY_SIZE(regList); i++)
                regList[i][1] = REG_READ(ah, regList[i][0]);

        regVal = REG_READ(ah, 0x7834);
        regVal &= (~(0x1));
        REG_WRITE(ah, 0x7834, regVal);
        regVal = REG_READ(ah, 0x9808);
        regVal |= (0x1 << 27);
        REG_WRITE(ah, 0x9808, regVal);

        /* 786c,b23,1, pwddac=1 */
        REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
        /* 7854, b5,1, pdrxtxbb=1 */
        REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
        /* 7854, b7,1, pdv2i=1 */
        REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
        /* 7854, b8,1, pddacinterface=1 */
        REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
        /* 7824,b12,0, offcal=0 */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
        /* 7838, b1,0, pwddb=0 */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
        /* 7820,b11,0, enpacal=0 */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
        /* 7820,b25,1, pdpadrv1=0 */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
        /* 7820,b24,0, pdpadrv2=0 */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
        /* 7820,b23,0, pdpaout=0 */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
        /* 783c,b14-16,7, padrvgn2tab_0=7 */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
        /*
         * 7838,b29-31,0, padrvgn1tab_0=0
         * does not matter since we turn it off
         */
        REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);

        REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);

        /* Set:
         * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
         * txon=1,paon=1,oscon=1,synthon_force=1
         */
        REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
        udelay(30);
        REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);

        /* find off_6_1; successive approximation over bits 26..21 */
        for (i = 6; i > 0; i--) {
                regVal = REG_READ(ah, 0x7834);
                regVal |= (1 << (20 + i));
                REG_WRITE(ah, 0x7834, regVal);
                udelay(1);
                /* regVal = REG_READ(ah, 0x7834); */
                /* NOTE(review): unlike ar9285_hw_pa_cal(), regVal is NOT
                 * re-read here (see commented-out read above) — appears
                 * intentional for AR9271; confirm against vendor code. */
                regVal &= (~(0x1 << (20 + i)));
                regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
                            << (20 + i));
                REG_WRITE(ah, 0x7834, regVal);
        }

        regVal = (regVal >> 20) & 0x7f;

        /* Update PA cal info: back off exponentially while the offset is
         * stable, re-calibrate promptly once it moves */
        if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
                if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
                        ah->pacal_info.max_skipcount =
                                2 * ah->pacal_info.max_skipcount;
                ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
        } else {
                ah->pacal_info.max_skipcount = 1;
                ah->pacal_info.skipcount = 0;
                ah->pacal_info.prev_offset = regVal;
        }

        ENABLE_REGWRITE_BUFFER(ah);

        /* Leave calibration mode and restore the saved registers */
        regVal = REG_READ(ah, 0x7834);
        regVal |= 0x1;
        REG_WRITE(ah, 0x7834, regVal);
        regVal = REG_READ(ah, 0x9808);
        regVal &= (~(0x1 << 27));
        REG_WRITE(ah, 0x9808, regVal);

        for (i = 0; i < ARRAY_SIZE(regList); i++)
                REG_WRITE(ah, regList[i][0], regList[i][1]);

        REGWRITE_BUFFER_FLUSH(ah);
        DISABLE_REGWRITE_BUFFER(ah);
}
545
546static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
547{
548 struct ath_common *common = ath9k_hw_common(ah);
549 u32 regVal;
550 int i, offset, offs_6_1, offs_0;
551 u32 ccomp_org, reg_field;
552 u32 regList[][2] = {
553 { 0x786c, 0 },
554 { 0x7854, 0 },
555 { 0x7820, 0 },
556 { 0x7824, 0 },
557 { 0x7868, 0 },
558 { 0x783c, 0 },
559 { 0x7838, 0 },
560 };
561
562 ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
563
564 /* PA CAL is not needed for high power solution */
565 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
566 AR5416_EEP_TXGAIN_HIGH_POWER)
567 return;
568
569 if (AR_SREV_9285_11(ah)) {
570 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
571 udelay(10);
572 }
573
574 for (i = 0; i < ARRAY_SIZE(regList); i++)
575 regList[i][1] = REG_READ(ah, regList[i][0]);
576
577 regVal = REG_READ(ah, 0x7834);
578 regVal &= (~(0x1));
579 REG_WRITE(ah, 0x7834, regVal);
580 regVal = REG_READ(ah, 0x9808);
581 regVal |= (0x1 << 27);
582 REG_WRITE(ah, 0x9808, regVal);
583
584 REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
585 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
586 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
587 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
588 REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
589 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
590 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
591 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
592 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
593 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
594 REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
595 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
596 ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
597 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);
598
599 REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
600 udelay(30);
601 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
602 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
603
604 for (i = 6; i > 0; i--) {
605 regVal = REG_READ(ah, 0x7834);
606 regVal |= (1 << (19 + i));
607 REG_WRITE(ah, 0x7834, regVal);
608 udelay(1);
609 regVal = REG_READ(ah, 0x7834);
610 regVal &= (~(0x1 << (19 + i)));
611 reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
612 regVal |= (reg_field << (19 + i));
613 REG_WRITE(ah, 0x7834, regVal);
614 }
615
616 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
617 udelay(1);
618 reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
619 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
620 offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
621 offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
622
623 offset = (offs_6_1<<1) | offs_0;
624 offset = offset - 0;
625 offs_6_1 = offset>>1;
626 offs_0 = offset & 1;
627
628 if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
629 if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
630 ah->pacal_info.max_skipcount =
631 2 * ah->pacal_info.max_skipcount;
632 ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
633 } else {
634 ah->pacal_info.max_skipcount = 1;
635 ah->pacal_info.skipcount = 0;
636 ah->pacal_info.prev_offset = offset;
637 }
638
639 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
640 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
641
642 regVal = REG_READ(ah, 0x7834);
643 regVal |= 0x1;
644 REG_WRITE(ah, 0x7834, regVal);
645 regVal = REG_READ(ah, 0x9808);
646 regVal &= (~(0x1 << 27));
647 REG_WRITE(ah, 0x9808, regVal);
648
649 for (i = 0; i < ARRAY_SIZE(regList); i++)
650 REG_WRITE(ah, regList[i][0], regList[i][1]);
651
652 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
653
654 if (AR_SREV_9285_11(ah))
655 REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
656
657}
658
659static void ar9002_hw_pa_cal(struct ath_hw *ah, bool is_reset)
660{
661 if (AR_SREV_9271(ah)) {
662 if (is_reset || !ah->pacal_info.skipcount)
663 ar9271_hw_pa_cal(ah, is_reset);
664 else
665 ah->pacal_info.skipcount--;
666 } else if (AR_SREV_9285_11_OR_LATER(ah)) {
667 if (is_reset || !ah->pacal_info.skipcount)
668 ar9285_hw_pa_cal(ah, is_reset);
669 else
670 ah->pacal_info.skipcount--;
671 }
672}
673
674static void ar9002_hw_olc_temp_compensation(struct ath_hw *ah)
675{
676 if (OLC_FOR_AR9287_10_LATER)
677 ar9287_hw_olc_temp_compensation(ah);
678 else if (OLC_FOR_AR9280_20_LATER)
679 ar9280_hw_olc_temp_compensation(ah);
680}
681
/*
 * Periodic calibration entry point (installed as ops->calibrate).
 *
 * Steps the currently scheduled per-chain calibration and advances to
 * the next list entry when it finishes.  On long-interval calls it also
 * runs periodic PA offset cal, OLC temperature compensation and the
 * noise-floor read/load/restart cycle.
 *
 * Returns true when no calibration remains pending.
 */
static bool ar9002_hw_calibrate(struct ath_hw *ah,
                                struct ath9k_channel *chan,
                                u8 rxchainmask,
                                bool longcal)
{
        bool iscaldone = true;
        struct ath9k_cal_list *currCal = ah->cal_list_curr;

        if (currCal &&
            (currCal->calState == CAL_RUNNING ||
             currCal->calState == CAL_WAITING)) {
                iscaldone = ar9002_hw_per_calibration(ah, chan,
                                                      rxchainmask, currCal);
                if (iscaldone) {
                        /* Move on to the next calibration in the list */
                        ah->cal_list_curr = currCal = currCal->calNext;

                        if (currCal->calState == CAL_WAITING) {
                                iscaldone = false;
                                ath9k_hw_reset_calibration(ah, currCal);
                        }
                }
        }

        /* Do NF cal only at longer intervals */
        if (longcal) {
                /* Do periodic PAOffset Cal */
                ar9002_hw_pa_cal(ah, false);
                ar9002_hw_olc_temp_compensation(ah);

                /*
                 * Get the value from the previous NF cal and update
                 * history buffer.
                 */
                ath9k_hw_getnf(ah, chan);

                /*
                 * Load the NF from history buffer of the current channel.
                 * NF is slow time-variant, so it is OK to use a historical
                 * value.
                 */
                ath9k_hw_loadnf(ah, ah->curchan);

                ath9k_hw_start_nfcal(ah);
        }

        return iscaldone;
}
729
/*
 * Carrier leakage calibration fix for AR9285.
 *
 * Enables carrier-leak calibration, runs an extra HT20 parallel-cal pass
 * when applicable, then performs the regular AGC offset calibration and
 * waits for completion.  Returns false if the AGC cal does not finish
 * within AH_WAIT_TIMEOUT.
 */
static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
{
        struct ath_common *common = ath9k_hw_common(ah);

        REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
        if (IS_CHAN_HT20(chan)) {
                /* Extra parallel-cal pass for HT20 channels */
                REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
                REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
                REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
                            AR_PHY_AGC_CONTROL_FLTR_CAL);
                REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
                REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
                if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
                                   AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
                        ath_print(common, ATH_DBG_CALIBRATE, "offset "
                                  "calibration failed to complete in "
                                  "1ms; noisy ??\n");
                        return false;
                }
                REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
                REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
                REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
        }
        /* Main offset calibration pass */
        REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
        REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
        REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
        REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
        if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
                           0, AH_WAIT_TIMEOUT)) {
                ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
                          "failed to complete in 1ms; noisy ??\n");
                return false;
        }

        REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
        REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
        REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);

        return true;
}
771
/*
 * Carrier-leak calibration wrapper with retry.
 *
 * Runs ar9285_hw_cl_cal() once, then inspects the CLC table: it counts
 * the distinct CLC gain entries used by the TX gain table and how many
 * of them produced zero I0/Q0 results.  If too many are zero
 * (> AR9285_CLCAL_REDO_THRESH), the cal is redone with a temporarily
 * boosted IC50TX bias, after which the original RF2G5 value is restored.
 */
static bool ar9285_hw_clc(struct ath_hw *ah, struct ath9k_channel *chan)
{
        int i;
        u_int32_t txgain_max;
        u_int32_t clc_gain, gain_mask = 0, clc_num = 0;
        u_int32_t reg_clc_I0, reg_clc_Q0;
        u_int32_t i0_num = 0;
        u_int32_t q0_num = 0;
        u_int32_t total_num = 0;
        u_int32_t reg_rf2g5_org;
        bool retv = true;

        if (!(ar9285_hw_cl_cal(ah, chan)))
                return false;

        txgain_max = MS(REG_READ(ah, AR_PHY_TX_PWRCTRL7),
                        AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX);

        /* Count the distinct CLC gains referenced by the TX gain table */
        for (i = 0; i < (txgain_max + 1); i++) {
                clc_gain = (REG_READ(ah, (AR_PHY_TX_GAIN_TBL1 + (i << 2))) &
                            AR_PHY_TX_GAIN_CLC) >> AR_PHY_TX_GAIN_CLC_S;
                if (!(gain_mask & (1 << clc_gain))) {
                        gain_mask |= (1 << clc_gain);
                        clc_num++;
                }
        }

        /* Count zero I0/Q0 results — a sign the cal did not converge */
        for (i = 0; i < clc_num; i++) {
                reg_clc_I0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
                              & AR_PHY_CLC_I0) >> AR_PHY_CLC_I0_S;
                reg_clc_Q0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
                              & AR_PHY_CLC_Q0) >> AR_PHY_CLC_Q0_S;
                if (reg_clc_I0 == 0)
                        i0_num++;

                if (reg_clc_Q0 == 0)
                        q0_num++;
        }
        total_num = i0_num + q0_num;
        if (total_num > AR9285_CLCAL_REDO_THRESH) {
                /* Redo the cal with a boosted IC50TX bias, then restore */
                reg_rf2g5_org = REG_READ(ah, AR9285_RF2G5);
                if (AR_SREV_9285E_20(ah)) {
                        REG_WRITE(ah, AR9285_RF2G5,
                                  (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
                                  AR9285_RF2G5_IC50TX_XE_SET);
                } else {
                        REG_WRITE(ah, AR9285_RF2G5,
                                  (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
                                  AR9285_RF2G5_IC50TX_SET);
                }
                retv = ar9285_hw_cl_cal(ah, chan);
                REG_WRITE(ah, AR9285_RF2G5, reg_rf2g5_org);
        }
        return retv;
}
827
/*
 * Initial (reset-time) calibration.
 *
 * AR9271/AR9285 v1.2+ use the carrier-leak path; other chips run the
 * plain AGC offset calibration with the filter-cal workaround for
 * AR9280+ (but not AR9287+).  Afterwards it runs the initial PA cal,
 * starts NF calibration, and (re)builds the periodic calibration list
 * for chips that support per-chain cals.
 *
 * Returns false if any offset calibration times out.
 */
static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
{
        struct ath_common *common = ath9k_hw_common(ah);

        if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
                if (!ar9285_hw_clc(ah, chan))
                        return false;
        } else {
                if (AR_SREV_9280_10_OR_LATER(ah)) {
                        /* Keep the ADC on and enable the filter cal on
                         * AR9280/9285; AR9287+ does not need this */
                        if (!AR_SREV_9287_10_OR_LATER(ah))
                                REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
                                            AR_PHY_ADC_CTL_OFF_PWDADC);
                        REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
                                    AR_PHY_AGC_CONTROL_FLTR_CAL);
                }

                /* Calibrate the AGC */
                REG_WRITE(ah, AR_PHY_AGC_CONTROL,
                          REG_READ(ah, AR_PHY_AGC_CONTROL) |
                          AR_PHY_AGC_CONTROL_CAL);

                /* Poll for offset calibration complete */
                if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
                                   AR_PHY_AGC_CONTROL_CAL,
                                   0, AH_WAIT_TIMEOUT)) {
                        ath_print(common, ATH_DBG_CALIBRATE,
                                  "offset calibration failed to "
                                  "complete in 1ms; noisy environment?\n");
                        return false;
                }

                /* Undo the pre-cal register setup */
                if (AR_SREV_9280_10_OR_LATER(ah)) {
                        if (!AR_SREV_9287_10_OR_LATER(ah))
                                REG_SET_BIT(ah, AR_PHY_ADC_CTL,
                                            AR_PHY_ADC_CTL_OFF_PWDADC);
                        REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
                                    AR_PHY_AGC_CONTROL_FLTR_CAL);
                }
        }

        /* Do PA Calibration */
        ar9002_hw_pa_cal(ah, true);

        /* Do NF Calibration after DC offset and other calibrations */
        REG_WRITE(ah, AR_PHY_AGC_CONTROL,
                  REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);

        /* Rebuild the periodic calibration list from scratch */
        ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;

        /* Enable IQ, ADC Gain and ADC DC offset CALs */
        if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
                if (ar9002_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
                        INIT_CAL(&ah->adcgain_caldata);
                        INSERT_CAL(ah, &ah->adcgain_caldata);
                        ath_print(common, ATH_DBG_CALIBRATE,
                                  "enabling ADC Gain Calibration.\n");
                }
                if (ar9002_hw_iscal_supported(ah, ADC_DC_CAL)) {
                        INIT_CAL(&ah->adcdc_caldata);
                        INSERT_CAL(ah, &ah->adcdc_caldata);
                        ath_print(common, ATH_DBG_CALIBRATE,
                                  "enabling ADC DC Calibration.\n");
                }
                if (ar9002_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
                        INIT_CAL(&ah->iq_caldata);
                        INSERT_CAL(ah, &ah->iq_caldata);
                        ath_print(common, ATH_DBG_CALIBRATE,
                                  "enabling IQ Calibration.\n");
                }

                ah->cal_list_curr = ah->cal_list;

                if (ah->cal_list_curr)
                        ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
        }

        /* All cal results are stale for the new channel */
        chan->CalValid = 0;

        return true;
}
908
909static const struct ath9k_percal_data iq_cal_multi_sample = {
910 IQ_MISMATCH_CAL,
911 MAX_CAL_SAMPLES,
912 PER_MIN_LOG_COUNT,
913 ar9002_hw_iqcal_collect,
914 ar9002_hw_iqcalibrate
915};
916static const struct ath9k_percal_data iq_cal_single_sample = {
917 IQ_MISMATCH_CAL,
918 MIN_CAL_SAMPLES,
919 PER_MAX_LOG_COUNT,
920 ar9002_hw_iqcal_collect,
921 ar9002_hw_iqcalibrate
922};
923static const struct ath9k_percal_data adc_gain_cal_multi_sample = {
924 ADC_GAIN_CAL,
925 MAX_CAL_SAMPLES,
926 PER_MIN_LOG_COUNT,
927 ar9002_hw_adc_gaincal_collect,
928 ar9002_hw_adc_gaincal_calibrate
929};
930static const struct ath9k_percal_data adc_gain_cal_single_sample = {
931 ADC_GAIN_CAL,
932 MIN_CAL_SAMPLES,
933 PER_MAX_LOG_COUNT,
934 ar9002_hw_adc_gaincal_collect,
935 ar9002_hw_adc_gaincal_calibrate
936};
937static const struct ath9k_percal_data adc_dc_cal_multi_sample = {
938 ADC_DC_CAL,
939 MAX_CAL_SAMPLES,
940 PER_MIN_LOG_COUNT,
941 ar9002_hw_adc_dccal_collect,
942 ar9002_hw_adc_dccal_calibrate
943};
944static const struct ath9k_percal_data adc_dc_cal_single_sample = {
945 ADC_DC_CAL,
946 MIN_CAL_SAMPLES,
947 PER_MAX_LOG_COUNT,
948 ar9002_hw_adc_dccal_collect,
949 ar9002_hw_adc_dccal_calibrate
950};
951static const struct ath9k_percal_data adc_init_dc_cal = {
952 ADC_DC_INIT_CAL,
953 MIN_CAL_SAMPLES,
954 INIT_LOG_COUNT,
955 ar9002_hw_adc_dccal_collect,
956 ar9002_hw_adc_dccal_calibrate
957};
958
959static void ar9002_hw_init_cal_settings(struct ath_hw *ah)
960{
961 if (AR_SREV_9100(ah)) {
962 ah->iq_caldata.calData = &iq_cal_multi_sample;
963 ah->supp_cals = IQ_MISMATCH_CAL;
964 return;
965 }
966
967 if (AR_SREV_9160_10_OR_LATER(ah)) {
968 if (AR_SREV_9280_10_OR_LATER(ah)) {
969 ah->iq_caldata.calData = &iq_cal_single_sample;
970 ah->adcgain_caldata.calData =
971 &adc_gain_cal_single_sample;
972 ah->adcdc_caldata.calData =
973 &adc_dc_cal_single_sample;
974 ah->adcdc_calinitdata.calData =
975 &adc_init_dc_cal;
976 } else {
977 ah->iq_caldata.calData = &iq_cal_multi_sample;
978 ah->adcgain_caldata.calData =
979 &adc_gain_cal_multi_sample;
980 ah->adcdc_caldata.calData =
981 &adc_dc_cal_multi_sample;
982 ah->adcdc_calinitdata.calData =
983 &adc_init_dc_cal;
984 }
985 ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
986 }
987}
988
989void ar9002_hw_attach_calib_ops(struct ath_hw *ah)
990{
991 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
992 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
993
994 priv_ops->init_cal_settings = ar9002_hw_init_cal_settings;
995 priv_ops->init_cal = ar9002_hw_init_cal;
996 priv_ops->setup_calibration = ar9002_hw_setup_calibration;
997 priv_ops->iscal_supported = ar9002_hw_iscal_supported;
998
999 ops->calibrate = ar9002_hw_calibrate;
1000}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
new file mode 100644
index 000000000000..a8a8cdc04afa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -0,0 +1,598 @@
1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "ar5008_initvals.h"
19#include "ar9001_initvals.h"
20#include "ar9002_initvals.h"
21
/* General hardware code for the AR5008/AR9001/AR9002 hardware families */
23
24static bool ar9002_hw_macversion_supported(u32 macversion)
25{
26 switch (macversion) {
27 case AR_SREV_VERSION_5416_PCI:
28 case AR_SREV_VERSION_5416_PCIE:
29 case AR_SREV_VERSION_9160:
30 case AR_SREV_VERSION_9100:
31 case AR_SREV_VERSION_9280:
32 case AR_SREV_VERSION_9285:
33 case AR_SREV_VERSION_9287:
34 case AR_SREV_VERSION_9271:
35 return true;
36 default:
37 break;
38 }
39 return false;
40}
41
/*
 * Pick the register-initialization (INI) tables matching the detected chip
 * revision and register them on @ah. The branch order encodes revision
 * precedence (newest-first within each chip line); the trailing integer of
 * each INIT_INI_ARRAY is the table's column count (6 = per-mode, 2/3 =
 * address/value style tables).
 */
static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
{
	/* AR9271 (USB) has a fully self-contained table set. */
	if (AR_SREV_9271(ah)) {
		INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
			       ARRAY_SIZE(ar9271Modes_9271), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
			       ARRAY_SIZE(ar9271Common_9271), 2);
		INIT_INI_ARRAY(&ah->iniCommon_normal_cck_fir_coeff_9271,
			       ar9271Common_normal_cck_fir_coeff_9271,
			       ARRAY_SIZE(ar9271Common_normal_cck_fir_coeff_9271), 2);
		INIT_INI_ARRAY(&ah->iniCommon_japan_2484_cck_fir_coeff_9271,
			       ar9271Common_japan_2484_cck_fir_coeff_9271,
			       ARRAY_SIZE(ar9271Common_japan_2484_cck_fir_coeff_9271), 2);
		INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
			       ar9271Modes_9271_1_0_only,
			       ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
		INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
			       ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 6);
		INIT_INI_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
			       ar9271Modes_high_power_tx_gain_9271,
			       ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 6);
		INIT_INI_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
			       ar9271Modes_normal_power_tx_gain_9271,
			       ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 6);
		return;
	}

	/*
	 * PCIe chips additionally select a SerDes table based on whether
	 * CLKREQ should stay off in L1 (pcie_clock_req) or always on.
	 */
	if (AR_SREV_9287_11_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
			       ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
			       ARRAY_SIZE(ar9287Common_9287_1_1), 2);
		if (ah->config.pcie_clock_req)
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
				       ar9287PciePhy_clkreq_off_L1_9287_1_1,
				       ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
		else
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
				       ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
				       ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
				       2);
	} else if (AR_SREV_9287_10_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
			       ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
			       ARRAY_SIZE(ar9287Common_9287_1_0), 2);

		if (ah->config.pcie_clock_req)
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
				       ar9287PciePhy_clkreq_off_L1_9287_1_0,
				       ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
		else
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
				       ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
				       ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
				       2);
	} else if (AR_SREV_9285_12_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
			       ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
			       ARRAY_SIZE(ar9285Common_9285_1_2), 2);

		if (ah->config.pcie_clock_req) {
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
				       ar9285PciePhy_clkreq_off_L1_9285_1_2,
				       ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
		} else {
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
				       ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
				       ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
				       2);
		}
	} else if (AR_SREV_9285_10_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
			       ARRAY_SIZE(ar9285Modes_9285), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
			       ARRAY_SIZE(ar9285Common_9285), 2);

		if (ah->config.pcie_clock_req) {
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
				       ar9285PciePhy_clkreq_off_L1_9285,
				       ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
		} else {
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
				       ar9285PciePhy_clkreq_always_on_L1_9285,
				       ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
		}
	} else if (AR_SREV_9280_20_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
			       ARRAY_SIZE(ar9280Modes_9280_2), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
			       ARRAY_SIZE(ar9280Common_9280_2), 2);

		if (ah->config.pcie_clock_req) {
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
				       ar9280PciePhy_clkreq_off_L1_9280,
				       ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280), 2);
		} else {
			INIT_INI_ARRAY(&ah->iniPcieSerdes,
				       ar9280PciePhy_clkreq_always_on_L1_9280,
				       ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
		}
		/* AR9280 2.0 also carries a fast-clock override table. */
		INIT_INI_ARRAY(&ah->iniModesAdditional,
			       ar9280Modes_fast_clock_9280_2,
			       ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
	} else if (AR_SREV_9280_10_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
			       ARRAY_SIZE(ar9280Modes_9280), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
			       ARRAY_SIZE(ar9280Common_9280), 2);
	} else if (AR_SREV_9160_10_OR_LATER(ah)) {
		/* AR9160 and older single-chip parts also load RF bank tables. */
		INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
			       ARRAY_SIZE(ar5416Modes_9160), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
			       ARRAY_SIZE(ar5416Common_9160), 2);
		INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
			       ARRAY_SIZE(ar5416Bank0_9160), 2);
		INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
			       ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
		INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
			       ARRAY_SIZE(ar5416Bank1_9160), 2);
		INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
			       ARRAY_SIZE(ar5416Bank2_9160), 2);
		INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
			       ARRAY_SIZE(ar5416Bank3_9160), 3);
		INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
			       ARRAY_SIZE(ar5416Bank6_9160), 3);
		INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
			       ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
		INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
			       ARRAY_SIZE(ar5416Bank7_9160), 2);
		if (AR_SREV_9160_11(ah)) {
			INIT_INI_ARRAY(&ah->iniAddac,
				       ar5416Addac_91601_1,
				       ARRAY_SIZE(ar5416Addac_91601_1), 2);
		} else {
			INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
				       ARRAY_SIZE(ar5416Addac_9160), 2);
		}
	} else if (AR_SREV_9100_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
			       ARRAY_SIZE(ar5416Modes_9100), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
			       ARRAY_SIZE(ar5416Common_9100), 2);
		INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
			       ARRAY_SIZE(ar5416Bank0_9100), 2);
		INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
			       ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
		INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
			       ARRAY_SIZE(ar5416Bank1_9100), 2);
		INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
			       ARRAY_SIZE(ar5416Bank2_9100), 2);
		INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
			       ARRAY_SIZE(ar5416Bank3_9100), 3);
		INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
			       ARRAY_SIZE(ar5416Bank6_9100), 3);
		INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
			       ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
		INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
			       ARRAY_SIZE(ar5416Bank7_9100), 2);
		INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
			       ARRAY_SIZE(ar5416Addac_9100), 2);
	} else {
		/* Fallback: original AR5416 tables. */
		INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
			       ARRAY_SIZE(ar5416Modes), 6);
		INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
			       ARRAY_SIZE(ar5416Common), 2);
		INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
			       ARRAY_SIZE(ar5416Bank0), 2);
		INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
			       ARRAY_SIZE(ar5416BB_RfGain), 3);
		INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
			       ARRAY_SIZE(ar5416Bank1), 2);
		INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
			       ARRAY_SIZE(ar5416Bank2), 2);
		INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
			       ARRAY_SIZE(ar5416Bank3), 3);
		INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
			       ARRAY_SIZE(ar5416Bank6), 3);
		INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
			       ARRAY_SIZE(ar5416Bank6TPC), 3);
		INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
			       ARRAY_SIZE(ar5416Bank7), 2);
		INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
			       ARRAY_SIZE(ar5416Addac), 2);
	}
}
231
232/* Support for Japan ch.14 (2484) spread */
233void ar9002_hw_cck_chan14_spread(struct ath_hw *ah)
234{
235 if (AR_SREV_9287_11_OR_LATER(ah)) {
236 INIT_INI_ARRAY(&ah->iniCckfirNormal,
237 ar9287Common_normal_cck_fir_coeff_92871_1,
238 ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1),
239 2);
240 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
241 ar9287Common_japan_2484_cck_fir_coeff_92871_1,
242 ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1),
243 2);
244 }
245}
246
247static void ar9280_20_hw_init_rxgain_ini(struct ath_hw *ah)
248{
249 u32 rxgain_type;
250
251 if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
252 AR5416_EEP_MINOR_VER_17) {
253 rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
254
255 if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
256 INIT_INI_ARRAY(&ah->iniModesRxGain,
257 ar9280Modes_backoff_13db_rxgain_9280_2,
258 ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
259 else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
260 INIT_INI_ARRAY(&ah->iniModesRxGain,
261 ar9280Modes_backoff_23db_rxgain_9280_2,
262 ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
263 else
264 INIT_INI_ARRAY(&ah->iniModesRxGain,
265 ar9280Modes_original_rxgain_9280_2,
266 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
267 } else {
268 INIT_INI_ARRAY(&ah->iniModesRxGain,
269 ar9280Modes_original_rxgain_9280_2,
270 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
271 }
272}
273
274static void ar9280_20_hw_init_txgain_ini(struct ath_hw *ah)
275{
276 u32 txgain_type;
277
278 if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >=
279 AR5416_EEP_MINOR_VER_19) {
280 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
281
282 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
283 INIT_INI_ARRAY(&ah->iniModesTxGain,
284 ar9280Modes_high_power_tx_gain_9280_2,
285 ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
286 else
287 INIT_INI_ARRAY(&ah->iniModesTxGain,
288 ar9280Modes_original_tx_gain_9280_2,
289 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
290 } else {
291 INIT_INI_ARRAY(&ah->iniModesTxGain,
292 ar9280Modes_original_tx_gain_9280_2,
293 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
294 }
295}
296
/*
 * Register the per-chip RX- and TX-gain INI tables. AR9287 revisions have
 * dedicated tables; AR9280 2.0 picks tables from the EEPROM (via the two
 * helpers above); AR9285 1.2+ selects its TX-gain table directly from the
 * EEPROM TX-gain type and the XE 2.0 revision check.
 */
static void ar9002_hw_init_mode_gain_regs(struct ath_hw *ah)
{
	/* RX gain tables. Chips not matched here keep whatever was loaded. */
	if (AR_SREV_9287_11_OR_LATER(ah))
		INIT_INI_ARRAY(&ah->iniModesRxGain,
			       ar9287Modes_rx_gain_9287_1_1,
			       ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
	else if (AR_SREV_9287_10(ah))
		INIT_INI_ARRAY(&ah->iniModesRxGain,
			       ar9287Modes_rx_gain_9287_1_0,
			       ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
	else if (AR_SREV_9280_20(ah))
		ar9280_20_hw_init_rxgain_ini(ah);

	/* TX gain tables. */
	if (AR_SREV_9287_11_OR_LATER(ah)) {
		INIT_INI_ARRAY(&ah->iniModesTxGain,
			       ar9287Modes_tx_gain_9287_1_1,
			       ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
	} else if (AR_SREV_9287_10(ah)) {
		INIT_INI_ARRAY(&ah->iniModesTxGain,
			       ar9287Modes_tx_gain_9287_1_0,
			       ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
	} else if (AR_SREV_9280_20(ah)) {
		ar9280_20_hw_init_txgain_ini(ah);
	} else if (AR_SREV_9285_12_OR_LATER(ah)) {
		u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);

		/* txgain table: high-power vs. normal, XE 2.0 vs. 1.2 */
		if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
			if (AR_SREV_9285E_20(ah)) {
				INIT_INI_ARRAY(&ah->iniModesTxGain,
					       ar9285Modes_XE2_0_high_power,
					       ARRAY_SIZE(
						 ar9285Modes_XE2_0_high_power), 6);
			} else {
				INIT_INI_ARRAY(&ah->iniModesTxGain,
					       ar9285Modes_high_power_tx_gain_9285_1_2,
					       ARRAY_SIZE(
						 ar9285Modes_high_power_tx_gain_9285_1_2), 6);
			}
		} else {
			if (AR_SREV_9285E_20(ah)) {
				INIT_INI_ARRAY(&ah->iniModesTxGain,
					       ar9285Modes_XE2_0_normal_power,
					       ARRAY_SIZE(
						 ar9285Modes_XE2_0_normal_power), 6);
			} else {
				INIT_INI_ARRAY(&ah->iniModesTxGain,
					       ar9285Modes_original_tx_gain_9285_1_2,
					       ARRAY_SIZE(
						 ar9285Modes_original_tx_gain_9285_1_2), 6);
			}
		}
	}
}
351
/*
 * Helper for ASPM support.
 *
 * Disable PLL when in L0s as well as receiver clock when in L1.
 * This power saving option must be enabled through the SerDes.
 *
 * Programming the SerDes must go through the same 288 bit serial shift
 * register as the other analog registers. Hence the 9 writes.
 *
 * @restore:   non-zero when restoring after resume — the SerDes programming
 *             is skipped, only the D3/L1 workaround bit below is handled.
 * @power_off: non-zero when the device is about to enter D3.
 */
static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
					 int restore,
					 int power_off)
{
	u8 i;
	u32 val;

	/* Only PCI Express parts have SerDes power-save programming. */
	if (ah->is_pciexpress != true)
		return;

	/* Do not touch SerDes registers */
	if (ah->config.pcie_powersave_enable == 2)
		return;

	/* Nothing to do on restore for 11N */
	if (!restore) {
		if (AR_SREV_9280_20_OR_LATER(ah)) {
			/*
			 * AR9280 2.0 or later chips use SerDes values from the
			 * initvals.h initialized depending on chipset during
			 * __ath9k_hw_init()
			 */
			for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
				REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
					  INI_RA(&ah->iniPcieSerdes, i, 1));
			}
		} else if (AR_SREV_9280(ah) &&
			   (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
			/* AR9280 1.0: hard-coded SerDes shift-register values. */
			REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
			REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);

			/* RX shut off when elecidle is asserted */
			REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
			REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
			REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);

			/* Shut off CLKREQ active in L1 */
			if (ah->config.pcie_clock_req)
				REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
			else
				REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);

			REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
			REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
			REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);

			/* Load the new settings */
			REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);

		} else {
			/* Pre-AR9280 11n parts. */
			ENABLE_REGWRITE_BUFFER(ah);

			REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
			REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);

			/* RX shut off when elecidle is asserted */
			REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
			REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
			REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);

			/*
			 * Ignore ah->ah_config.pcie_clock_req setting for
			 * pre-AR9280 11n
			 */
			REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);

			REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
			REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
			REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);

			/* Load the new settings */
			REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);

			REGWRITE_BUFFER_FLUSH(ah);
			DISABLE_REGWRITE_BUFFER(ah);
		}

		udelay(1000);

		/* set bit 19 to allow forcing of pcie core into L1 state */
		REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);

		/* Several PCIe messages to ensure proper behaviour */
		if (ah->config.pcie_waen) {
			val = ah->config.pcie_waen;
			if (!power_off)
				val &= (~AR_WA_D3_L1_DISABLE);
		} else {
			if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
			    AR_SREV_9287(ah)) {
				val = AR9285_WA_DEFAULT;
				if (!power_off)
					val &= (~AR_WA_D3_L1_DISABLE);
			} else if (AR_SREV_9280(ah)) {
				/*
				 * On AR9280 chips bit 22 of 0x4004 needs to be
				 * set otherwise card may disappear.
				 */
				val = AR9280_WA_DEFAULT;
				if (!power_off)
					val &= (~AR_WA_D3_L1_DISABLE);
			} else
				val = AR_WA_DEFAULT;
		}

		REG_WRITE(ah, AR_WA, val);
	}

	if (power_off) {
		/*
		 * Set PCIe workaround bits
		 * bit 14 in WA register (disable L1) should only
		 * be set when device enters D3 and be cleared
		 * when device comes back to D0.
		 */
		if (ah->config.pcie_waen) {
			if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
				REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
		} else {
			if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
			      AR_SREV_9287(ah)) &&
			     (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
			    (AR_SREV_9280(ah) &&
			     (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
				REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
			}
		}
	}
}
490
491static int ar9002_hw_get_radiorev(struct ath_hw *ah)
492{
493 u32 val;
494 int i;
495
496 ENABLE_REGWRITE_BUFFER(ah);
497
498 REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
499 for (i = 0; i < 8; i++)
500 REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
501
502 REGWRITE_BUFFER_FLUSH(ah);
503 DISABLE_REGWRITE_BUFFER(ah);
504
505 val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
506 val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
507
508 return ath9k_hw_reverse_bits(val, 8);
509}
510
511int ar9002_hw_rf_claim(struct ath_hw *ah)
512{
513 u32 val;
514
515 REG_WRITE(ah, AR_PHY(0), 0x00000007);
516
517 val = ar9002_hw_get_radiorev(ah);
518 switch (val & AR_RADIO_SREV_MAJOR) {
519 case 0:
520 val = AR_RAD5133_SREV_MAJOR;
521 break;
522 case AR_RAD5133_SREV_MAJOR:
523 case AR_RAD5122_SREV_MAJOR:
524 case AR_RAD2133_SREV_MAJOR:
525 case AR_RAD2122_SREV_MAJOR:
526 break;
527 default:
528 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
529 "Radio Chip Rev 0x%02X not supported\n",
530 val & AR_RADIO_SREV_MAJOR);
531 return -EOPNOTSUPP;
532 }
533
534 ah->hw_version.analog5GhzRev = val;
535
536 return 0;
537}
538
539/*
540 * Enable ASYNC FIFO
541 *
542 * If Async FIFO is enabled, the following counters change as MAC now runs
543 * at 117 Mhz instead of 88/44MHz when async FIFO is disabled.
544 *
545 * The values below tested for ht40 2 chain.
546 * Overwrite the delay/timeouts initialized in process ini.
547 */
548void ar9002_hw_enable_async_fifo(struct ath_hw *ah)
549{
550 if (AR_SREV_9287_12_OR_LATER(ah)) {
551 REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
552 AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR);
553 REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
554 AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
555 REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
556 AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
557
558 REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
559 REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
560
561 REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
562 AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
563 REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
564 AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
565 }
566}
567
568/*
569 * We don't enable WEP aggregation on mac80211 but we keep this
570 * around for HAL unification purposes.
571 */
572void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah)
573{
574 if (AR_SREV_9287_12_OR_LATER(ah)) {
575 REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
576 AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
577 }
578}
579
/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
581void ar9002_hw_attach_ops(struct ath_hw *ah)
582{
583 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
584 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
585
586 priv_ops->init_mode_regs = ar9002_hw_init_mode_regs;
587 priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
588 priv_ops->macversion_supported = ar9002_hw_macversion_supported;
589
590 ops->config_pci_powersave = ar9002_hw_configpcipowersave;
591
592 ar5008_hw_attach_phy_ops(ah);
593 if (AR_SREV_9280_10_OR_LATER(ah))
594 ar9002_hw_attach_phy_ops(ah);
595
596 ar9002_hw_attach_calib_ops(ah);
597 ar9002_hw_attach_mac_ops(ah);
598}
diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
index 8a3bf3ab998d..dae7f3304eb8 100644
--- a/drivers/net/wireless/ath/ath9k/initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_initvals.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2010 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -14,1982 +14,9 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17static const u32 ar5416Modes[][6] = { 17#ifndef INITVALS_9002_10_H
18 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 18#define INITVALS_9002_10_H
19 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
20 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
21 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
22 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
23 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
24 { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
25 { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
26 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
27 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
28 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
29 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
30 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
31 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
32 { 0x00009844, 0x1372161e, 0x1372161e, 0x137216a0, 0x137216a0, 0x137216a0 },
33 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
34 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
35 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
36 { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
37 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
38 { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
39 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
40 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
41 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
42 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
43 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
44 { 0x00009918, 0x000001b8, 0x00000370, 0x00000268, 0x00000134, 0x00000134 },
45 { 0x00009924, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b, 0xd0058a0b },
46 { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
47 { 0x00009960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
48 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
49 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
50 { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
51 { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
52 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
53 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
54 { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
55 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
56 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
57 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
58 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
59 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
60 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
61 { 0x0000a20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
62 { 0x0000b20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
63 { 0x0000c20c, 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 },
64 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
65 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
66 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
67 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
68 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
69 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
70 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
71 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
72 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
73 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
74 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
75 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
76 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
77 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
78 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
79 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
80 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
81};
82
83static const u32 ar5416Common[][2] = {
84 { 0x0000000c, 0x00000000 },
85 { 0x00000030, 0x00020015 },
86 { 0x00000034, 0x00000005 },
87 { 0x00000040, 0x00000000 },
88 { 0x00000044, 0x00000008 },
89 { 0x00000048, 0x00000008 },
90 { 0x0000004c, 0x00000010 },
91 { 0x00000050, 0x00000000 },
92 { 0x00000054, 0x0000001f },
93 { 0x00000800, 0x00000000 },
94 { 0x00000804, 0x00000000 },
95 { 0x00000808, 0x00000000 },
96 { 0x0000080c, 0x00000000 },
97 { 0x00000810, 0x00000000 },
98 { 0x00000814, 0x00000000 },
99 { 0x00000818, 0x00000000 },
100 { 0x0000081c, 0x00000000 },
101 { 0x00000820, 0x00000000 },
102 { 0x00000824, 0x00000000 },
103 { 0x00001040, 0x002ffc0f },
104 { 0x00001044, 0x002ffc0f },
105 { 0x00001048, 0x002ffc0f },
106 { 0x0000104c, 0x002ffc0f },
107 { 0x00001050, 0x002ffc0f },
108 { 0x00001054, 0x002ffc0f },
109 { 0x00001058, 0x002ffc0f },
110 { 0x0000105c, 0x002ffc0f },
111 { 0x00001060, 0x002ffc0f },
112 { 0x00001064, 0x002ffc0f },
113 { 0x00001230, 0x00000000 },
114 { 0x00001270, 0x00000000 },
115 { 0x00001038, 0x00000000 },
116 { 0x00001078, 0x00000000 },
117 { 0x000010b8, 0x00000000 },
118 { 0x000010f8, 0x00000000 },
119 { 0x00001138, 0x00000000 },
120 { 0x00001178, 0x00000000 },
121 { 0x000011b8, 0x00000000 },
122 { 0x000011f8, 0x00000000 },
123 { 0x00001238, 0x00000000 },
124 { 0x00001278, 0x00000000 },
125 { 0x000012b8, 0x00000000 },
126 { 0x000012f8, 0x00000000 },
127 { 0x00001338, 0x00000000 },
128 { 0x00001378, 0x00000000 },
129 { 0x000013b8, 0x00000000 },
130 { 0x000013f8, 0x00000000 },
131 { 0x00001438, 0x00000000 },
132 { 0x00001478, 0x00000000 },
133 { 0x000014b8, 0x00000000 },
134 { 0x000014f8, 0x00000000 },
135 { 0x00001538, 0x00000000 },
136 { 0x00001578, 0x00000000 },
137 { 0x000015b8, 0x00000000 },
138 { 0x000015f8, 0x00000000 },
139 { 0x00001638, 0x00000000 },
140 { 0x00001678, 0x00000000 },
141 { 0x000016b8, 0x00000000 },
142 { 0x000016f8, 0x00000000 },
143 { 0x00001738, 0x00000000 },
144 { 0x00001778, 0x00000000 },
145 { 0x000017b8, 0x00000000 },
146 { 0x000017f8, 0x00000000 },
147 { 0x0000103c, 0x00000000 },
148 { 0x0000107c, 0x00000000 },
149 { 0x000010bc, 0x00000000 },
150 { 0x000010fc, 0x00000000 },
151 { 0x0000113c, 0x00000000 },
152 { 0x0000117c, 0x00000000 },
153 { 0x000011bc, 0x00000000 },
154 { 0x000011fc, 0x00000000 },
155 { 0x0000123c, 0x00000000 },
156 { 0x0000127c, 0x00000000 },
157 { 0x000012bc, 0x00000000 },
158 { 0x000012fc, 0x00000000 },
159 { 0x0000133c, 0x00000000 },
160 { 0x0000137c, 0x00000000 },
161 { 0x000013bc, 0x00000000 },
162 { 0x000013fc, 0x00000000 },
163 { 0x0000143c, 0x00000000 },
164 { 0x0000147c, 0x00000000 },
165 { 0x00004030, 0x00000002 },
166 { 0x0000403c, 0x00000002 },
167 { 0x00007010, 0x00000000 },
168 { 0x00007038, 0x000004c2 },
169 { 0x00008004, 0x00000000 },
170 { 0x00008008, 0x00000000 },
171 { 0x0000800c, 0x00000000 },
172 { 0x00008018, 0x00000700 },
173 { 0x00008020, 0x00000000 },
174 { 0x00008038, 0x00000000 },
175 { 0x0000803c, 0x00000000 },
176 { 0x00008048, 0x40000000 },
177 { 0x00008054, 0x00000000 },
178 { 0x00008058, 0x00000000 },
179 { 0x0000805c, 0x000fc78f },
180 { 0x00008060, 0x0000000f },
181 { 0x00008064, 0x00000000 },
182 { 0x000080c0, 0x2a82301a },
183 { 0x000080c4, 0x05dc01e0 },
184 { 0x000080c8, 0x1f402710 },
185 { 0x000080cc, 0x01f40000 },
186 { 0x000080d0, 0x00001e00 },
187 { 0x000080d4, 0x00000000 },
188 { 0x000080d8, 0x00400000 },
189 { 0x000080e0, 0xffffffff },
190 { 0x000080e4, 0x0000ffff },
191 { 0x000080e8, 0x003f3f3f },
192 { 0x000080ec, 0x00000000 },
193 { 0x000080f0, 0x00000000 },
194 { 0x000080f4, 0x00000000 },
195 { 0x000080f8, 0x00000000 },
196 { 0x000080fc, 0x00020000 },
197 { 0x00008100, 0x00020000 },
198 { 0x00008104, 0x00000001 },
199 { 0x00008108, 0x00000052 },
200 { 0x0000810c, 0x00000000 },
201 { 0x00008110, 0x00000168 },
202 { 0x00008118, 0x000100aa },
203 { 0x0000811c, 0x00003210 },
204 { 0x00008124, 0x00000000 },
205 { 0x00008128, 0x00000000 },
206 { 0x0000812c, 0x00000000 },
207 { 0x00008130, 0x00000000 },
208 { 0x00008134, 0x00000000 },
209 { 0x00008138, 0x00000000 },
210 { 0x0000813c, 0x00000000 },
211 { 0x00008144, 0xffffffff },
212 { 0x00008168, 0x00000000 },
213 { 0x0000816c, 0x00000000 },
214 { 0x00008170, 0x32143320 },
215 { 0x00008174, 0xfaa4fa50 },
216 { 0x00008178, 0x00000100 },
217 { 0x0000817c, 0x00000000 },
218 { 0x000081c4, 0x00000000 },
219 { 0x000081ec, 0x00000000 },
220 { 0x000081f0, 0x00000000 },
221 { 0x000081f4, 0x00000000 },
222 { 0x000081f8, 0x00000000 },
223 { 0x000081fc, 0x00000000 },
224 { 0x00008200, 0x00000000 },
225 { 0x00008204, 0x00000000 },
226 { 0x00008208, 0x00000000 },
227 { 0x0000820c, 0x00000000 },
228 { 0x00008210, 0x00000000 },
229 { 0x00008214, 0x00000000 },
230 { 0x00008218, 0x00000000 },
231 { 0x0000821c, 0x00000000 },
232 { 0x00008220, 0x00000000 },
233 { 0x00008224, 0x00000000 },
234 { 0x00008228, 0x00000000 },
235 { 0x0000822c, 0x00000000 },
236 { 0x00008230, 0x00000000 },
237 { 0x00008234, 0x00000000 },
238 { 0x00008238, 0x00000000 },
239 { 0x0000823c, 0x00000000 },
240 { 0x00008240, 0x00100000 },
241 { 0x00008244, 0x0010f400 },
242 { 0x00008248, 0x00000100 },
243 { 0x0000824c, 0x0001e800 },
244 { 0x00008250, 0x00000000 },
245 { 0x00008254, 0x00000000 },
246 { 0x00008258, 0x00000000 },
247 { 0x0000825c, 0x400000ff },
248 { 0x00008260, 0x00080922 },
249 { 0x00008264, 0xa8000010 },
250 { 0x00008270, 0x00000000 },
251 { 0x00008274, 0x40000000 },
252 { 0x00008278, 0x003e4180 },
253 { 0x0000827c, 0x00000000 },
254 { 0x00008284, 0x0000002c },
255 { 0x00008288, 0x0000002c },
256 { 0x0000828c, 0x00000000 },
257 { 0x00008294, 0x00000000 },
258 { 0x00008298, 0x00000000 },
259 { 0x00008300, 0x00000000 },
260 { 0x00008304, 0x00000000 },
261 { 0x00008308, 0x00000000 },
262 { 0x0000830c, 0x00000000 },
263 { 0x00008310, 0x00000000 },
264 { 0x00008314, 0x00000000 },
265 { 0x00008318, 0x00000000 },
266 { 0x00008328, 0x00000000 },
267 { 0x0000832c, 0x00000007 },
268 { 0x00008330, 0x00000302 },
269 { 0x00008334, 0x00000e00 },
270 { 0x00008338, 0x00070000 },
271 { 0x0000833c, 0x00000000 },
272 { 0x00008340, 0x000107ff },
273 { 0x00009808, 0x00000000 },
274 { 0x0000980c, 0xad848e19 },
275 { 0x00009810, 0x7d14e000 },
276 { 0x00009814, 0x9c0a9f6b },
277 { 0x0000981c, 0x00000000 },
278 { 0x0000982c, 0x0000a000 },
279 { 0x00009830, 0x00000000 },
280 { 0x0000983c, 0x00200400 },
281 { 0x00009840, 0x206a002e },
282 { 0x0000984c, 0x1284233c },
283 { 0x00009854, 0x00000859 },
284 { 0x00009900, 0x00000000 },
285 { 0x00009904, 0x00000000 },
286 { 0x00009908, 0x00000000 },
287 { 0x0000990c, 0x00000000 },
288 { 0x0000991c, 0x10000fff },
289 { 0x00009920, 0x05100000 },
290 { 0x0000a920, 0x05100000 },
291 { 0x0000b920, 0x05100000 },
292 { 0x00009928, 0x00000001 },
293 { 0x0000992c, 0x00000004 },
294 { 0x00009934, 0x1e1f2022 },
295 { 0x00009938, 0x0a0b0c0d },
296 { 0x0000993c, 0x00000000 },
297 { 0x00009948, 0x9280b212 },
298 { 0x0000994c, 0x00020028 },
299 { 0x00009954, 0x5d50e188 },
300 { 0x00009958, 0x00081fff },
301 { 0x0000c95c, 0x004b6a8e },
302 { 0x0000c968, 0x000003ce },
303 { 0x00009970, 0x190fb515 },
304 { 0x00009974, 0x00000000 },
305 { 0x00009978, 0x00000001 },
306 { 0x0000997c, 0x00000000 },
307 { 0x00009980, 0x00000000 },
308 { 0x00009984, 0x00000000 },
309 { 0x00009988, 0x00000000 },
310 { 0x0000998c, 0x00000000 },
311 { 0x00009990, 0x00000000 },
312 { 0x00009994, 0x00000000 },
313 { 0x00009998, 0x00000000 },
314 { 0x0000999c, 0x00000000 },
315 { 0x000099a0, 0x00000000 },
316 { 0x000099a4, 0x00000001 },
317 { 0x000099a8, 0x001fff00 },
318 { 0x000099ac, 0x00000000 },
319 { 0x000099b0, 0x03051000 },
320 { 0x000099dc, 0x00000000 },
321 { 0x000099e0, 0x00000200 },
322 { 0x000099e4, 0xaaaaaaaa },
323 { 0x000099e8, 0x3c466478 },
324 { 0x000099ec, 0x000000aa },
325 { 0x000099fc, 0x00001042 },
326 { 0x00009b00, 0x00000000 },
327 { 0x00009b04, 0x00000001 },
328 { 0x00009b08, 0x00000002 },
329 { 0x00009b0c, 0x00000003 },
330 { 0x00009b10, 0x00000004 },
331 { 0x00009b14, 0x00000005 },
332 { 0x00009b18, 0x00000008 },
333 { 0x00009b1c, 0x00000009 },
334 { 0x00009b20, 0x0000000a },
335 { 0x00009b24, 0x0000000b },
336 { 0x00009b28, 0x0000000c },
337 { 0x00009b2c, 0x0000000d },
338 { 0x00009b30, 0x00000010 },
339 { 0x00009b34, 0x00000011 },
340 { 0x00009b38, 0x00000012 },
341 { 0x00009b3c, 0x00000013 },
342 { 0x00009b40, 0x00000014 },
343 { 0x00009b44, 0x00000015 },
344 { 0x00009b48, 0x00000018 },
345 { 0x00009b4c, 0x00000019 },
346 { 0x00009b50, 0x0000001a },
347 { 0x00009b54, 0x0000001b },
348 { 0x00009b58, 0x0000001c },
349 { 0x00009b5c, 0x0000001d },
350 { 0x00009b60, 0x00000020 },
351 { 0x00009b64, 0x00000021 },
352 { 0x00009b68, 0x00000022 },
353 { 0x00009b6c, 0x00000023 },
354 { 0x00009b70, 0x00000024 },
355 { 0x00009b74, 0x00000025 },
356 { 0x00009b78, 0x00000028 },
357 { 0x00009b7c, 0x00000029 },
358 { 0x00009b80, 0x0000002a },
359 { 0x00009b84, 0x0000002b },
360 { 0x00009b88, 0x0000002c },
361 { 0x00009b8c, 0x0000002d },
362 { 0x00009b90, 0x00000030 },
363 { 0x00009b94, 0x00000031 },
364 { 0x00009b98, 0x00000032 },
365 { 0x00009b9c, 0x00000033 },
366 { 0x00009ba0, 0x00000034 },
367 { 0x00009ba4, 0x00000035 },
368 { 0x00009ba8, 0x00000035 },
369 { 0x00009bac, 0x00000035 },
370 { 0x00009bb0, 0x00000035 },
371 { 0x00009bb4, 0x00000035 },
372 { 0x00009bb8, 0x00000035 },
373 { 0x00009bbc, 0x00000035 },
374 { 0x00009bc0, 0x00000035 },
375 { 0x00009bc4, 0x00000035 },
376 { 0x00009bc8, 0x00000035 },
377 { 0x00009bcc, 0x00000035 },
378 { 0x00009bd0, 0x00000035 },
379 { 0x00009bd4, 0x00000035 },
380 { 0x00009bd8, 0x00000035 },
381 { 0x00009bdc, 0x00000035 },
382 { 0x00009be0, 0x00000035 },
383 { 0x00009be4, 0x00000035 },
384 { 0x00009be8, 0x00000035 },
385 { 0x00009bec, 0x00000035 },
386 { 0x00009bf0, 0x00000035 },
387 { 0x00009bf4, 0x00000035 },
388 { 0x00009bf8, 0x00000010 },
389 { 0x00009bfc, 0x0000001a },
390 { 0x0000a210, 0x40806333 },
391 { 0x0000a214, 0x00106c10 },
392 { 0x0000a218, 0x009c4060 },
393 { 0x0000a220, 0x018830c6 },
394 { 0x0000a224, 0x00000400 },
395 { 0x0000a228, 0x00000bb5 },
396 { 0x0000a22c, 0x00000011 },
397 { 0x0000a234, 0x20202020 },
398 { 0x0000a238, 0x20202020 },
399 { 0x0000a23c, 0x13c889af },
400 { 0x0000a240, 0x38490a20 },
401 { 0x0000a244, 0x00007bb6 },
402 { 0x0000a248, 0x0fff3ffc },
403 { 0x0000a24c, 0x00000001 },
404 { 0x0000a250, 0x0000a000 },
405 { 0x0000a254, 0x00000000 },
406 { 0x0000a258, 0x0cc75380 },
407 { 0x0000a25c, 0x0f0f0f01 },
408 { 0x0000a260, 0xdfa91f01 },
409 { 0x0000a268, 0x00000000 },
410 { 0x0000a26c, 0x0e79e5c6 },
411 { 0x0000b26c, 0x0e79e5c6 },
412 { 0x0000c26c, 0x0e79e5c6 },
413 { 0x0000d270, 0x00820820 },
414 { 0x0000a278, 0x1ce739ce },
415 { 0x0000a27c, 0x051701ce },
416 { 0x0000a338, 0x00000000 },
417 { 0x0000a33c, 0x00000000 },
418 { 0x0000a340, 0x00000000 },
419 { 0x0000a344, 0x00000000 },
420 { 0x0000a348, 0x3fffffff },
421 { 0x0000a34c, 0x3fffffff },
422 { 0x0000a350, 0x3fffffff },
423 { 0x0000a354, 0x0003ffff },
424 { 0x0000a358, 0x79a8aa1f },
425 { 0x0000d35c, 0x07ffffef },
426 { 0x0000d360, 0x0fffffe7 },
427 { 0x0000d364, 0x17ffffe5 },
428 { 0x0000d368, 0x1fffffe4 },
429 { 0x0000d36c, 0x37ffffe3 },
430 { 0x0000d370, 0x3fffffe3 },
431 { 0x0000d374, 0x57ffffe3 },
432 { 0x0000d378, 0x5fffffe2 },
433 { 0x0000d37c, 0x7fffffe2 },
434 { 0x0000d380, 0x7f3c7bba },
435 { 0x0000d384, 0xf3307ff0 },
436 { 0x0000a388, 0x08000000 },
437 { 0x0000a38c, 0x20202020 },
438 { 0x0000a390, 0x20202020 },
439 { 0x0000a394, 0x1ce739ce },
440 { 0x0000a398, 0x000001ce },
441 { 0x0000a39c, 0x00000001 },
442 { 0x0000a3a0, 0x00000000 },
443 { 0x0000a3a4, 0x00000000 },
444 { 0x0000a3a8, 0x00000000 },
445 { 0x0000a3ac, 0x00000000 },
446 { 0x0000a3b0, 0x00000000 },
447 { 0x0000a3b4, 0x00000000 },
448 { 0x0000a3b8, 0x00000000 },
449 { 0x0000a3bc, 0x00000000 },
450 { 0x0000a3c0, 0x00000000 },
451 { 0x0000a3c4, 0x00000000 },
452 { 0x0000a3c8, 0x00000246 },
453 { 0x0000a3cc, 0x20202020 },
454 { 0x0000a3d0, 0x20202020 },
455 { 0x0000a3d4, 0x20202020 },
456 { 0x0000a3dc, 0x1ce739ce },
457 { 0x0000a3e0, 0x000001ce },
458};
459
/*
 * ar5416Bank0 - RF "bank 0" programming table.
 * Each row is { register offset, value }.  The 0x98xx offsets match the
 * analog/RF register range used by the other bank tables below.
 * Values are opaque vendor-supplied init constants; do not edit by hand.
 */
460static const u32 ar5416Bank0[][2] = {
461 { 0x000098b0, 0x1e5795e5 },
462 { 0x000098e0, 0x02008020 },
463};
464
/*
 * ar5416BB_RfGain - baseband RF gain table (registers 0x9a00-0x9afc).
 * Each row is { register offset, value_col1, value_col2 }; the two value
 * columns are alternative settings selected at load time (presumably per
 * band/mode - TODO confirm against the table-write helper's column index).
 * Opaque vendor calibration data; do not edit by hand.
 */
465static const u32 ar5416BB_RfGain[][3] = {
466 { 0x00009a00, 0x00000000, 0x00000000 },
467 { 0x00009a04, 0x00000040, 0x00000040 },
468 { 0x00009a08, 0x00000080, 0x00000080 },
469 { 0x00009a0c, 0x000001a1, 0x00000141 },
470 { 0x00009a10, 0x000001e1, 0x00000181 },
471 { 0x00009a14, 0x00000021, 0x000001c1 },
472 { 0x00009a18, 0x00000061, 0x00000001 },
473 { 0x00009a1c, 0x00000168, 0x00000041 },
474 { 0x00009a20, 0x000001a8, 0x000001a8 },
475 { 0x00009a24, 0x000001e8, 0x000001e8 },
476 { 0x00009a28, 0x00000028, 0x00000028 },
477 { 0x00009a2c, 0x00000068, 0x00000068 },
478 { 0x00009a30, 0x00000189, 0x000000a8 },
479 { 0x00009a34, 0x000001c9, 0x00000169 },
480 { 0x00009a38, 0x00000009, 0x000001a9 },
481 { 0x00009a3c, 0x00000049, 0x000001e9 },
482 { 0x00009a40, 0x00000089, 0x00000029 },
483 { 0x00009a44, 0x00000170, 0x00000069 },
484 { 0x00009a48, 0x000001b0, 0x00000190 },
485 { 0x00009a4c, 0x000001f0, 0x000001d0 },
486 { 0x00009a50, 0x00000030, 0x00000010 },
487 { 0x00009a54, 0x00000070, 0x00000050 },
488 { 0x00009a58, 0x00000191, 0x00000090 },
489 { 0x00009a5c, 0x000001d1, 0x00000151 },
490 { 0x00009a60, 0x00000011, 0x00000191 },
491 { 0x00009a64, 0x00000051, 0x000001d1 },
492 { 0x00009a68, 0x00000091, 0x00000011 },
493 { 0x00009a6c, 0x000001b8, 0x00000051 },
494 { 0x00009a70, 0x000001f8, 0x00000198 },
495 { 0x00009a74, 0x00000038, 0x000001d8 },
496 { 0x00009a78, 0x00000078, 0x00000018 },
497 { 0x00009a7c, 0x00000199, 0x00000058 },
498 { 0x00009a80, 0x000001d9, 0x00000098 },
499 { 0x00009a84, 0x00000019, 0x00000159 },
500 { 0x00009a88, 0x00000059, 0x00000199 },
501 { 0x00009a8c, 0x00000099, 0x000001d9 },
502 { 0x00009a90, 0x000000d9, 0x00000019 },
503 { 0x00009a94, 0x000000f9, 0x00000059 },
504 { 0x00009a98, 0x000000f9, 0x00000099 },
505 { 0x00009a9c, 0x000000f9, 0x000000d9 },
506 { 0x00009aa0, 0x000000f9, 0x000000f9 },
507 { 0x00009aa4, 0x000000f9, 0x000000f9 },
508 { 0x00009aa8, 0x000000f9, 0x000000f9 },
509 { 0x00009aac, 0x000000f9, 0x000000f9 },
510 { 0x00009ab0, 0x000000f9, 0x000000f9 },
511 { 0x00009ab4, 0x000000f9, 0x000000f9 },
512 { 0x00009ab8, 0x000000f9, 0x000000f9 },
513 { 0x00009abc, 0x000000f9, 0x000000f9 },
514 { 0x00009ac0, 0x000000f9, 0x000000f9 },
515 { 0x00009ac4, 0x000000f9, 0x000000f9 },
516 { 0x00009ac8, 0x000000f9, 0x000000f9 },
517 { 0x00009acc, 0x000000f9, 0x000000f9 },
518 { 0x00009ad0, 0x000000f9, 0x000000f9 },
519 { 0x00009ad4, 0x000000f9, 0x000000f9 },
520 { 0x00009ad8, 0x000000f9, 0x000000f9 },
521 { 0x00009adc, 0x000000f9, 0x000000f9 },
522 { 0x00009ae0, 0x000000f9, 0x000000f9 },
523 { 0x00009ae4, 0x000000f9, 0x000000f9 },
524 { 0x00009ae8, 0x000000f9, 0x000000f9 },
525 { 0x00009aec, 0x000000f9, 0x000000f9 },
526 { 0x00009af0, 0x000000f9, 0x000000f9 },
527 { 0x00009af4, 0x000000f9, 0x000000f9 },
528 { 0x00009af8, 0x000000f9, 0x000000f9 },
529 { 0x00009afc, 0x000000f9, 0x000000f9 },
530};
531
/*
 * ar5416Bank1 - RF "bank 1" programming table: { register offset, value }.
 * Opaque vendor init constants; do not edit by hand.
 */
532static const u32 ar5416Bank1[][2] = {
533 { 0x000098b0, 0x02108421 },
534 { 0x000098ec, 0x00000008 },
535};
536
/*
 * ar5416Bank2 - RF "bank 2" programming table: { register offset, value }.
 * Opaque vendor init constants; do not edit by hand.
 */
537static const u32 ar5416Bank2[][2] = {
538 { 0x000098b0, 0x0e73ff17 },
539 { 0x000098e0, 0x00000420 },
540};
541
/*
 * ar5416Bank3 - RF "bank 3" programming table.
 * Row format: { register offset, value_col1, value_col2 }; the second value
 * column is an alternative setting chosen at load time (presumably per
 * band/mode - TODO confirm against the table-write helper's column index).
 */
542static const u32 ar5416Bank3[][3] = {
543 { 0x000098f0, 0x01400018, 0x01c00018 },
544};
545
/*
 * ar5416Bank6 - RF "bank 6" programming table.
 * Note that almost every row targets the same offset (0x989c): the bank is
 * written as a serial sequence of writes to that register, with a final
 * write to 0x98d0 latching the bank.  Columns 1-2 are alternative values
 * selected at load time.  Opaque vendor data; do not edit by hand.
 */
546static const u32 ar5416Bank6[][3] = {
547
548 { 0x0000989c, 0x00000000, 0x00000000 },
549 { 0x0000989c, 0x00000000, 0x00000000 },
550 { 0x0000989c, 0x00000000, 0x00000000 },
551 { 0x0000989c, 0x00e00000, 0x00e00000 },
552 { 0x0000989c, 0x005e0000, 0x005e0000 },
553 { 0x0000989c, 0x00120000, 0x00120000 },
554 { 0x0000989c, 0x00620000, 0x00620000 },
555 { 0x0000989c, 0x00020000, 0x00020000 },
556 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
557 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
558 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
559 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
560 { 0x0000989c, 0x005f0000, 0x005f0000 },
561 { 0x0000989c, 0x00870000, 0x00870000 },
562 { 0x0000989c, 0x00f90000, 0x00f90000 },
563 { 0x0000989c, 0x007b0000, 0x007b0000 },
564 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
565 { 0x0000989c, 0x00f50000, 0x00f50000 },
566 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
567 { 0x0000989c, 0x00110000, 0x00110000 },
568 { 0x0000989c, 0x006100a8, 0x006100a8 },
569 { 0x0000989c, 0x004210a2, 0x004210a2 },
570 { 0x0000989c, 0x0014008f, 0x0014008f },
571 { 0x0000989c, 0x00c40003, 0x00c40003 },
572 { 0x0000989c, 0x003000f2, 0x003000f2 },
573 { 0x0000989c, 0x00440016, 0x00440016 },
574 { 0x0000989c, 0x00410040, 0x00410040 },
575 { 0x0000989c, 0x0001805e, 0x0001805e },
576 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
577 { 0x0000989c, 0x000000f1, 0x000000f1 },
578 { 0x0000989c, 0x00002081, 0x00002081 },
579 { 0x0000989c, 0x000000d4, 0x000000d4 },
580 { 0x000098d0, 0x0000000f, 0x0010000f },
581};
582
/*
 * ar5416Bank6TPC - variant of ar5416Bank6 used when per-chain transmit
 * power control (TPC) is enabled; it differs from ar5416Bank6 only in a
 * handful of rows near the end of the serial write sequence.
 * Same layout: repeated writes to 0x989c, latched by the final 0x98d0 row.
 */
583static const u32 ar5416Bank6TPC[][3] = {
584 { 0x0000989c, 0x00000000, 0x00000000 },
585 { 0x0000989c, 0x00000000, 0x00000000 },
586 { 0x0000989c, 0x00000000, 0x00000000 },
587 { 0x0000989c, 0x00e00000, 0x00e00000 },
588 { 0x0000989c, 0x005e0000, 0x005e0000 },
589 { 0x0000989c, 0x00120000, 0x00120000 },
590 { 0x0000989c, 0x00620000, 0x00620000 },
591 { 0x0000989c, 0x00020000, 0x00020000 },
592 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
593 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
594 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
595 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
596 { 0x0000989c, 0x005f0000, 0x005f0000 },
597 { 0x0000989c, 0x00870000, 0x00870000 },
598 { 0x0000989c, 0x00f90000, 0x00f90000 },
599 { 0x0000989c, 0x007b0000, 0x007b0000 },
600 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
601 { 0x0000989c, 0x00f50000, 0x00f50000 },
602 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
603 { 0x0000989c, 0x00110000, 0x00110000 },
604 { 0x0000989c, 0x006100a8, 0x006100a8 },
605 { 0x0000989c, 0x00423022, 0x00423022 },
606 { 0x0000989c, 0x201400df, 0x201400df },
607 { 0x0000989c, 0x00c40002, 0x00c40002 },
608 { 0x0000989c, 0x003000f2, 0x003000f2 },
609 { 0x0000989c, 0x00440016, 0x00440016 },
610 { 0x0000989c, 0x00410040, 0x00410040 },
611 { 0x0000989c, 0x0001805e, 0x0001805e },
612 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
613 { 0x0000989c, 0x000000e1, 0x000000e1 },
614 { 0x0000989c, 0x00007081, 0x00007081 },
615 { 0x0000989c, 0x000000d4, 0x000000d4 },
616 { 0x000098d0, 0x0000000f, 0x0010000f },
617};
618
/*
 * ar5416Bank7 - RF "bank 7" programming table: serial writes to 0x989c
 * latched by the final write to 0x98cc.  Opaque vendor init constants.
 */
619static const u32 ar5416Bank7[][2] = {
620 { 0x0000989c, 0x00000500 },
621 { 0x0000989c, 0x00000800 },
622 { 0x000098cc, 0x0000000e },
623};
624
/*
 * ar5416Addac - ADC/DAC (ADDAC) programming sequence.
 * Like the RF banks, this is a serial sequence of writes to 0x989c with a
 * final latch write to 0x98cc.  Values are vendor calibration constants;
 * do not edit by hand.
 */
625static const u32 ar5416Addac[][2] = {
626 {0x0000989c, 0x00000000 },
627 {0x0000989c, 0x00000003 },
628 {0x0000989c, 0x00000000 },
629 {0x0000989c, 0x0000000c },
630 {0x0000989c, 0x00000000 },
631 {0x0000989c, 0x00000030 },
632 {0x0000989c, 0x00000000 },
633 {0x0000989c, 0x00000000 },
634 {0x0000989c, 0x00000000 },
635 {0x0000989c, 0x00000000 },
636 {0x0000989c, 0x00000000 },
637 {0x0000989c, 0x00000000 },
638 {0x0000989c, 0x00000000 },
639 {0x0000989c, 0x00000000 },
640 {0x0000989c, 0x00000000 },
641 {0x0000989c, 0x00000000 },
642 {0x0000989c, 0x00000000 },
643 {0x0000989c, 0x00000000 },
644 {0x0000989c, 0x00000060 },
645 {0x0000989c, 0x00000000 },
646 {0x0000989c, 0x00000000 },
647 {0x0000989c, 0x00000000 },
648 {0x0000989c, 0x00000000 },
649 {0x0000989c, 0x00000000 },
650 {0x0000989c, 0x00000000 },
651 {0x0000989c, 0x00000000 },
652 {0x0000989c, 0x00000000 },
653 {0x0000989c, 0x00000000 },
654 {0x0000989c, 0x00000000 },
655 {0x0000989c, 0x00000000 },
656 {0x0000989c, 0x00000000 },
657 {0x0000989c, 0x00000058 },
658 {0x0000989c, 0x00000000 },
659 {0x0000989c, 0x00000000 },
660 {0x0000989c, 0x00000000 },
661 {0x0000989c, 0x00000000 },
662 {0x000098cc, 0x00000000 },
663};
664
/*
 * ar5416Modes_9100 - per-mode register initvals for the AR9100 family.
 * Row format: { register offset, v1, v2, v3, v4, v5 } where columns 1-5 are
 * the values for the different channel/operating modes (which column maps to
 * which mode is decided by the caller's mode index - TODO confirm against
 * the mode enum used by the initval writer).
 *
 * The TB243 conditional selects alternative values for the 0x9960/0xa960/
 * 0xb960/0x9964 registers; only one variant is compiled in.
 * Opaque vendor data; do not edit by hand.
 */
665static const u32 ar5416Modes_9100[][6] = {
666 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
667 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
668 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
669 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
670 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
671 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
672 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
673 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
674 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
675 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
676 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
677 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
678 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
679 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
680 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
681 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
682 { 0x00009850, 0x6d48b4e2, 0x6d48b4e2, 0x6d48b0e2, 0x6d48b0e2, 0x6d48b0e2 },
683 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec86d2e, 0x7ec84d2e, 0x7ec82d2e },
684 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e },
685 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
686 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
687 { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
688 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
689 { 0x00009914, 0x000007d0, 0x000007d0, 0x00000898, 0x00000898, 0x000007d0 },
690 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
691 { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a11, 0xd00a8a0d, 0xd00a8a0d },
692 { 0x00009940, 0x00754604, 0x00754604, 0xfff81204, 0xfff81204, 0xfff81204 },
693 { 0x00009944, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020, 0xdfb81020 },
694 { 0x00009954, 0x5f3ca3de, 0x5f3ca3de, 0xe250a51e, 0xe250a51e, 0xe250a51e },
695 { 0x00009958, 0x2108ecff, 0x2108ecff, 0x3388ffff, 0x3388ffff, 0x3388ffff },
696#ifdef TB243
697 { 0x00009960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
698 { 0x0000a960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
699 { 0x0000b960, 0x00000900, 0x00000900, 0x00009b40, 0x00009b40, 0x00012d80 },
700 { 0x00009964, 0x00000000, 0x00000000, 0x00002210, 0x00002210, 0x00001120 },
701#else
702 { 0x00009960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
703 { 0x0000a960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
704 { 0x0000b960, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0, 0x0001bfc0 },
705 { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
706#endif
707 { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a1000, 0x001a0c00, 0x001a0c00 },
708 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
709 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
710 { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
711 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
712 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
713 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
714 { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
715 { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
716 { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
717 { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
718 { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
719 { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
720 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
721 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
722 { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
723 { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
724 { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
725 { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
726 { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
727 { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
728 { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
729 { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
730 { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
731 { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
732 { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
733 { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
734 { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
735 { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
736 { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
737};
738
/*
 * ar5416Common_9100 - mode-independent register defaults for the AR9100
 * family, written once at hardware init.  Row format: { register offset,
 * value }.  Offsets span the MAC (0x0000-0x08xx), QCU/DCU (0x10xx-0x17xx),
 * PCU (0x80xx-0x83xx) and baseband (0x98xx-0xd3xx) ranges - labels per the
 * usual Atheros register map; confirm against the chip register headers.
 * Values are opaque vendor init constants; do not edit by hand.
 */
739static const u32 ar5416Common_9100[][2] = {
740 { 0x0000000c, 0x00000000 },
741 { 0x00000030, 0x00020015 },
742 { 0x00000034, 0x00000005 },
743 { 0x00000040, 0x00000000 },
744 { 0x00000044, 0x00000008 },
745 { 0x00000048, 0x00000008 },
746 { 0x0000004c, 0x00000010 },
747 { 0x00000050, 0x00000000 },
748 { 0x00000054, 0x0000001f },
749 { 0x00000800, 0x00000000 },
750 { 0x00000804, 0x00000000 },
751 { 0x00000808, 0x00000000 },
752 { 0x0000080c, 0x00000000 },
753 { 0x00000810, 0x00000000 },
754 { 0x00000814, 0x00000000 },
755 { 0x00000818, 0x00000000 },
756 { 0x0000081c, 0x00000000 },
757 { 0x00000820, 0x00000000 },
758 { 0x00000824, 0x00000000 },
759 { 0x00001040, 0x002ffc0f },
760 { 0x00001044, 0x002ffc0f },
761 { 0x00001048, 0x002ffc0f },
762 { 0x0000104c, 0x002ffc0f },
763 { 0x00001050, 0x002ffc0f },
764 { 0x00001054, 0x002ffc0f },
765 { 0x00001058, 0x002ffc0f },
766 { 0x0000105c, 0x002ffc0f },
767 { 0x00001060, 0x002ffc0f },
768 { 0x00001064, 0x002ffc0f },
769 { 0x00001230, 0x00000000 },
770 { 0x00001270, 0x00000000 },
771 { 0x00001038, 0x00000000 },
772 { 0x00001078, 0x00000000 },
773 { 0x000010b8, 0x00000000 },
774 { 0x000010f8, 0x00000000 },
775 { 0x00001138, 0x00000000 },
776 { 0x00001178, 0x00000000 },
777 { 0x000011b8, 0x00000000 },
778 { 0x000011f8, 0x00000000 },
779 { 0x00001238, 0x00000000 },
780 { 0x00001278, 0x00000000 },
781 { 0x000012b8, 0x00000000 },
782 { 0x000012f8, 0x00000000 },
783 { 0x00001338, 0x00000000 },
784 { 0x00001378, 0x00000000 },
785 { 0x000013b8, 0x00000000 },
786 { 0x000013f8, 0x00000000 },
787 { 0x00001438, 0x00000000 },
788 { 0x00001478, 0x00000000 },
789 { 0x000014b8, 0x00000000 },
790 { 0x000014f8, 0x00000000 },
791 { 0x00001538, 0x00000000 },
792 { 0x00001578, 0x00000000 },
793 { 0x000015b8, 0x00000000 },
794 { 0x000015f8, 0x00000000 },
795 { 0x00001638, 0x00000000 },
796 { 0x00001678, 0x00000000 },
797 { 0x000016b8, 0x00000000 },
798 { 0x000016f8, 0x00000000 },
799 { 0x00001738, 0x00000000 },
800 { 0x00001778, 0x00000000 },
801 { 0x000017b8, 0x00000000 },
802 { 0x000017f8, 0x00000000 },
803 { 0x0000103c, 0x00000000 },
804 { 0x0000107c, 0x00000000 },
805 { 0x000010bc, 0x00000000 },
806 { 0x000010fc, 0x00000000 },
807 { 0x0000113c, 0x00000000 },
808 { 0x0000117c, 0x00000000 },
809 { 0x000011bc, 0x00000000 },
810 { 0x000011fc, 0x00000000 },
811 { 0x0000123c, 0x00000000 },
812 { 0x0000127c, 0x00000000 },
813 { 0x000012bc, 0x00000000 },
814 { 0x000012fc, 0x00000000 },
815 { 0x0000133c, 0x00000000 },
816 { 0x0000137c, 0x00000000 },
817 { 0x000013bc, 0x00000000 },
818 { 0x000013fc, 0x00000000 },
819 { 0x0000143c, 0x00000000 },
820 { 0x0000147c, 0x00000000 },
821 { 0x00020010, 0x00000003 },
822 { 0x00020038, 0x000004c2 },
823 { 0x00008004, 0x00000000 },
824 { 0x00008008, 0x00000000 },
825 { 0x0000800c, 0x00000000 },
826 { 0x00008018, 0x00000700 },
827 { 0x00008020, 0x00000000 },
828 { 0x00008038, 0x00000000 },
829 { 0x0000803c, 0x00000000 },
830 { 0x00008048, 0x40000000 },
831 { 0x00008054, 0x00004000 },
832 { 0x00008058, 0x00000000 },
833 { 0x0000805c, 0x000fc78f },
834 { 0x00008060, 0x0000000f },
835 { 0x00008064, 0x00000000 },
836 { 0x000080c0, 0x2a82301a },
837 { 0x000080c4, 0x05dc01e0 },
838 { 0x000080c8, 0x1f402710 },
839 { 0x000080cc, 0x01f40000 },
840 { 0x000080d0, 0x00001e00 },
841 { 0x000080d4, 0x00000000 },
842 { 0x000080d8, 0x00400000 },
843 { 0x000080e0, 0xffffffff },
844 { 0x000080e4, 0x0000ffff },
845 { 0x000080e8, 0x003f3f3f },
846 { 0x000080ec, 0x00000000 },
847 { 0x000080f0, 0x00000000 },
848 { 0x000080f4, 0x00000000 },
849 { 0x000080f8, 0x00000000 },
850 { 0x000080fc, 0x00020000 },
851 { 0x00008100, 0x00020000 },
852 { 0x00008104, 0x00000001 },
853 { 0x00008108, 0x00000052 },
854 { 0x0000810c, 0x00000000 },
855 { 0x00008110, 0x00000168 },
856 { 0x00008118, 0x000100aa },
857 { 0x0000811c, 0x00003210 },
858 { 0x00008120, 0x08f04800 },
859 { 0x00008124, 0x00000000 },
860 { 0x00008128, 0x00000000 },
861 { 0x0000812c, 0x00000000 },
862 { 0x00008130, 0x00000000 },
863 { 0x00008134, 0x00000000 },
864 { 0x00008138, 0x00000000 },
865 { 0x0000813c, 0x00000000 },
866 { 0x00008144, 0x00000000 },
867 { 0x00008168, 0x00000000 },
868 { 0x0000816c, 0x00000000 },
869 { 0x00008170, 0x32143320 },
870 { 0x00008174, 0xfaa4fa50 },
871 { 0x00008178, 0x00000100 },
872 { 0x0000817c, 0x00000000 },
873 { 0x000081c4, 0x00000000 },
874 { 0x000081d0, 0x00003210 },
875 { 0x000081ec, 0x00000000 },
876 { 0x000081f0, 0x00000000 },
877 { 0x000081f4, 0x00000000 },
878 { 0x000081f8, 0x00000000 },
879 { 0x000081fc, 0x00000000 },
880 { 0x00008200, 0x00000000 },
881 { 0x00008204, 0x00000000 },
882 { 0x00008208, 0x00000000 },
883 { 0x0000820c, 0x00000000 },
884 { 0x00008210, 0x00000000 },
885 { 0x00008214, 0x00000000 },
886 { 0x00008218, 0x00000000 },
887 { 0x0000821c, 0x00000000 },
888 { 0x00008220, 0x00000000 },
889 { 0x00008224, 0x00000000 },
890 { 0x00008228, 0x00000000 },
891 { 0x0000822c, 0x00000000 },
892 { 0x00008230, 0x00000000 },
893 { 0x00008234, 0x00000000 },
894 { 0x00008238, 0x00000000 },
895 { 0x0000823c, 0x00000000 },
896 { 0x00008240, 0x00100000 },
897 { 0x00008244, 0x0010f400 },
898 { 0x00008248, 0x00000100 },
899 { 0x0000824c, 0x0001e800 },
900 { 0x00008250, 0x00000000 },
901 { 0x00008254, 0x00000000 },
902 { 0x00008258, 0x00000000 },
903 { 0x0000825c, 0x400000ff },
904 { 0x00008260, 0x00080922 },
905 { 0x00008270, 0x00000000 },
906 { 0x00008274, 0x40000000 },
907 { 0x00008278, 0x003e4180 },
908 { 0x0000827c, 0x00000000 },
909 { 0x00008284, 0x0000002c },
910 { 0x00008288, 0x0000002c },
911 { 0x0000828c, 0x00000000 },
912 { 0x00008294, 0x00000000 },
913 { 0x00008298, 0x00000000 },
914 { 0x00008300, 0x00000000 },
915 { 0x00008304, 0x00000000 },
916 { 0x00008308, 0x00000000 },
917 { 0x0000830c, 0x00000000 },
918 { 0x00008310, 0x00000000 },
919 { 0x00008314, 0x00000000 },
920 { 0x00008318, 0x00000000 },
921 { 0x00008328, 0x00000000 },
922 { 0x0000832c, 0x00000007 },
923 { 0x00008330, 0x00000302 },
924 { 0x00008334, 0x00000e00 },
925 { 0x00008338, 0x00000000 },
926 { 0x0000833c, 0x00000000 },
927 { 0x00008340, 0x000107ff },
928 { 0x00009808, 0x00000000 },
929 { 0x0000980c, 0xad848e19 },
930 { 0x00009810, 0x7d14e000 },
931 { 0x00009814, 0x9c0a9f6b },
932 { 0x0000981c, 0x00000000 },
933 { 0x0000982c, 0x0000a000 },
934 { 0x00009830, 0x00000000 },
935 { 0x0000983c, 0x00200400 },
936 { 0x00009840, 0x206a01ae },
937 { 0x0000984c, 0x1284233c },
938 { 0x00009854, 0x00000859 },
939 { 0x00009900, 0x00000000 },
940 { 0x00009904, 0x00000000 },
941 { 0x00009908, 0x00000000 },
942 { 0x0000990c, 0x00000000 },
943 { 0x0000991c, 0x10000fff },
944 { 0x00009920, 0x05100000 },
945 { 0x0000a920, 0x05100000 },
946 { 0x0000b920, 0x05100000 },
947 { 0x00009928, 0x00000001 },
948 { 0x0000992c, 0x00000004 },
949 { 0x00009934, 0x1e1f2022 },
950 { 0x00009938, 0x0a0b0c0d },
951 { 0x0000993c, 0x00000000 },
952 { 0x00009948, 0x9280b212 },
953 { 0x0000994c, 0x00020028 },
954 { 0x0000c95c, 0x004b6a8e },
955 { 0x0000c968, 0x000003ce },
956 { 0x00009970, 0x190fb515 },
957 { 0x00009974, 0x00000000 },
958 { 0x00009978, 0x00000001 },
959 { 0x0000997c, 0x00000000 },
960 { 0x00009980, 0x00000000 },
961 { 0x00009984, 0x00000000 },
962 { 0x00009988, 0x00000000 },
963 { 0x0000998c, 0x00000000 },
964 { 0x00009990, 0x00000000 },
965 { 0x00009994, 0x00000000 },
966 { 0x00009998, 0x00000000 },
967 { 0x0000999c, 0x00000000 },
968 { 0x000099a0, 0x00000000 },
969 { 0x000099a4, 0x00000001 },
970 { 0x000099a8, 0x201fff00 },
971 { 0x000099ac, 0x006f0000 },
972 { 0x000099b0, 0x03051000 },
973 { 0x000099dc, 0x00000000 },
974 { 0x000099e0, 0x00000200 },
975 { 0x000099e4, 0xaaaaaaaa },
976 { 0x000099e8, 0x3c466478 },
977 { 0x000099ec, 0x0cc80caa },
978 { 0x000099fc, 0x00001042 },
979 { 0x00009b00, 0x00000000 },
980 { 0x00009b04, 0x00000001 },
981 { 0x00009b08, 0x00000002 },
982 { 0x00009b0c, 0x00000003 },
983 { 0x00009b10, 0x00000004 },
984 { 0x00009b14, 0x00000005 },
985 { 0x00009b18, 0x00000008 },
986 { 0x00009b1c, 0x00000009 },
987 { 0x00009b20, 0x0000000a },
988 { 0x00009b24, 0x0000000b },
989 { 0x00009b28, 0x0000000c },
990 { 0x00009b2c, 0x0000000d },
991 { 0x00009b30, 0x00000010 },
992 { 0x00009b34, 0x00000011 },
993 { 0x00009b38, 0x00000012 },
994 { 0x00009b3c, 0x00000013 },
995 { 0x00009b40, 0x00000014 },
996 { 0x00009b44, 0x00000015 },
997 { 0x00009b48, 0x00000018 },
998 { 0x00009b4c, 0x00000019 },
999 { 0x00009b50, 0x0000001a },
1000 { 0x00009b54, 0x0000001b },
1001 { 0x00009b58, 0x0000001c },
1002 { 0x00009b5c, 0x0000001d },
1003 { 0x00009b60, 0x00000020 },
1004 { 0x00009b64, 0x00000021 },
1005 { 0x00009b68, 0x00000022 },
1006 { 0x00009b6c, 0x00000023 },
1007 { 0x00009b70, 0x00000024 },
1008 { 0x00009b74, 0x00000025 },
1009 { 0x00009b78, 0x00000028 },
1010 { 0x00009b7c, 0x00000029 },
1011 { 0x00009b80, 0x0000002a },
1012 { 0x00009b84, 0x0000002b },
1013 { 0x00009b88, 0x0000002c },
1014 { 0x00009b8c, 0x0000002d },
1015 { 0x00009b90, 0x00000030 },
1016 { 0x00009b94, 0x00000031 },
1017 { 0x00009b98, 0x00000032 },
1018 { 0x00009b9c, 0x00000033 },
1019 { 0x00009ba0, 0x00000034 },
1020 { 0x00009ba4, 0x00000035 },
1021 { 0x00009ba8, 0x00000035 },
1022 { 0x00009bac, 0x00000035 },
1023 { 0x00009bb0, 0x00000035 },
1024 { 0x00009bb4, 0x00000035 },
1025 { 0x00009bb8, 0x00000035 },
1026 { 0x00009bbc, 0x00000035 },
1027 { 0x00009bc0, 0x00000035 },
1028 { 0x00009bc4, 0x00000035 },
1029 { 0x00009bc8, 0x00000035 },
1030 { 0x00009bcc, 0x00000035 },
1031 { 0x00009bd0, 0x00000035 },
1032 { 0x00009bd4, 0x00000035 },
1033 { 0x00009bd8, 0x00000035 },
1034 { 0x00009bdc, 0x00000035 },
1035 { 0x00009be0, 0x00000035 },
1036 { 0x00009be4, 0x00000035 },
1037 { 0x00009be8, 0x00000035 },
1038 { 0x00009bec, 0x00000035 },
1039 { 0x00009bf0, 0x00000035 },
1040 { 0x00009bf4, 0x00000035 },
1041 { 0x00009bf8, 0x00000010 },
1042 { 0x00009bfc, 0x0000001a },
1043 { 0x0000a210, 0x40806333 },
1044 { 0x0000a214, 0x00106c10 },
1045 { 0x0000a218, 0x009c4060 },
1046 { 0x0000a220, 0x018830c6 },
1047 { 0x0000a224, 0x00000400 },
1048 { 0x0000a228, 0x001a0bb5 },
1049 { 0x0000a22c, 0x00000000 },
1050 { 0x0000a234, 0x20202020 },
1051 { 0x0000a238, 0x20202020 },
1052 { 0x0000a23c, 0x13c889ae },
1053 { 0x0000a240, 0x38490a20 },
1054 { 0x0000a244, 0x00007bb6 },
1055 { 0x0000a248, 0x0fff3ffc },
1056 { 0x0000a24c, 0x00000001 },
1057 { 0x0000a250, 0x0000a000 },
1058 { 0x0000a254, 0x00000000 },
1059 { 0x0000a258, 0x0cc75380 },
1060 { 0x0000a25c, 0x0f0f0f01 },
1061 { 0x0000a260, 0xdfa91f01 },
1062 { 0x0000a268, 0x00000001 },
1063 { 0x0000a26c, 0x0ebae9c6 },
1064 { 0x0000b26c, 0x0ebae9c6 },
1065 { 0x0000c26c, 0x0ebae9c6 },
1066 { 0x0000d270, 0x00820820 },
1067 { 0x0000a278, 0x1ce739ce },
1068 { 0x0000a27c, 0x050701ce },
1069 { 0x0000a338, 0x00000000 },
1070 { 0x0000a33c, 0x00000000 },
1071 { 0x0000a340, 0x00000000 },
1072 { 0x0000a344, 0x00000000 },
1073 { 0x0000a348, 0x3fffffff },
1074 { 0x0000a34c, 0x3fffffff },
1075 { 0x0000a350, 0x3fffffff },
1076 { 0x0000a354, 0x0003ffff },
1077 { 0x0000a358, 0x79a8aa33 },
1078 { 0x0000d35c, 0x07ffffef },
1079 { 0x0000d360, 0x0fffffe7 },
1080 { 0x0000d364, 0x17ffffe5 },
1081 { 0x0000d368, 0x1fffffe4 },
1082 { 0x0000d36c, 0x37ffffe3 },
1083 { 0x0000d370, 0x3fffffe3 },
1084 { 0x0000d374, 0x57ffffe3 },
1085 { 0x0000d378, 0x5fffffe2 },
1086 { 0x0000d37c, 0x7fffffe2 },
1087 { 0x0000d380, 0x7f3c7bba },
1088 { 0x0000d384, 0xf3307ff0 },
1089 { 0x0000a388, 0x0c000000 },
1090 { 0x0000a38c, 0x20202020 },
1091 { 0x0000a390, 0x20202020 },
1092 { 0x0000a394, 0x1ce739ce },
1093 { 0x0000a398, 0x000001ce },
1094 { 0x0000a39c, 0x00000001 },
1095 { 0x0000a3a0, 0x00000000 },
1096 { 0x0000a3a4, 0x00000000 },
1097 { 0x0000a3a8, 0x00000000 },
1098 { 0x0000a3ac, 0x00000000 },
1099 { 0x0000a3b0, 0x00000000 },
1100 { 0x0000a3b4, 0x00000000 },
1101 { 0x0000a3b8, 0x00000000 },
1102 { 0x0000a3bc, 0x00000000 },
1103 { 0x0000a3c0, 0x00000000 },
1104 { 0x0000a3c4, 0x00000000 },
1105 { 0x0000a3c8, 0x00000246 },
1106 { 0x0000a3cc, 0x20202020 },
1107 { 0x0000a3d0, 0x20202020 },
1108 { 0x0000a3d4, 0x20202020 },
1109 { 0x0000a3dc, 0x1ce739ce },
1110 { 0x0000a3e0, 0x000001ce },
1111};
1112
/*
 * ar5416Bank0_9100 - AR9100-family variant of the RF "bank 0" table
 * (identical values to ar5416Bank0 above): { register offset, value }.
 */
1113static const u32 ar5416Bank0_9100[][2] = {
1114 { 0x000098b0, 0x1e5795e5 },
1115 { 0x000098e0, 0x02008020 },
1116};
1117
/*
 * AR9100 baseband RF gain table: column 0 is the register offset
 * (0x9a00..0x9afc gain-table window), columns 1-2 are two alternative
 * value sets — presumably selected per band (5 GHz vs 2 GHz); confirm
 * against the INI write loop that consumes this table.
 */
static const u32 ar5416BB_RfGain_9100[][3] = {
    { 0x00009a00, 0x00000000, 0x00000000 },
    { 0x00009a04, 0x00000040, 0x00000040 },
    { 0x00009a08, 0x00000080, 0x00000080 },
    { 0x00009a0c, 0x000001a1, 0x00000141 },
    { 0x00009a10, 0x000001e1, 0x00000181 },
    { 0x00009a14, 0x00000021, 0x000001c1 },
    { 0x00009a18, 0x00000061, 0x00000001 },
    { 0x00009a1c, 0x00000168, 0x00000041 },
    { 0x00009a20, 0x000001a8, 0x000001a8 },
    { 0x00009a24, 0x000001e8, 0x000001e8 },
    { 0x00009a28, 0x00000028, 0x00000028 },
    { 0x00009a2c, 0x00000068, 0x00000068 },
    { 0x00009a30, 0x00000189, 0x000000a8 },
    { 0x00009a34, 0x000001c9, 0x00000169 },
    { 0x00009a38, 0x00000009, 0x000001a9 },
    { 0x00009a3c, 0x00000049, 0x000001e9 },
    { 0x00009a40, 0x00000089, 0x00000029 },
    { 0x00009a44, 0x00000170, 0x00000069 },
    { 0x00009a48, 0x000001b0, 0x00000190 },
    { 0x00009a4c, 0x000001f0, 0x000001d0 },
    { 0x00009a50, 0x00000030, 0x00000010 },
    { 0x00009a54, 0x00000070, 0x00000050 },
    { 0x00009a58, 0x00000191, 0x00000090 },
    { 0x00009a5c, 0x000001d1, 0x00000151 },
    { 0x00009a60, 0x00000011, 0x00000191 },
    { 0x00009a64, 0x00000051, 0x000001d1 },
    { 0x00009a68, 0x00000091, 0x00000011 },
    { 0x00009a6c, 0x000001b8, 0x00000051 },
    { 0x00009a70, 0x000001f8, 0x00000198 },
    { 0x00009a74, 0x00000038, 0x000001d8 },
    { 0x00009a78, 0x00000078, 0x00000018 },
    { 0x00009a7c, 0x00000199, 0x00000058 },
    { 0x00009a80, 0x000001d9, 0x00000098 },
    { 0x00009a84, 0x00000019, 0x00000159 },
    { 0x00009a88, 0x00000059, 0x00000199 },
    { 0x00009a8c, 0x00000099, 0x000001d9 },
    { 0x00009a90, 0x000000d9, 0x00000019 },
    { 0x00009a94, 0x000000f9, 0x00000059 },
    { 0x00009a98, 0x000000f9, 0x00000099 },
    { 0x00009a9c, 0x000000f9, 0x000000d9 },
    { 0x00009aa0, 0x000000f9, 0x000000f9 },
    { 0x00009aa4, 0x000000f9, 0x000000f9 },
    { 0x00009aa8, 0x000000f9, 0x000000f9 },
    { 0x00009aac, 0x000000f9, 0x000000f9 },
    { 0x00009ab0, 0x000000f9, 0x000000f9 },
    { 0x00009ab4, 0x000000f9, 0x000000f9 },
    { 0x00009ab8, 0x000000f9, 0x000000f9 },
    { 0x00009abc, 0x000000f9, 0x000000f9 },
    { 0x00009ac0, 0x000000f9, 0x000000f9 },
    { 0x00009ac4, 0x000000f9, 0x000000f9 },
    { 0x00009ac8, 0x000000f9, 0x000000f9 },
    { 0x00009acc, 0x000000f9, 0x000000f9 },
    { 0x00009ad0, 0x000000f9, 0x000000f9 },
    { 0x00009ad4, 0x000000f9, 0x000000f9 },
    { 0x00009ad8, 0x000000f9, 0x000000f9 },
    { 0x00009adc, 0x000000f9, 0x000000f9 },
    { 0x00009ae0, 0x000000f9, 0x000000f9 },
    { 0x00009ae4, 0x000000f9, 0x000000f9 },
    { 0x00009ae8, 0x000000f9, 0x000000f9 },
    { 0x00009aec, 0x000000f9, 0x000000f9 },
    { 0x00009af0, 0x000000f9, 0x000000f9 },
    { 0x00009af4, 0x000000f9, 0x000000f9 },
    { 0x00009af8, 0x000000f9, 0x000000f9 },
    { 0x00009afc, 0x000000f9, 0x000000f9 },
};
1184
1185static const u32 ar5416Bank1_9100[][2] = {
1186 { 0x000098b0, 0x02108421},
1187 { 0x000098ec, 0x00000008},
1188};
1189
1190static const u32 ar5416Bank2_9100[][2] = {
1191 { 0x000098b0, 0x0e73ff17},
1192 { 0x000098e0, 0x00000420},
1193};
1194
/* AR9100 radio Bank 3: { register offset, value-A, value-B } — two value
 * columns, presumably selected per band; confirm against the write loop. */
static const u32 ar5416Bank3_9100[][3] = {
    { 0x000098f0, 0x01400018, 0x01c00018 },
};
1198
1199static const u32 ar5416Bank6_9100[][3] = {
1200
1201 { 0x0000989c, 0x00000000, 0x00000000 },
1202 { 0x0000989c, 0x00000000, 0x00000000 },
1203 { 0x0000989c, 0x00000000, 0x00000000 },
1204 { 0x0000989c, 0x00e00000, 0x00e00000 },
1205 { 0x0000989c, 0x005e0000, 0x005e0000 },
1206 { 0x0000989c, 0x00120000, 0x00120000 },
1207 { 0x0000989c, 0x00620000, 0x00620000 },
1208 { 0x0000989c, 0x00020000, 0x00020000 },
1209 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1210 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1211 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1212 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1213 { 0x0000989c, 0x005f0000, 0x005f0000 },
1214 { 0x0000989c, 0x00870000, 0x00870000 },
1215 { 0x0000989c, 0x00f90000, 0x00f90000 },
1216 { 0x0000989c, 0x007b0000, 0x007b0000 },
1217 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1218 { 0x0000989c, 0x00f50000, 0x00f50000 },
1219 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1220 { 0x0000989c, 0x00110000, 0x00110000 },
1221 { 0x0000989c, 0x006100a8, 0x006100a8 },
1222 { 0x0000989c, 0x004210a2, 0x004210a2 },
1223 { 0x0000989c, 0x0014000f, 0x0014000f },
1224 { 0x0000989c, 0x00c40002, 0x00c40002 },
1225 { 0x0000989c, 0x003000f2, 0x003000f2 },
1226 { 0x0000989c, 0x00440016, 0x00440016 },
1227 { 0x0000989c, 0x00410040, 0x00410040 },
1228 { 0x0000989c, 0x000180d6, 0x000180d6 },
1229 { 0x0000989c, 0x0000c0aa, 0x0000c0aa },
1230 { 0x0000989c, 0x000000b1, 0x000000b1 },
1231 { 0x0000989c, 0x00002000, 0x00002000 },
1232 { 0x0000989c, 0x000000d4, 0x000000d4 },
1233 { 0x000098d0, 0x0000000f, 0x0010000f },
1234};
1235
1236
1237static const u32 ar5416Bank6TPC_9100[][3] = {
1238
1239 { 0x0000989c, 0x00000000, 0x00000000 },
1240 { 0x0000989c, 0x00000000, 0x00000000 },
1241 { 0x0000989c, 0x00000000, 0x00000000 },
1242 { 0x0000989c, 0x00e00000, 0x00e00000 },
1243 { 0x0000989c, 0x005e0000, 0x005e0000 },
1244 { 0x0000989c, 0x00120000, 0x00120000 },
1245 { 0x0000989c, 0x00620000, 0x00620000 },
1246 { 0x0000989c, 0x00020000, 0x00020000 },
1247 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1248 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1249 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1250 { 0x0000989c, 0x40ff0000, 0x40ff0000 },
1251 { 0x0000989c, 0x005f0000, 0x005f0000 },
1252 { 0x0000989c, 0x00870000, 0x00870000 },
1253 { 0x0000989c, 0x00f90000, 0x00f90000 },
1254 { 0x0000989c, 0x007b0000, 0x007b0000 },
1255 { 0x0000989c, 0x00ff0000, 0x00ff0000 },
1256 { 0x0000989c, 0x00f50000, 0x00f50000 },
1257 { 0x0000989c, 0x00dc0000, 0x00dc0000 },
1258 { 0x0000989c, 0x00110000, 0x00110000 },
1259 { 0x0000989c, 0x006100a8, 0x006100a8 },
1260 { 0x0000989c, 0x00423022, 0x00423022 },
1261 { 0x0000989c, 0x2014008f, 0x2014008f },
1262 { 0x0000989c, 0x00c40002, 0x00c40002 },
1263 { 0x0000989c, 0x003000f2, 0x003000f2 },
1264 { 0x0000989c, 0x00440016, 0x00440016 },
1265 { 0x0000989c, 0x00410040, 0x00410040 },
1266 { 0x0000989c, 0x0001805e, 0x0001805e },
1267 { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
1268 { 0x0000989c, 0x000000e1, 0x000000e1 },
1269 { 0x0000989c, 0x00007080, 0x00007080 },
1270 { 0x0000989c, 0x000000d4, 0x000000d4 },
1271 { 0x000098d0, 0x0000000f, 0x0010000f },
1272};
1273
/* AR9100 radio Bank 7 initialization: serial shift via 0x989c,
 * terminated by the 0x98cc write. */
static const u32 ar5416Bank7_9100[][2] = {
    { 0x0000989c, 0x00000500 },
    { 0x0000989c, 0x00000800 },
    { 0x000098cc, 0x0000000e },
};
1279
1280static const u32 ar5416Addac_9100[][2] = {
1281 {0x0000989c, 0x00000000 },
1282 {0x0000989c, 0x00000000 },
1283 {0x0000989c, 0x00000000 },
1284 {0x0000989c, 0x00000000 },
1285 {0x0000989c, 0x00000000 },
1286 {0x0000989c, 0x00000000 },
1287 {0x0000989c, 0x00000000 },
1288 {0x0000989c, 0x00000010 },
1289 {0x0000989c, 0x00000000 },
1290 {0x0000989c, 0x00000000 },
1291 {0x0000989c, 0x00000000 },
1292 {0x0000989c, 0x00000000 },
1293 {0x0000989c, 0x00000000 },
1294 {0x0000989c, 0x00000000 },
1295 {0x0000989c, 0x00000000 },
1296 {0x0000989c, 0x00000000 },
1297 {0x0000989c, 0x00000000 },
1298 {0x0000989c, 0x00000000 },
1299 {0x0000989c, 0x00000000 },
1300 {0x0000989c, 0x00000000 },
1301 {0x0000989c, 0x00000000 },
1302 {0x0000989c, 0x000000c0 },
1303 {0x0000989c, 0x00000015 },
1304 {0x0000989c, 0x00000000 },
1305 {0x0000989c, 0x00000000 },
1306 {0x0000989c, 0x00000000 },
1307 {0x0000989c, 0x00000000 },
1308 {0x0000989c, 0x00000000 },
1309 {0x0000989c, 0x00000000 },
1310 {0x0000989c, 0x00000000 },
1311 {0x0000989c, 0x00000000 },
1312 {0x000098cc, 0x00000000 },
1313};
1314
/*
 * AR9160 mode-dependent register initialization. Column 0 is the
 * register offset; the five remaining columns are alternative values —
 * presumably indexed by the driver's channel/HT operating mode
 * (confirm against the INI write loop that consumes this table).
 */
static const u32 ar5416Modes_9160[][6] = {
    { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
    { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
    { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
    { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
    { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
    { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
    { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
    { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
    { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
    { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
    { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
    { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
    { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
    { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
    { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
    { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
    { 0x00009850, 0x6c48b4e2, 0x6c48b4e2, 0x6c48b0e2, 0x6c48b0e2, 0x6c48b0e2 },
    { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
    { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e },
    { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
    { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
    { 0x00009868, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0, 0x409a40d0 },
    { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
    { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
    { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
    { 0x00009924, 0xd00a8a07, 0xd00a8a07, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
    { 0x00009944, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020, 0xffb81020 },
    { 0x00009960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
    { 0x0000a960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
    { 0x0000b960, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40, 0x00009b40 },
    { 0x00009964, 0x00001120, 0x00001120, 0x00001120, 0x00001120, 0x00001120 },
    { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
    { 0x0000c9bc, 0x001a0600, 0x001a0600, 0x001a0c00, 0x001a0c00, 0x001a0c00 },
    { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
    { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
    { 0x000099c8, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329, 0x60f65329 },
    { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
    { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
    { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
    { 0x000099d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
    { 0x0000a204, 0x00000880, 0x00000880, 0x00000880, 0x00000880, 0x00000880 },
    { 0x0000a208, 0xd6be4788, 0xd6be4788, 0xd03e4788, 0xd03e4788, 0xd03e4788 },
    { 0x0000a20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
    { 0x0000b20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
    { 0x0000c20c, 0x002fc160, 0x002fc160, 0x002ac120, 0x002ac120, 0x002ac120 },
    { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
    { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
    { 0x0000a274, 0x0a1a9caa, 0x0a1a9caa, 0x0a1a7caa, 0x0a1a7caa, 0x0a1a7caa },
    { 0x0000a300, 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 },
    { 0x0000a304, 0x30032602, 0x30032602, 0x2e032402, 0x2e032402, 0x2e032402 },
    { 0x0000a308, 0x48073e06, 0x48073e06, 0x4a0a3c06, 0x4a0a3c06, 0x4a0a3c06 },
    { 0x0000a30c, 0x560b4c0a, 0x560b4c0a, 0x621a540b, 0x621a540b, 0x621a540b },
    { 0x0000a310, 0x641a600f, 0x641a600f, 0x764f6c1b, 0x764f6c1b, 0x764f6c1b },
    { 0x0000a314, 0x7a4f6e1b, 0x7a4f6e1b, 0x845b7a5a, 0x845b7a5a, 0x845b7a5a },
    { 0x0000a318, 0x8c5b7e5a, 0x8c5b7e5a, 0x950f8ccf, 0x950f8ccf, 0x950f8ccf },
    { 0x0000a31c, 0x9d0f96cf, 0x9d0f96cf, 0xa5cf9b4f, 0xa5cf9b4f, 0xa5cf9b4f },
    { 0x0000a320, 0xb51fa69f, 0xb51fa69f, 0xbddfaf1f, 0xbddfaf1f, 0xbddfaf1f },
    { 0x0000a324, 0xcb3fbd07, 0xcb3fbcbf, 0xd1ffc93f, 0xd1ffc93f, 0xd1ffc93f },
    { 0x0000a328, 0x0000d7bf, 0x0000d7bf, 0x00000000, 0x00000000, 0x00000000 },
    { 0x0000a32c, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
    { 0x0000a330, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
    { 0x0000a334, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
};
1379
/* AR9160 mode-independent register initialization:
 * { register offset, value } pairs, applied once at hardware reset. */
static const u32 ar5416Common_9160[][2] = {
    { 0x0000000c, 0x00000000 },
    { 0x00000030, 0x00020015 },
    { 0x00000034, 0x00000005 },
    { 0x00000040, 0x00000000 },
    { 0x00000044, 0x00000008 },
    { 0x00000048, 0x00000008 },
    { 0x0000004c, 0x00000010 },
    { 0x00000050, 0x00000000 },
    { 0x00000054, 0x0000001f },
    { 0x00000800, 0x00000000 },
    { 0x00000804, 0x00000000 },
    { 0x00000808, 0x00000000 },
    { 0x0000080c, 0x00000000 },
    { 0x00000810, 0x00000000 },
    { 0x00000814, 0x00000000 },
    { 0x00000818, 0x00000000 },
    { 0x0000081c, 0x00000000 },
    { 0x00000820, 0x00000000 },
    { 0x00000824, 0x00000000 },
    { 0x00001040, 0x002ffc0f },
    { 0x00001044, 0x002ffc0f },
    { 0x00001048, 0x002ffc0f },
    { 0x0000104c, 0x002ffc0f },
    { 0x00001050, 0x002ffc0f },
    { 0x00001054, 0x002ffc0f },
    { 0x00001058, 0x002ffc0f },
    { 0x0000105c, 0x002ffc0f },
    { 0x00001060, 0x002ffc0f },
    { 0x00001064, 0x002ffc0f },
    { 0x00001230, 0x00000000 },
    { 0x00001270, 0x00000000 },
    { 0x00001038, 0x00000000 },
    { 0x00001078, 0x00000000 },
    { 0x000010b8, 0x00000000 },
    { 0x000010f8, 0x00000000 },
    { 0x00001138, 0x00000000 },
    { 0x00001178, 0x00000000 },
    { 0x000011b8, 0x00000000 },
    { 0x000011f8, 0x00000000 },
    { 0x00001238, 0x00000000 },
    { 0x00001278, 0x00000000 },
    { 0x000012b8, 0x00000000 },
    { 0x000012f8, 0x00000000 },
    { 0x00001338, 0x00000000 },
    { 0x00001378, 0x00000000 },
    { 0x000013b8, 0x00000000 },
    { 0x000013f8, 0x00000000 },
    { 0x00001438, 0x00000000 },
    { 0x00001478, 0x00000000 },
    { 0x000014b8, 0x00000000 },
    { 0x000014f8, 0x00000000 },
    { 0x00001538, 0x00000000 },
    { 0x00001578, 0x00000000 },
    { 0x000015b8, 0x00000000 },
    { 0x000015f8, 0x00000000 },
    { 0x00001638, 0x00000000 },
    { 0x00001678, 0x00000000 },
    { 0x000016b8, 0x00000000 },
    { 0x000016f8, 0x00000000 },
    { 0x00001738, 0x00000000 },
    { 0x00001778, 0x00000000 },
    { 0x000017b8, 0x00000000 },
    { 0x000017f8, 0x00000000 },
    { 0x0000103c, 0x00000000 },
    { 0x0000107c, 0x00000000 },
    { 0x000010bc, 0x00000000 },
    { 0x000010fc, 0x00000000 },
    { 0x0000113c, 0x00000000 },
    { 0x0000117c, 0x00000000 },
    { 0x000011bc, 0x00000000 },
    { 0x000011fc, 0x00000000 },
    { 0x0000123c, 0x00000000 },
    { 0x0000127c, 0x00000000 },
    { 0x000012bc, 0x00000000 },
    { 0x000012fc, 0x00000000 },
    { 0x0000133c, 0x00000000 },
    { 0x0000137c, 0x00000000 },
    { 0x000013bc, 0x00000000 },
    { 0x000013fc, 0x00000000 },
    { 0x0000143c, 0x00000000 },
    { 0x0000147c, 0x00000000 },
    { 0x00004030, 0x00000002 },
    { 0x0000403c, 0x00000002 },
    { 0x00007010, 0x00000020 },
    { 0x00007038, 0x000004c2 },
    { 0x00008004, 0x00000000 },
    { 0x00008008, 0x00000000 },
    { 0x0000800c, 0x00000000 },
    { 0x00008018, 0x00000700 },
    { 0x00008020, 0x00000000 },
    { 0x00008038, 0x00000000 },
    { 0x0000803c, 0x00000000 },
    { 0x00008048, 0x40000000 },
    { 0x00008054, 0x00000000 },
    { 0x00008058, 0x00000000 },
    { 0x0000805c, 0x000fc78f },
    { 0x00008060, 0x0000000f },
    { 0x00008064, 0x00000000 },
    { 0x000080c0, 0x2a82301a },
    { 0x000080c4, 0x05dc01e0 },
    { 0x000080c8, 0x1f402710 },
    { 0x000080cc, 0x01f40000 },
    { 0x000080d0, 0x00001e00 },
    { 0x000080d4, 0x00000000 },
    { 0x000080d8, 0x00400000 },
    { 0x000080e0, 0xffffffff },
    { 0x000080e4, 0x0000ffff },
    { 0x000080e8, 0x003f3f3f },
    { 0x000080ec, 0x00000000 },
    { 0x000080f0, 0x00000000 },
    { 0x000080f4, 0x00000000 },
    { 0x000080f8, 0x00000000 },
    { 0x000080fc, 0x00020000 },
    { 0x00008100, 0x00020000 },
    { 0x00008104, 0x00000001 },
    { 0x00008108, 0x00000052 },
    { 0x0000810c, 0x00000000 },
    { 0x00008110, 0x00000168 },
    { 0x00008118, 0x000100aa },
    { 0x0000811c, 0x00003210 },
    { 0x00008120, 0x08f04800 },
    { 0x00008124, 0x00000000 },
    { 0x00008128, 0x00000000 },
    { 0x0000812c, 0x00000000 },
    { 0x00008130, 0x00000000 },
    { 0x00008134, 0x00000000 },
    { 0x00008138, 0x00000000 },
    { 0x0000813c, 0x00000000 },
    { 0x00008144, 0xffffffff },
    { 0x00008168, 0x00000000 },
    { 0x0000816c, 0x00000000 },
    { 0x00008170, 0x32143320 },
    { 0x00008174, 0xfaa4fa50 },
    { 0x00008178, 0x00000100 },
    { 0x0000817c, 0x00000000 },
    { 0x000081c4, 0x00000000 },
    { 0x000081d0, 0x00003210 },
    { 0x000081ec, 0x00000000 },
    { 0x000081f0, 0x00000000 },
    { 0x000081f4, 0x00000000 },
    { 0x000081f8, 0x00000000 },
    { 0x000081fc, 0x00000000 },
    { 0x00008200, 0x00000000 },
    { 0x00008204, 0x00000000 },
    { 0x00008208, 0x00000000 },
    { 0x0000820c, 0x00000000 },
    { 0x00008210, 0x00000000 },
    { 0x00008214, 0x00000000 },
    { 0x00008218, 0x00000000 },
    { 0x0000821c, 0x00000000 },
    { 0x00008220, 0x00000000 },
    { 0x00008224, 0x00000000 },
    { 0x00008228, 0x00000000 },
    { 0x0000822c, 0x00000000 },
    { 0x00008230, 0x00000000 },
    { 0x00008234, 0x00000000 },
    { 0x00008238, 0x00000000 },
    { 0x0000823c, 0x00000000 },
    { 0x00008240, 0x00100000 },
    { 0x00008244, 0x0010f400 },
    { 0x00008248, 0x00000100 },
    { 0x0000824c, 0x0001e800 },
    { 0x00008250, 0x00000000 },
    { 0x00008254, 0x00000000 },
    { 0x00008258, 0x00000000 },
    { 0x0000825c, 0x400000ff },
    { 0x00008260, 0x00080922 },
    { 0x00008270, 0x00000000 },
    { 0x00008274, 0x40000000 },
    { 0x00008278, 0x003e4180 },
    { 0x0000827c, 0x00000000 },
    { 0x00008284, 0x0000002c },
    { 0x00008288, 0x0000002c },
    { 0x0000828c, 0x00000000 },
    { 0x00008294, 0x00000000 },
    { 0x00008298, 0x00000000 },
    { 0x00008300, 0x00000000 },
    { 0x00008304, 0x00000000 },
    { 0x00008308, 0x00000000 },
    { 0x0000830c, 0x00000000 },
    { 0x00008310, 0x00000000 },
    { 0x00008314, 0x00000000 },
    { 0x00008318, 0x00000000 },
    { 0x00008328, 0x00000000 },
    { 0x0000832c, 0x00000007 },
    { 0x00008330, 0x00000302 },
    { 0x00008334, 0x00000e00 },
    { 0x00008338, 0x00ff0000 },
    { 0x0000833c, 0x00000000 },
    { 0x00008340, 0x000107ff },
    { 0x00009808, 0x00000000 },
    { 0x0000980c, 0xad848e19 },
    { 0x00009810, 0x7d14e000 },
    { 0x00009814, 0x9c0a9f6b },
    { 0x0000981c, 0x00000000 },
    { 0x0000982c, 0x0000a000 },
    { 0x00009830, 0x00000000 },
    { 0x0000983c, 0x00200400 },
    { 0x00009840, 0x206a01ae },
    { 0x0000984c, 0x1284233c },
    { 0x00009854, 0x00000859 },
    { 0x00009900, 0x00000000 },
    { 0x00009904, 0x00000000 },
    { 0x00009908, 0x00000000 },
    { 0x0000990c, 0x00000000 },
    { 0x0000991c, 0x10000fff },
    { 0x00009920, 0x05100000 },
    { 0x0000a920, 0x05100000 },
    { 0x0000b920, 0x05100000 },
    { 0x00009928, 0x00000001 },
    { 0x0000992c, 0x00000004 },
    { 0x00009934, 0x1e1f2022 },
    { 0x00009938, 0x0a0b0c0d },
    { 0x0000993c, 0x00000000 },
    { 0x00009948, 0x9280b212 },
    { 0x0000994c, 0x00020028 },
    { 0x00009954, 0x5f3ca3de },
    { 0x00009958, 0x2108ecff },
    { 0x00009940, 0x00750604 },
    { 0x0000c95c, 0x004b6a8e },
    { 0x00009970, 0x190fb515 },
    { 0x00009974, 0x00000000 },
    { 0x00009978, 0x00000001 },
    { 0x0000997c, 0x00000000 },
    { 0x00009980, 0x00000000 },
    { 0x00009984, 0x00000000 },
    { 0x00009988, 0x00000000 },
    { 0x0000998c, 0x00000000 },
    { 0x00009990, 0x00000000 },
    { 0x00009994, 0x00000000 },
    { 0x00009998, 0x00000000 },
    { 0x0000999c, 0x00000000 },
    { 0x000099a0, 0x00000000 },
    { 0x000099a4, 0x00000001 },
    { 0x000099a8, 0x201fff00 },
    { 0x000099ac, 0x006f0000 },
    { 0x000099b0, 0x03051000 },
    { 0x000099dc, 0x00000000 },
    { 0x000099e0, 0x00000200 },
    { 0x000099e4, 0xaaaaaaaa },
    { 0x000099e8, 0x3c466478 },
    { 0x000099ec, 0x0cc80caa },
    { 0x000099fc, 0x00001042 },
    { 0x00009b00, 0x00000000 },
    { 0x00009b04, 0x00000001 },
    { 0x00009b08, 0x00000002 },
    { 0x00009b0c, 0x00000003 },
    { 0x00009b10, 0x00000004 },
    { 0x00009b14, 0x00000005 },
    { 0x00009b18, 0x00000008 },
    { 0x00009b1c, 0x00000009 },
    { 0x00009b20, 0x0000000a },
    { 0x00009b24, 0x0000000b },
    { 0x00009b28, 0x0000000c },
    { 0x00009b2c, 0x0000000d },
    { 0x00009b30, 0x00000010 },
    { 0x00009b34, 0x00000011 },
    { 0x00009b38, 0x00000012 },
    { 0x00009b3c, 0x00000013 },
    { 0x00009b40, 0x00000014 },
    { 0x00009b44, 0x00000015 },
    { 0x00009b48, 0x00000018 },
    { 0x00009b4c, 0x00000019 },
    { 0x00009b50, 0x0000001a },
    { 0x00009b54, 0x0000001b },
    { 0x00009b58, 0x0000001c },
    { 0x00009b5c, 0x0000001d },
    { 0x00009b60, 0x00000020 },
    { 0x00009b64, 0x00000021 },
    { 0x00009b68, 0x00000022 },
    { 0x00009b6c, 0x00000023 },
    { 0x00009b70, 0x00000024 },
    { 0x00009b74, 0x00000025 },
    { 0x00009b78, 0x00000028 },
    { 0x00009b7c, 0x00000029 },
    { 0x00009b80, 0x0000002a },
    { 0x00009b84, 0x0000002b },
    { 0x00009b88, 0x0000002c },
    { 0x00009b8c, 0x0000002d },
    { 0x00009b90, 0x00000030 },
    { 0x00009b94, 0x00000031 },
    { 0x00009b98, 0x00000032 },
    { 0x00009b9c, 0x00000033 },
    { 0x00009ba0, 0x00000034 },
    { 0x00009ba4, 0x00000035 },
    { 0x00009ba8, 0x00000035 },
    { 0x00009bac, 0x00000035 },
    { 0x00009bb0, 0x00000035 },
    { 0x00009bb4, 0x00000035 },
    { 0x00009bb8, 0x00000035 },
    { 0x00009bbc, 0x00000035 },
    { 0x00009bc0, 0x00000035 },
    { 0x00009bc4, 0x00000035 },
    { 0x00009bc8, 0x00000035 },
    { 0x00009bcc, 0x00000035 },
    { 0x00009bd0, 0x00000035 },
    { 0x00009bd4, 0x00000035 },
    { 0x00009bd8, 0x00000035 },
    { 0x00009bdc, 0x00000035 },
    { 0x00009be0, 0x00000035 },
    { 0x00009be4, 0x00000035 },
    { 0x00009be8, 0x00000035 },
    { 0x00009bec, 0x00000035 },
    { 0x00009bf0, 0x00000035 },
    { 0x00009bf4, 0x00000035 },
    { 0x00009bf8, 0x00000010 },
    { 0x00009bfc, 0x0000001a },
    { 0x0000a210, 0x40806333 },
    { 0x0000a214, 0x00106c10 },
    { 0x0000a218, 0x009c4060 },
    { 0x0000a220, 0x018830c6 },
    { 0x0000a224, 0x00000400 },
    { 0x0000a228, 0x001a0bb5 },
    { 0x0000a22c, 0x00000000 },
    { 0x0000a234, 0x20202020 },
    { 0x0000a238, 0x20202020 },
    { 0x0000a23c, 0x13c889af },
    { 0x0000a240, 0x38490a20 },
    { 0x0000a244, 0x00007bb6 },
    { 0x0000a248, 0x0fff3ffc },
    { 0x0000a24c, 0x00000001 },
    { 0x0000a250, 0x0000e000 },
    { 0x0000a254, 0x00000000 },
    { 0x0000a258, 0x0cc75380 },
    { 0x0000a25c, 0x0f0f0f01 },
    { 0x0000a260, 0xdfa91f01 },
    { 0x0000a268, 0x00000001 },
    { 0x0000a26c, 0x0ebae9c6 },
    { 0x0000b26c, 0x0ebae9c6 },
    { 0x0000c26c, 0x0ebae9c6 },
    { 0x0000d270, 0x00820820 },
    { 0x0000a278, 0x1ce739ce },
    { 0x0000a27c, 0x050701ce },
    { 0x0000a338, 0x00000000 },
    { 0x0000a33c, 0x00000000 },
    { 0x0000a340, 0x00000000 },
    { 0x0000a344, 0x00000000 },
    { 0x0000a348, 0x3fffffff },
    { 0x0000a34c, 0x3fffffff },
    { 0x0000a350, 0x3fffffff },
    { 0x0000a354, 0x0003ffff },
    { 0x0000a358, 0x79bfaa03 },
    { 0x0000d35c, 0x07ffffef },
    { 0x0000d360, 0x0fffffe7 },
    { 0x0000d364, 0x17ffffe5 },
    { 0x0000d368, 0x1fffffe4 },
    { 0x0000d36c, 0x37ffffe3 },
    { 0x0000d370, 0x3fffffe3 },
    { 0x0000d374, 0x57ffffe3 },
    { 0x0000d378, 0x5fffffe2 },
    { 0x0000d37c, 0x7fffffe2 },
    { 0x0000d380, 0x7f3c7bba },
    { 0x0000d384, 0xf3307ff0 },
    { 0x0000a388, 0x0c000000 },
    { 0x0000a38c, 0x20202020 },
    { 0x0000a390, 0x20202020 },
    { 0x0000a394, 0x1ce739ce },
    { 0x0000a398, 0x000001ce },
    { 0x0000a39c, 0x00000001 },
    { 0x0000a3a0, 0x00000000 },
    { 0x0000a3a4, 0x00000000 },
    { 0x0000a3a8, 0x00000000 },
    { 0x0000a3ac, 0x00000000 },
    { 0x0000a3b0, 0x00000000 },
    { 0x0000a3b4, 0x00000000 },
    { 0x0000a3b8, 0x00000000 },
    { 0x0000a3bc, 0x00000000 },
    { 0x0000a3c0, 0x00000000 },
    { 0x0000a3c4, 0x00000000 },
    { 0x0000a3c8, 0x00000246 },
    { 0x0000a3cc, 0x20202020 },
    { 0x0000a3d0, 0x20202020 },
    { 0x0000a3d4, 0x20202020 },
    { 0x0000a3dc, 0x1ce739ce },
    { 0x0000a3e0, 0x000001ce },
};
1757
/* AR9160 radio Bank 0 initialization: { register offset, value } pairs. */
static const u32 ar5416Bank0_9160[][2] = {
    { 0x000098b0, 0x1e5795e5 },
    { 0x000098e0, 0x02008020 },
};
1762
/*
 * AR9160 baseband RF gain table: column 0 is the register offset
 * (0x9a00..0x9afc gain-table window), columns 1-2 are two alternative
 * value sets — presumably selected per band (5 GHz vs 2 GHz); confirm
 * against the INI write loop that consumes this table.
 */
static const u32 ar5416BB_RfGain_9160[][3] = {
    { 0x00009a00, 0x00000000, 0x00000000 },
    { 0x00009a04, 0x00000040, 0x00000040 },
    { 0x00009a08, 0x00000080, 0x00000080 },
    { 0x00009a0c, 0x000001a1, 0x00000141 },
    { 0x00009a10, 0x000001e1, 0x00000181 },
    { 0x00009a14, 0x00000021, 0x000001c1 },
    { 0x00009a18, 0x00000061, 0x00000001 },
    { 0x00009a1c, 0x00000168, 0x00000041 },
    { 0x00009a20, 0x000001a8, 0x000001a8 },
    { 0x00009a24, 0x000001e8, 0x000001e8 },
    { 0x00009a28, 0x00000028, 0x00000028 },
    { 0x00009a2c, 0x00000068, 0x00000068 },
    { 0x00009a30, 0x00000189, 0x000000a8 },
    { 0x00009a34, 0x000001c9, 0x00000169 },
    { 0x00009a38, 0x00000009, 0x000001a9 },
    { 0x00009a3c, 0x00000049, 0x000001e9 },
    { 0x00009a40, 0x00000089, 0x00000029 },
    { 0x00009a44, 0x00000170, 0x00000069 },
    { 0x00009a48, 0x000001b0, 0x00000190 },
    { 0x00009a4c, 0x000001f0, 0x000001d0 },
    { 0x00009a50, 0x00000030, 0x00000010 },
    { 0x00009a54, 0x00000070, 0x00000050 },
    { 0x00009a58, 0x00000191, 0x00000090 },
    { 0x00009a5c, 0x000001d1, 0x00000151 },
    { 0x00009a60, 0x00000011, 0x00000191 },
    { 0x00009a64, 0x00000051, 0x000001d1 },
    { 0x00009a68, 0x00000091, 0x00000011 },
    { 0x00009a6c, 0x000001b8, 0x00000051 },
    { 0x00009a70, 0x000001f8, 0x00000198 },
    { 0x00009a74, 0x00000038, 0x000001d8 },
    { 0x00009a78, 0x00000078, 0x00000018 },
    { 0x00009a7c, 0x00000199, 0x00000058 },
    { 0x00009a80, 0x000001d9, 0x00000098 },
    { 0x00009a84, 0x00000019, 0x00000159 },
    { 0x00009a88, 0x00000059, 0x00000199 },
    { 0x00009a8c, 0x00000099, 0x000001d9 },
    { 0x00009a90, 0x000000d9, 0x00000019 },
    { 0x00009a94, 0x000000f9, 0x00000059 },
    { 0x00009a98, 0x000000f9, 0x00000099 },
    { 0x00009a9c, 0x000000f9, 0x000000d9 },
    { 0x00009aa0, 0x000000f9, 0x000000f9 },
    { 0x00009aa4, 0x000000f9, 0x000000f9 },
    { 0x00009aa8, 0x000000f9, 0x000000f9 },
    { 0x00009aac, 0x000000f9, 0x000000f9 },
    { 0x00009ab0, 0x000000f9, 0x000000f9 },
    { 0x00009ab4, 0x000000f9, 0x000000f9 },
    { 0x00009ab8, 0x000000f9, 0x000000f9 },
    { 0x00009abc, 0x000000f9, 0x000000f9 },
    { 0x00009ac0, 0x000000f9, 0x000000f9 },
    { 0x00009ac4, 0x000000f9, 0x000000f9 },
    { 0x00009ac8, 0x000000f9, 0x000000f9 },
    { 0x00009acc, 0x000000f9, 0x000000f9 },
    { 0x00009ad0, 0x000000f9, 0x000000f9 },
    { 0x00009ad4, 0x000000f9, 0x000000f9 },
    { 0x00009ad8, 0x000000f9, 0x000000f9 },
    { 0x00009adc, 0x000000f9, 0x000000f9 },
    { 0x00009ae0, 0x000000f9, 0x000000f9 },
    { 0x00009ae4, 0x000000f9, 0x000000f9 },
    { 0x00009ae8, 0x000000f9, 0x000000f9 },
    { 0x00009aec, 0x000000f9, 0x000000f9 },
    { 0x00009af0, 0x000000f9, 0x000000f9 },
    { 0x00009af4, 0x000000f9, 0x000000f9 },
    { 0x00009af8, 0x000000f9, 0x000000f9 },
    { 0x00009afc, 0x000000f9, 0x000000f9 },
};
1829
/* AR9160 radio Bank 1 initialization: { register offset, value } pairs. */
static const u32 ar5416Bank1_9160[][2] = {
    { 0x000098b0, 0x02108421 },
    { 0x000098ec, 0x00000008 },
};
1834
/* AR9160 radio Bank 2 initialization: { register offset, value } pairs. */
static const u32 ar5416Bank2_9160[][2] = {
    { 0x000098b0, 0x0e73ff17 },
    { 0x000098e0, 0x00000420 },
};
1839
/* AR9160 radio Bank 3: { register offset, value-A, value-B } — two value
 * columns, presumably selected per band; confirm against the write loop. */
static const u32 ar5416Bank3_9160[][3] = {
    { 0x000098f0, 0x01400018, 0x01c00018 },
};
1843
/* AR9160 radio Bank 6 initialization: repeated writes to 0x989c serially
 * shift the bank contents, terminated by the 0x98d0 write. Two value
 * columns, presumably per band — confirm against the write loop. */
static const u32 ar5416Bank6_9160[][3] = {
    { 0x0000989c, 0x00000000, 0x00000000 },
    { 0x0000989c, 0x00000000, 0x00000000 },
    { 0x0000989c, 0x00000000, 0x00000000 },
    { 0x0000989c, 0x00e00000, 0x00e00000 },
    { 0x0000989c, 0x005e0000, 0x005e0000 },
    { 0x0000989c, 0x00120000, 0x00120000 },
    { 0x0000989c, 0x00620000, 0x00620000 },
    { 0x0000989c, 0x00020000, 0x00020000 },
    { 0x0000989c, 0x00ff0000, 0x00ff0000 },
    { 0x0000989c, 0x00ff0000, 0x00ff0000 },
    { 0x0000989c, 0x00ff0000, 0x00ff0000 },
    { 0x0000989c, 0x40ff0000, 0x40ff0000 },
    { 0x0000989c, 0x005f0000, 0x005f0000 },
    { 0x0000989c, 0x00870000, 0x00870000 },
    { 0x0000989c, 0x00f90000, 0x00f90000 },
    { 0x0000989c, 0x007b0000, 0x007b0000 },
    { 0x0000989c, 0x00ff0000, 0x00ff0000 },
    { 0x0000989c, 0x00f50000, 0x00f50000 },
    { 0x0000989c, 0x00dc0000, 0x00dc0000 },
    { 0x0000989c, 0x00110000, 0x00110000 },
    { 0x0000989c, 0x006100a8, 0x006100a8 },
    { 0x0000989c, 0x004210a2, 0x004210a2 },
    { 0x0000989c, 0x0014008f, 0x0014008f },
    { 0x0000989c, 0x00c40003, 0x00c40003 },
    { 0x0000989c, 0x003000f2, 0x003000f2 },
    { 0x0000989c, 0x00440016, 0x00440016 },
    { 0x0000989c, 0x00410040, 0x00410040 },
    { 0x0000989c, 0x0001805e, 0x0001805e },
    { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
    { 0x0000989c, 0x000000f1, 0x000000f1 },
    { 0x0000989c, 0x00002081, 0x00002081 },
    { 0x0000989c, 0x000000d4, 0x000000d4 },
    { 0x000098d0, 0x0000000f, 0x0010000f },
};
1879
/* AR9160 radio Bank 6 variant used when per-chain transmit power control
 * (TPC) is enabled — differs from ar5416Bank6_9160 in several rows.
 * Same serial-shift pattern: repeated 0x989c writes terminated by 0x98d0. */
static const u32 ar5416Bank6TPC_9160[][3] = {
    { 0x0000989c, 0x00000000, 0x00000000 },
    { 0x0000989c, 0x00000000, 0x00000000 },
    { 0x0000989c, 0x00000000, 0x00000000 },
    { 0x0000989c, 0x00e00000, 0x00e00000 },
    { 0x0000989c, 0x005e0000, 0x005e0000 },
    { 0x0000989c, 0x00120000, 0x00120000 },
    { 0x0000989c, 0x00620000, 0x00620000 },
    { 0x0000989c, 0x00020000, 0x00020000 },
    { 0x0000989c, 0x00ff0000, 0x00ff0000 },
    { 0x0000989c, 0x00ff0000, 0x00ff0000 },
    { 0x0000989c, 0x00ff0000, 0x00ff0000 },
    { 0x0000989c, 0x40ff0000, 0x40ff0000 },
    { 0x0000989c, 0x005f0000, 0x005f0000 },
    { 0x0000989c, 0x00870000, 0x00870000 },
    { 0x0000989c, 0x00f90000, 0x00f90000 },
    { 0x0000989c, 0x007b0000, 0x007b0000 },
    { 0x0000989c, 0x00ff0000, 0x00ff0000 },
    { 0x0000989c, 0x00f50000, 0x00f50000 },
    { 0x0000989c, 0x00dc0000, 0x00dc0000 },
    { 0x0000989c, 0x00110000, 0x00110000 },
    { 0x0000989c, 0x006100a8, 0x006100a8 },
    { 0x0000989c, 0x00423022, 0x00423022 },
    { 0x0000989c, 0x2014008f, 0x2014008f },
    { 0x0000989c, 0x00c40002, 0x00c40002 },
    { 0x0000989c, 0x003000f2, 0x003000f2 },
    { 0x0000989c, 0x00440016, 0x00440016 },
    { 0x0000989c, 0x00410040, 0x00410040 },
    { 0x0000989c, 0x0001805e, 0x0001805e },
    { 0x0000989c, 0x0000c0ab, 0x0000c0ab },
    { 0x0000989c, 0x000000e1, 0x000000e1 },
    { 0x0000989c, 0x00007080, 0x00007080 },
    { 0x0000989c, 0x000000d4, 0x000000d4 },
    { 0x000098d0, 0x0000000f, 0x0010000f },
};
1915
/* AR9160 radio Bank 7 initialization: serial shift via 0x989c,
 * terminated by the 0x98cc write. */
static const u32 ar5416Bank7_9160[][2] = {
    { 0x0000989c, 0x00000500 },
    { 0x0000989c, 0x00000800 },
    { 0x000098cc, 0x0000000e },
};
1922static u32 ar5416Addac_9160[][2] = {
1923 {0x0000989c, 0x00000000 },
1924 {0x0000989c, 0x00000000 },
1925 {0x0000989c, 0x00000000 },
1926 {0x0000989c, 0x00000000 },
1927 {0x0000989c, 0x00000000 },
1928 {0x0000989c, 0x00000000 },
1929 {0x0000989c, 0x000000c0 },
1930 {0x0000989c, 0x00000018 },
1931 {0x0000989c, 0x00000004 },
1932 {0x0000989c, 0x00000000 },
1933 {0x0000989c, 0x00000000 },
1934 {0x0000989c, 0x00000000 },
1935 {0x0000989c, 0x00000000 },
1936 {0x0000989c, 0x00000000 },
1937 {0x0000989c, 0x00000000 },
1938 {0x0000989c, 0x00000000 },
1939 {0x0000989c, 0x00000000 },
1940 {0x0000989c, 0x00000000 },
1941 {0x0000989c, 0x00000000 },
1942 {0x0000989c, 0x00000000 },
1943 {0x0000989c, 0x00000000 },
1944 {0x0000989c, 0x000000c0 },
1945 {0x0000989c, 0x00000019 },
1946 {0x0000989c, 0x00000004 },
1947 {0x0000989c, 0x00000000 },
1948 {0x0000989c, 0x00000000 },
1949 {0x0000989c, 0x00000000 },
1950 {0x0000989c, 0x00000004 },
1951 {0x0000989c, 0x00000003 },
1952 {0x0000989c, 0x00000008 },
1953 {0x0000989c, 0x00000000 },
1954 {0x000098cc, 0x00000000 },
1955};
1956
1957static u32 ar5416Addac_91601_1[][2] = {
1958 {0x0000989c, 0x00000000 },
1959 {0x0000989c, 0x00000000 },
1960 {0x0000989c, 0x00000000 },
1961 {0x0000989c, 0x00000000 },
1962 {0x0000989c, 0x00000000 },
1963 {0x0000989c, 0x00000000 },
1964 {0x0000989c, 0x000000c0 },
1965 {0x0000989c, 0x00000018 },
1966 {0x0000989c, 0x00000004 },
1967 {0x0000989c, 0x00000000 },
1968 {0x0000989c, 0x00000000 },
1969 {0x0000989c, 0x00000000 },
1970 {0x0000989c, 0x00000000 },
1971 {0x0000989c, 0x00000000 },
1972 {0x0000989c, 0x00000000 },
1973 {0x0000989c, 0x00000000 },
1974 {0x0000989c, 0x00000000 },
1975 {0x0000989c, 0x00000000 },
1976 {0x0000989c, 0x00000000 },
1977 {0x0000989c, 0x00000000 },
1978 {0x0000989c, 0x00000000 },
1979 {0x0000989c, 0x000000c0 },
1980 {0x0000989c, 0x00000019 },
1981 {0x0000989c, 0x00000004 },
1982 {0x0000989c, 0x00000000 },
1983 {0x0000989c, 0x00000000 },
1984 {0x0000989c, 0x00000000 },
1985 {0x0000989c, 0x00000000 },
1986 {0x0000989c, 0x00000000 },
1987 {0x0000989c, 0x00000000 },
1988 {0x0000989c, 0x00000000 },
1989 {0x000098cc, 0x00000000 },
1990};
1991
1992/* XXX 9280 1 */
1993static const u32 ar9280Modes_9280[][6] = { 20static const u32 ar9280Modes_9280[][6] = {
1994 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 21 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
1995 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 22 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -2766,7 +793,7 @@ static const u32 ar9280Common_9280_2[][2] = {
2766 { 0x00008258, 0x00000000 }, 793 { 0x00008258, 0x00000000 },
2767 { 0x0000825c, 0x400000ff }, 794 { 0x0000825c, 0x400000ff },
2768 { 0x00008260, 0x00080922 }, 795 { 0x00008260, 0x00080922 },
2769 { 0x00008264, 0xa8a00010 }, 796 { 0x00008264, 0x88a00010 },
2770 { 0x00008270, 0x00000000 }, 797 { 0x00008270, 0x00000000 },
2771 { 0x00008274, 0x40000000 }, 798 { 0x00008274, 0x40000000 },
2772 { 0x00008278, 0x003e4180 }, 799 { 0x00008278, 0x003e4180 },
@@ -3441,7 +1468,7 @@ static const u32 ar9280PciePhy_clkreq_always_on_L1_9280[][2] = {
3441}; 1468};
3442 1469
3443/* AR9285 Revsion 10*/ 1470/* AR9285 Revsion 10*/
3444static const u_int32_t ar9285Modes_9285[][6] = { 1471static const u32 ar9285Modes_9285[][6] = {
3445 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 1472 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
3446 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 1473 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
3447 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, 1474 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -3763,7 +1790,7 @@ static const u_int32_t ar9285Modes_9285[][6] = {
3763 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 1790 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
3764}; 1791};
3765 1792
3766static const u_int32_t ar9285Common_9285[][2] = { 1793static const u32 ar9285Common_9285[][2] = {
3767 { 0x0000000c, 0x00000000 }, 1794 { 0x0000000c, 0x00000000 },
3768 { 0x00000030, 0x00020045 }, 1795 { 0x00000030, 0x00020045 },
3769 { 0x00000034, 0x00000005 }, 1796 { 0x00000034, 0x00000005 },
@@ -3936,7 +1963,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
3936 { 0x00008258, 0x00000000 }, 1963 { 0x00008258, 0x00000000 },
3937 { 0x0000825c, 0x400000ff }, 1964 { 0x0000825c, 0x400000ff },
3938 { 0x00008260, 0x00080922 }, 1965 { 0x00008260, 0x00080922 },
3939 { 0x00008264, 0xa8a00010 }, 1966 { 0x00008264, 0x88a00010 },
3940 { 0x00008270, 0x00000000 }, 1967 { 0x00008270, 0x00000000 },
3941 { 0x00008274, 0x40000000 }, 1968 { 0x00008274, 0x40000000 },
3942 { 0x00008278, 0x003e4180 }, 1969 { 0x00008278, 0x003e4180 },
@@ -4096,7 +2123,7 @@ static const u_int32_t ar9285Common_9285[][2] = {
4096 { 0x00007870, 0x10142c00 }, 2123 { 0x00007870, 0x10142c00 },
4097}; 2124};
4098 2125
4099static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = { 2126static const u32 ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
4100 {0x00004040, 0x9248fd00 }, 2127 {0x00004040, 0x9248fd00 },
4101 {0x00004040, 0x24924924 }, 2128 {0x00004040, 0x24924924 },
4102 {0x00004040, 0xa8000019 }, 2129 {0x00004040, 0xa8000019 },
@@ -4109,7 +2136,7 @@ static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285[][2] = {
4109 {0x00004044, 0x00000000 }, 2136 {0x00004044, 0x00000000 },
4110}; 2137};
4111 2138
4112static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = { 2139static const u32 ar9285PciePhy_clkreq_off_L1_9285[][2] = {
4113 {0x00004040, 0x9248fd00 }, 2140 {0x00004040, 0x9248fd00 },
4114 {0x00004040, 0x24924924 }, 2141 {0x00004040, 0x24924924 },
4115 {0x00004040, 0xa8000019 }, 2142 {0x00004040, 0xa8000019 },
@@ -4123,7 +2150,7 @@ static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285[][2] = {
4123}; 2150};
4124 2151
4125/* AR9285 v1_2 PCI Register Writes. Created: 04/13/09 */ 2152/* AR9285 v1_2 PCI Register Writes. Created: 04/13/09 */
4126static const u_int32_t ar9285Modes_9285_1_2[][6] = { 2153static const u32 ar9285Modes_9285_1_2[][6] = {
4127 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 2154 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
4128 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 2155 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
4129 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 2156 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -4184,7 +2211,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4184 { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, 2211 { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
4185 { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, 2212 { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
4186 { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, 2213 { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
4187 { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 }, 2214 { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
4188 { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, 2215 { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
4189 { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, 2216 { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
4190 { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, 2217 { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -4198,8 +2225,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4198 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 2225 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
4199 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 2226 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
4200 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 2227 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
4201 { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, 2228 { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
4202 { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 2229 { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
4203 { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 2230 { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
4204 { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, 2231 { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
4205 { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, 2232 { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -4312,7 +2339,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4312 { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, 2339 { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
4313 { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, 2340 { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
4314 { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, 2341 { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
4315 { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 }, 2342 { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
4316 { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, 2343 { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
4317 { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, 2344 { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
4318 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, 2345 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -4326,8 +2353,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4326 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 2353 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
4327 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 2354 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
4328 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 2355 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
4329 { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, 2356 { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
4330 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 2357 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
4331 { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 2358 { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
4332 { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, 2359 { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
4333 { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, 2360 { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -4429,7 +2456,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4429 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 2456 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
4430}; 2457};
4431 2458
4432static const u_int32_t ar9285Common_9285_1_2[][2] = { 2459static const u32 ar9285Common_9285_1_2[][2] = {
4433 { 0x0000000c, 0x00000000 }, 2460 { 0x0000000c, 0x00000000 },
4434 { 0x00000030, 0x00020045 }, 2461 { 0x00000030, 0x00020045 },
4435 { 0x00000034, 0x00000005 }, 2462 { 0x00000034, 0x00000005 },
@@ -4731,17 +2758,12 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
4731 { 0x00007808, 0x54214514 }, 2758 { 0x00007808, 0x54214514 },
4732 { 0x0000780c, 0x02025830 }, 2759 { 0x0000780c, 0x02025830 },
4733 { 0x00007810, 0x71c0d388 }, 2760 { 0x00007810, 0x71c0d388 },
4734 { 0x00007814, 0x924934a8 },
4735 { 0x0000781c, 0x00000000 }, 2761 { 0x0000781c, 0x00000000 },
4736 { 0x00007824, 0x00d86fff }, 2762 { 0x00007824, 0x00d86fff },
4737 { 0x00007828, 0x26d2491b },
4738 { 0x0000782c, 0x6e36d97b }, 2763 { 0x0000782c, 0x6e36d97b },
4739 { 0x00007830, 0xedb6d96e },
4740 { 0x00007834, 0x71400087 }, 2764 { 0x00007834, 0x71400087 },
4741 { 0x0000783c, 0x0001fffe },
4742 { 0x00007840, 0xffeb1a20 },
4743 { 0x00007844, 0x000c0db6 }, 2765 { 0x00007844, 0x000c0db6 },
4744 { 0x00007848, 0x6db61b6f }, 2766 { 0x00007848, 0x6db6246f },
4745 { 0x0000784c, 0x6d9b66db }, 2767 { 0x0000784c, 0x6d9b66db },
4746 { 0x00007850, 0x6d8c6dba }, 2768 { 0x00007850, 0x6d8c6dba },
4747 { 0x00007854, 0x00040000 }, 2769 { 0x00007854, 0x00040000 },
@@ -4753,7 +2775,7 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
4753 { 0x00007870, 0x10142c00 }, 2775 { 0x00007870, 0x10142c00 },
4754}; 2776};
4755 2777
4756static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = { 2778static const u32 ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
4757 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 2779 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
4758 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2780 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4759 { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 }, 2781 { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
@@ -4777,7 +2799,12 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
4777 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 2799 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4778 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 2800 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4779 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 2801 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2802 { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
2803 { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
2804 { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
4780 { 0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803 }, 2805 { 0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803 },
2806 { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
2807 { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
4781 { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe }, 2808 { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
4782 { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 }, 2809 { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
4783 { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 }, 2810 { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
@@ -4789,7 +2816,7 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
4789 { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 }, 2816 { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
4790}; 2817};
4791 2818
4792static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = { 2819static const u32 ar9285Modes_original_tx_gain_9285_1_2[][6] = {
4793 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 2820 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
4794 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2821 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4795 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 }, 2822 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
@@ -4813,7 +2840,52 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
4813 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 2840 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4814 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 2841 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4815 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 2842 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2843 { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
2844 { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
2845 { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
4816 { 0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801 }, 2846 { 0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801 },
2847 { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
2848 { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
2849 { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
2850 { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
2851 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
2852 { 0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
2853 { 0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c },
2854 { 0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
2855 { 0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
2856 { 0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
2857 { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
2858};
2859
2860static const u32 ar9285Modes_XE2_0_normal_power[][6] = {
2861 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2862 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
2863 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
2864 { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
2865 { 0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000 },
2866 { 0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000 },
2867 { 0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000 },
2868 { 0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000 },
2869 { 0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000 },
2870 { 0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000 },
2871 { 0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000 },
2872 { 0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000 },
2873 { 0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000 },
2874 { 0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000 },
2875 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
2876 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
2877 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2878 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2879 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2880 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2881 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2882 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2883 { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
2884 { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
2885 { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6dbae },
2886 { 0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441 },
2887 { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
2888 { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
4817 { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 }, 2889 { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
4818 { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 }, 2890 { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
4819 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 }, 2891 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
@@ -4825,7 +2897,47 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
4825 { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, 2897 { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
4826}; 2898};
4827 2899
4828static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = { 2900static const u32 ar9285Modes_XE2_0_high_power[][6] = {
2901 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2902 { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
2903 { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 },
2904 { 0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000 },
2905 { 0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000 },
2906 { 0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000 },
2907 { 0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000 },
2908 { 0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000 },
2909 { 0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000 },
2910 { 0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000 },
2911 { 0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000 },
2912 { 0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000 },
2913 { 0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000 },
2914 { 0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000 },
2915 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
2916 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
2917 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2918 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2919 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2920 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2921 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2922 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
2923 { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
2924 { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
2925 { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e },
2926 { 0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443 },
2927 { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
2928 { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
2929 { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
2930 { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
2931 { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
2932 { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
2933 { 0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7 },
2934 { 0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
2935 { 0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
2936 { 0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
2937 { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
2938};
2939
2940static const u32 ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
4829 {0x00004040, 0x9248fd00 }, 2941 {0x00004040, 0x9248fd00 },
4830 {0x00004040, 0x24924924 }, 2942 {0x00004040, 0x24924924 },
4831 {0x00004040, 0xa8000019 }, 2943 {0x00004040, 0xa8000019 },
@@ -4838,7 +2950,7 @@ static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
4838 {0x00004044, 0x00000000 }, 2950 {0x00004044, 0x00000000 },
4839}; 2951};
4840 2952
4841static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = { 2953static const u32 ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
4842 {0x00004040, 0x9248fd00 }, 2954 {0x00004040, 0x9248fd00 },
4843 {0x00004040, 0x24924924 }, 2955 {0x00004040, 0x24924924 },
4844 {0x00004040, 0xa8000019 }, 2956 {0x00004040, 0xa8000019 },
@@ -4852,7 +2964,7 @@ static const u_int32_t ar9285PciePhy_clkreq_off_L1_9285_1_2[][2] = {
4852}; 2964};
4853 2965
4854/* AR9287 Revision 10 */ 2966/* AR9287 Revision 10 */
4855static const u_int32_t ar9287Modes_9287_1_0[][6] = { 2967static const u32 ar9287Modes_9287_1_0[][6] = {
4856 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 2968 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
4857 { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 }, 2969 { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
4858 { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 }, 2970 { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -4899,7 +3011,7 @@ static const u_int32_t ar9287Modes_9287_1_0[][6] = {
4899 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 3011 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4900}; 3012};
4901 3013
4902static const u_int32_t ar9287Common_9287_1_0[][2] = { 3014static const u32 ar9287Common_9287_1_0[][2] = {
4903 { 0x0000000c, 0x00000000 }, 3015 { 0x0000000c, 0x00000000 },
4904 { 0x00000030, 0x00020015 }, 3016 { 0x00000030, 0x00020015 },
4905 { 0x00000034, 0x00000005 }, 3017 { 0x00000034, 0x00000005 },
@@ -5073,7 +3185,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
5073 { 0x00008258, 0x00000000 }, 3185 { 0x00008258, 0x00000000 },
5074 { 0x0000825c, 0x400000ff }, 3186 { 0x0000825c, 0x400000ff },
5075 { 0x00008260, 0x00080922 }, 3187 { 0x00008260, 0x00080922 },
5076 { 0x00008264, 0xa8a00010 }, 3188 { 0x00008264, 0x88a00010 },
5077 { 0x00008270, 0x00000000 }, 3189 { 0x00008270, 0x00000000 },
5078 { 0x00008274, 0x40000000 }, 3190 { 0x00008274, 0x40000000 },
5079 { 0x00008278, 0x003e4180 }, 3191 { 0x00008278, 0x003e4180 },
@@ -5270,7 +3382,7 @@ static const u_int32_t ar9287Common_9287_1_0[][2] = {
5270 { 0x000078b8, 0x2a850160 }, 3382 { 0x000078b8, 0x2a850160 },
5271}; 3383};
5272 3384
5273static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = { 3385static const u32 ar9287Modes_tx_gain_9287_1_0[][6] = {
5274 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 3386 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
5275 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 3387 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
5276 { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 }, 3388 { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
@@ -5320,7 +3432,7 @@ static const u_int32_t ar9287Modes_tx_gain_9287_1_0[][6] = {
5320}; 3432};
5321 3433
5322 3434
5323static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = { 3435static const u32 ar9287Modes_rx_gain_9287_1_0[][6] = {
5324 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 3436 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
5325 { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, 3437 { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
5326 { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, 3438 { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
@@ -5582,7 +3694,7 @@ static const u_int32_t ar9287Modes_rx_gain_9287_1_0[][6] = {
5582 { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 }, 3694 { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
5583}; 3695};
5584 3696
5585static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = { 3697static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
5586 {0x00004040, 0x9248fd00 }, 3698 {0x00004040, 0x9248fd00 },
5587 {0x00004040, 0x24924924 }, 3699 {0x00004040, 0x24924924 },
5588 {0x00004040, 0xa8000019 }, 3700 {0x00004040, 0xa8000019 },
@@ -5595,7 +3707,7 @@ static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_0[][2] = {
5595 {0x00004044, 0x00000000 }, 3707 {0x00004044, 0x00000000 },
5596}; 3708};
5597 3709
5598static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = { 3710static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
5599 {0x00004040, 0x9248fd00 }, 3711 {0x00004040, 0x9248fd00 },
5600 {0x00004040, 0x24924924 }, 3712 {0x00004040, 0x24924924 },
5601 {0x00004040, 0xa8000019 }, 3713 {0x00004040, 0xa8000019 },
@@ -5610,7 +3722,7 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_0[][2] = {
5610 3722
5611/* AR9287 Revision 11 */ 3723/* AR9287 Revision 11 */
5612 3724
5613static const u_int32_t ar9287Modes_9287_1_1[][6] = { 3725static const u32 ar9287Modes_9287_1_1[][6] = {
5614 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 3726 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
5615 { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 }, 3727 { 0x00001030, 0x00000000, 0x00000000, 0x000002c0, 0x00000160, 0x000001e0 },
5616 { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 }, 3728 { 0x00001070, 0x00000000, 0x00000000, 0x00000318, 0x0000018c, 0x000001e0 },
@@ -5657,7 +3769,7 @@ static const u_int32_t ar9287Modes_9287_1_1[][6] = {
5657 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 3769 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
5658}; 3770};
5659 3771
5660static const u_int32_t ar9287Common_9287_1_1[][2] = { 3772static const u32 ar9287Common_9287_1_1[][2] = {
5661 { 0x0000000c, 0x00000000 }, 3773 { 0x0000000c, 0x00000000 },
5662 { 0x00000030, 0x00020015 }, 3774 { 0x00000030, 0x00020015 },
5663 { 0x00000034, 0x00000005 }, 3775 { 0x00000034, 0x00000005 },
@@ -6027,21 +4139,22 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
6027 4139
6028/* 4140/*
6029 * For Japanese regulatory requirements, 2484 MHz requires the following three 4141 * For Japanese regulatory requirements, 2484 MHz requires the following three
6030 * registers be programmed differently from the channel between 2412 and 2472 MHz. 4142 * registers be programmed differently from the channel between 2412 and
4143 * 2472 MHz.
6031 */ 4144 */
6032static const u_int32_t ar9287Common_normal_cck_fir_coeff_92871_1[][2] = { 4145static const u32 ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
6033 { 0x0000a1f4, 0x00fffeff }, 4146 { 0x0000a1f4, 0x00fffeff },
6034 { 0x0000a1f8, 0x00f5f9ff }, 4147 { 0x0000a1f8, 0x00f5f9ff },
6035 { 0x0000a1fc, 0xb79f6427 }, 4148 { 0x0000a1fc, 0xb79f6427 },
6036}; 4149};
6037 4150
6038static const u_int32_t ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = { 4151static const u32 ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
6039 { 0x0000a1f4, 0x00000000 }, 4152 { 0x0000a1f4, 0x00000000 },
6040 { 0x0000a1f8, 0xefff0301 }, 4153 { 0x0000a1f8, 0xefff0301 },
6041 { 0x0000a1fc, 0xca9228ee }, 4154 { 0x0000a1fc, 0xca9228ee },
6042}; 4155};
6043 4156
6044static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = { 4157static const u32 ar9287Modes_tx_gain_9287_1_1[][6] = {
6045 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 4158 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
6046 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 4159 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
6047 { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 }, 4160 { 0x0000a304, 0x00000000, 0x00000000, 0x00004002, 0x00004002, 0x00004002 },
@@ -6090,7 +4203,7 @@ static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
6090 { 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 }, 4203 { 0x0000a274, 0x0a180000, 0x0a180000, 0x0a1aa000, 0x0a1aa000, 0x0a1aa000 },
6091}; 4204};
6092 4205
6093static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = { 4206static const u32 ar9287Modes_rx_gain_9287_1_1[][6] = {
6094 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 4207 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
6095 { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 }, 4208 { 0x00009a00, 0x00000000, 0x00000000, 0x0000a120, 0x0000a120, 0x0000a120 },
6096 { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 }, 4209 { 0x00009a04, 0x00000000, 0x00000000, 0x0000a124, 0x0000a124, 0x0000a124 },
@@ -6352,7 +4465,7 @@ static const u_int32_t ar9287Modes_rx_gain_9287_1_1[][6] = {
6352 { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 }, 4465 { 0x0000a848, 0x00000000, 0x00000000, 0x00001067, 0x00001067, 0x00001067 },
6353}; 4466};
6354 4467
6355static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = { 4468static const u32 ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
6356 {0x00004040, 0x9248fd00 }, 4469 {0x00004040, 0x9248fd00 },
6357 {0x00004040, 0x24924924 }, 4470 {0x00004040, 0x24924924 },
6358 {0x00004040, 0xa8000019 }, 4471 {0x00004040, 0xa8000019 },
@@ -6365,7 +4478,7 @@ static const u_int32_t ar9287PciePhy_clkreq_always_on_L1_9287_1_1[][2] = {
6365 {0x00004044, 0x00000000 }, 4478 {0x00004044, 0x00000000 },
6366}; 4479};
6367 4480
6368static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = { 4481static const u32 ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
6369 {0x00004040, 0x9248fd00 }, 4482 {0x00004040, 0x9248fd00 },
6370 {0x00004040, 0x24924924 }, 4483 {0x00004040, 0x24924924 },
6371 {0x00004040, 0xa8000019 }, 4484 {0x00004040, 0xa8000019 },
@@ -6380,7 +4493,7 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
6380 4493
6381 4494
6382/* AR9271 initialization values automaticaly created: 06/04/09 */ 4495/* AR9271 initialization values automaticaly created: 06/04/09 */
6383static const u_int32_t ar9271Modes_9271[][6] = { 4496static const u32 ar9271Modes_9271[][6] = {
6384 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 4497 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
6385 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 4498 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
6386 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, 4499 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -6441,7 +4554,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
6441 { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, 4554 { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
6442 { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, 4555 { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
6443 { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, 4556 { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
6444 { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 }, 4557 { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
6445 { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, 4558 { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
6446 { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, 4559 { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
6447 { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, 4560 { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -6455,8 +4568,8 @@ static const u_int32_t ar9271Modes_9271[][6] = {
6455 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 4568 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
6456 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 4569 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
6457 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 4570 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
6458 { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, 4571 { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
6459 { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 4572 { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
6460 { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 4573 { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
6461 { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, 4574 { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
6462 { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, 4575 { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -6569,7 +4682,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
6569 { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, 4682 { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
6570 { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, 4683 { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
6571 { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, 4684 { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
6572 { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 }, 4685 { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
6573 { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, 4686 { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
6574 { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, 4687 { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
6575 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, 4688 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -6583,8 +4696,8 @@ static const u_int32_t ar9271Modes_9271[][6] = {
6583 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 4696 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
6584 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 4697 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
6585 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 4698 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
6586 { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, 4699 { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
6587 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 4700 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
6588 { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 4701 { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
6589 { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, 4702 { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
6590 { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, 4703 { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -6683,29 +4796,10 @@ static const u_int32_t ar9271Modes_9271[][6] = {
6683 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, 4796 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
6684 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 4797 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
6685 { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 }, 4798 { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 },
6686 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 },
6687 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
6688 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
6689 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
6690 { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
6691 { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 },
6692 { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 },
6693 { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 },
6694 { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 },
6695 { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 },
6696 { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 },
6697 { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 },
6698 { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 },
6699 { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 },
6700 { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
6701 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
6702 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
6703 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
6704 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
6705 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 4799 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
6706}; 4800};
6707 4801
6708static const u_int32_t ar9271Common_9271[][2] = { 4802static const u32 ar9271Common_9271[][2] = {
6709 { 0x0000000c, 0x00000000 }, 4803 { 0x0000000c, 0x00000000 },
6710 { 0x00000030, 0x00020045 }, 4804 { 0x00000030, 0x00020045 },
6711 { 0x00000034, 0x00000005 }, 4805 { 0x00000034, 0x00000005 },
@@ -6910,13 +5004,10 @@ static const u_int32_t ar9271Common_9271[][2] = {
6910 { 0x00007810, 0x71c0d388 }, 5004 { 0x00007810, 0x71c0d388 },
6911 { 0x00007814, 0x924934a8 }, 5005 { 0x00007814, 0x924934a8 },
6912 { 0x0000781c, 0x00000000 }, 5006 { 0x0000781c, 0x00000000 },
6913 { 0x00007820, 0x00000c04 },
6914 { 0x00007824, 0x00d8abff },
6915 { 0x00007828, 0x66964300 }, 5007 { 0x00007828, 0x66964300 },
6916 { 0x0000782c, 0x8db6d961 }, 5008 { 0x0000782c, 0x8db6d961 },
6917 { 0x00007830, 0x8db6d96c }, 5009 { 0x00007830, 0x8db6d96c },
6918 { 0x00007834, 0x6140008b }, 5010 { 0x00007834, 0x6140008b },
6919 { 0x00007838, 0x00000029 },
6920 { 0x0000783c, 0x72ee0a72 }, 5011 { 0x0000783c, 0x72ee0a72 },
6921 { 0x00007840, 0xbbfffffc }, 5012 { 0x00007840, 0xbbfffffc },
6922 { 0x00007844, 0x000c0db6 }, 5013 { 0x00007844, 0x000c0db6 },
@@ -6929,7 +5020,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
6929 { 0x00007860, 0x21084210 }, 5020 { 0x00007860, 0x21084210 },
6930 { 0x00007864, 0xf7d7ffde }, 5021 { 0x00007864, 0xf7d7ffde },
6931 { 0x00007868, 0xc2034080 }, 5022 { 0x00007868, 0xc2034080 },
6932 { 0x0000786c, 0x48609eb4 },
6933 { 0x00007870, 0x10142c00 }, 5023 { 0x00007870, 0x10142c00 },
6934 { 0x00009808, 0x00000000 }, 5024 { 0x00009808, 0x00000000 },
6935 { 0x0000980c, 0xafe68e30 }, 5025 { 0x0000980c, 0xafe68e30 },
@@ -6982,9 +5072,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
6982 { 0x000099e8, 0x3c466478 }, 5072 { 0x000099e8, 0x3c466478 },
6983 { 0x000099ec, 0x0cc80caa }, 5073 { 0x000099ec, 0x0cc80caa },
6984 { 0x000099f0, 0x00000000 }, 5074 { 0x000099f0, 0x00000000 },
6985 { 0x0000a1f4, 0x00000000 },
6986 { 0x0000a1f8, 0x71733d01 },
6987 { 0x0000a1fc, 0xd0ad5c12 },
6988 { 0x0000a208, 0x803e68c8 }, 5075 { 0x0000a208, 0x803e68c8 },
6989 { 0x0000a210, 0x4080a333 }, 5076 { 0x0000a210, 0x4080a333 },
6990 { 0x0000a214, 0x00206c10 }, 5077 { 0x0000a214, 0x00206c10 },
@@ -7004,13 +5091,9 @@ static const u_int32_t ar9271Common_9271[][2] = {
7004 { 0x0000a260, 0xdfa90f01 }, 5091 { 0x0000a260, 0xdfa90f01 },
7005 { 0x0000a268, 0x00000000 }, 5092 { 0x0000a268, 0x00000000 },
7006 { 0x0000a26c, 0x0ebae9e6 }, 5093 { 0x0000a26c, 0x0ebae9e6 },
7007 { 0x0000a278, 0x3bdef7bd },
7008 { 0x0000a27c, 0x050e83bd },
7009 { 0x0000a388, 0x0c000000 }, 5094 { 0x0000a388, 0x0c000000 },
7010 { 0x0000a38c, 0x20202020 }, 5095 { 0x0000a38c, 0x20202020 },
7011 { 0x0000a390, 0x20202020 }, 5096 { 0x0000a390, 0x20202020 },
7012 { 0x0000a394, 0x3bdef7bd },
7013 { 0x0000a398, 0x000003bd },
7014 { 0x0000a39c, 0x00000001 }, 5097 { 0x0000a39c, 0x00000001 },
7015 { 0x0000a3a0, 0x00000000 }, 5098 { 0x0000a3a0, 0x00000000 },
7016 { 0x0000a3a4, 0x00000000 }, 5099 { 0x0000a3a4, 0x00000000 },
@@ -7025,8 +5108,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
7025 { 0x0000a3cc, 0x20202020 }, 5108 { 0x0000a3cc, 0x20202020 },
7026 { 0x0000a3d0, 0x20202020 }, 5109 { 0x0000a3d0, 0x20202020 },
7027 { 0x0000a3d4, 0x20202020 }, 5110 { 0x0000a3d4, 0x20202020 },
7028 { 0x0000a3dc, 0x3bdef7bd },
7029 { 0x0000a3e0, 0x000003bd },
7030 { 0x0000a3e4, 0x00000000 }, 5111 { 0x0000a3e4, 0x00000000 },
7031 { 0x0000a3e8, 0x18c43433 }, 5112 { 0x0000a3e8, 0x18c43433 },
7032 { 0x0000a3ec, 0x00f70081 }, 5113 { 0x0000a3ec, 0x00f70081 },
@@ -7046,7 +5127,104 @@ static const u_int32_t ar9271Common_9271[][2] = {
7046 { 0x0000d384, 0xf3307ff0 }, 5127 { 0x0000d384, 0xf3307ff0 },
7047}; 5128};
7048 5129
7049static const u_int32_t ar9271Modes_9271_1_0_only[][6] = { 5130static const u32 ar9271Common_normal_cck_fir_coeff_9271[][2] = {
5131 { 0x0000a1f4, 0x00fffeff },
5132 { 0x0000a1f8, 0x00f5f9ff },
5133 { 0x0000a1fc, 0xb79f6427 },
5134};
5135
5136static const u32 ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = {
5137 { 0x0000a1f4, 0x00000000 },
5138 { 0x0000a1f8, 0xefff0301 },
5139 { 0x0000a1fc, 0xca9228ee },
5140};
5141
5142static const u32 ar9271Modes_9271_1_0_only[][6] = {
7050 { 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 }, 5143 { 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 },
7051 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 5144 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
7052}; 5145};
5146
5147static const u32 ar9271Modes_9271_ANI_reg[][6] = {
5148 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
5149 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e },
5150 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
5151 { 0x0000986c, 0x06903881, 0x06903881, 0x06903881, 0x06903881, 0x06903881 },
5152 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
5153 { 0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8 },
5154 { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d },
5155 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
5156};
5157
5158static const u32 ar9271Modes_normal_power_tx_gain_9271[][6] = {
5159 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
5160 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
5161 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
5162 { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
5163 { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 },
5164 { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 },
5165 { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 },
5166 { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 },
5167 { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 },
5168 { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 },
5169 { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 },
5170 { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 },
5171 { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 },
5172 { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
5173 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
5174 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
5175 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
5176 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
5177 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
5178 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
5179 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
5180 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
5181 { 0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029, 0x00000029 },
5182 { 0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff },
5183 { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
5184 { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
5185 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 },
5186 { 0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
5187 { 0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd },
5188 { 0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
5189 { 0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
5190 { 0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
5191 { 0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
5192};
5193
5194static const u32 ar9271Modes_high_power_tx_gain_9271[][6] = {
5195 { 0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000 },
5196 { 0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000 },
5197 { 0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000 },
5198 { 0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240, 0x00000000 },
5199 { 0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241, 0x00000000 },
5200 { 0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600, 0x00000000 },
5201 { 0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800, 0x00000000 },
5202 { 0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802, 0x00000000 },
5203 { 0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805, 0x00000000 },
5204 { 0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41, 0x00000000 },
5205 { 0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00, 0x00000000 },
5206 { 0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40, 0x00000000 },
5207 { 0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80, 0x00000000 },
5208 { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
5209 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
5210 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
5211 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
5212 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
5213 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
5214 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
5215 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
5216 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
5217 { 0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b },
5218 { 0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff },
5219 { 0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6 },
5220 { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
5221 { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a212652, 0x0a212652, 0x0a22a652 },
5222 { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
5223 { 0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063 },
5224 { 0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
5225 { 0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
5226 { 0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
5227 { 0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
5228};
5229
5230#endif /* INITVALS_9002_10_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
new file mode 100644
index 000000000000..2be20d2070c4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -0,0 +1,480 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18
19#define AR_BufLen 0x00000fff
20
21static void ar9002_hw_rx_enable(struct ath_hw *ah)
22{
23 REG_WRITE(ah, AR_CR, AR_CR_RXE);
24}
25
26static void ar9002_hw_set_desc_link(void *ds, u32 ds_link)
27{
28 ((struct ath_desc*) ds)->ds_link = ds_link;
29}
30
31static void ar9002_hw_get_desc_link(void *ds, u32 **ds_link)
32{
33 *ds_link = &((struct ath_desc *)ds)->ds_link;
34}
35
36static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
37{
38 u32 isr = 0;
39 u32 mask2 = 0;
40 struct ath9k_hw_capabilities *pCap = &ah->caps;
41 u32 sync_cause = 0;
42 bool fatal_int = false;
43 struct ath_common *common = ath9k_hw_common(ah);
44
45 if (!AR_SREV_9100(ah)) {
46 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
47 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
48 == AR_RTC_STATUS_ON) {
49 isr = REG_READ(ah, AR_ISR);
50 }
51 }
52
53 sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
54 AR_INTR_SYNC_DEFAULT;
55
56 *masked = 0;
57
58 if (!isr && !sync_cause)
59 return false;
60 } else {
61 *masked = 0;
62 isr = REG_READ(ah, AR_ISR);
63 }
64
65 if (isr) {
66 if (isr & AR_ISR_BCNMISC) {
67 u32 isr2;
68 isr2 = REG_READ(ah, AR_ISR_S2);
69 if (isr2 & AR_ISR_S2_TIM)
70 mask2 |= ATH9K_INT_TIM;
71 if (isr2 & AR_ISR_S2_DTIM)
72 mask2 |= ATH9K_INT_DTIM;
73 if (isr2 & AR_ISR_S2_DTIMSYNC)
74 mask2 |= ATH9K_INT_DTIMSYNC;
75 if (isr2 & (AR_ISR_S2_CABEND))
76 mask2 |= ATH9K_INT_CABEND;
77 if (isr2 & AR_ISR_S2_GTT)
78 mask2 |= ATH9K_INT_GTT;
79 if (isr2 & AR_ISR_S2_CST)
80 mask2 |= ATH9K_INT_CST;
81 if (isr2 & AR_ISR_S2_TSFOOR)
82 mask2 |= ATH9K_INT_TSFOOR;
83 }
84
85 isr = REG_READ(ah, AR_ISR_RAC);
86 if (isr == 0xffffffff) {
87 *masked = 0;
88 return false;
89 }
90
91 *masked = isr & ATH9K_INT_COMMON;
92
93 if (ah->config.rx_intr_mitigation) {
94 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
95 *masked |= ATH9K_INT_RX;
96 }
97
98 if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
99 *masked |= ATH9K_INT_RX;
100 if (isr &
101 (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
102 AR_ISR_TXEOL)) {
103 u32 s0_s, s1_s;
104
105 *masked |= ATH9K_INT_TX;
106
107 s0_s = REG_READ(ah, AR_ISR_S0_S);
108 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
109 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
110
111 s1_s = REG_READ(ah, AR_ISR_S1_S);
112 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
113 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
114 }
115
116 if (isr & AR_ISR_RXORN) {
117 ath_print(common, ATH_DBG_INTERRUPT,
118 "receive FIFO overrun interrupt\n");
119 }
120
121 if (!AR_SREV_9100(ah)) {
122 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
123 u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
124 if (isr5 & AR_ISR_S5_TIM_TIMER)
125 *masked |= ATH9K_INT_TIM_TIMER;
126 }
127 }
128
129 *masked |= mask2;
130 }
131
132 if (AR_SREV_9100(ah))
133 return true;
134
135 if (isr & AR_ISR_GENTMR) {
136 u32 s5_s;
137
138 s5_s = REG_READ(ah, AR_ISR_S5_S);
139 if (isr & AR_ISR_GENTMR) {
140 ah->intr_gen_timer_trigger =
141 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
142
143 ah->intr_gen_timer_thresh =
144 MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
145
146 if (ah->intr_gen_timer_trigger)
147 *masked |= ATH9K_INT_GENTIMER;
148
149 }
150 }
151
152 if (sync_cause) {
153 fatal_int =
154 (sync_cause &
155 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
156 ? true : false;
157
158 if (fatal_int) {
159 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
160 ath_print(common, ATH_DBG_ANY,
161 "received PCI FATAL interrupt\n");
162 }
163 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
164 ath_print(common, ATH_DBG_ANY,
165 "received PCI PERR interrupt\n");
166 }
167 *masked |= ATH9K_INT_FATAL;
168 }
169 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
170 ath_print(common, ATH_DBG_INTERRUPT,
171 "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
172 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
173 REG_WRITE(ah, AR_RC, 0);
174 *masked |= ATH9K_INT_FATAL;
175 }
176 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
177 ath_print(common, ATH_DBG_INTERRUPT,
178 "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
179 }
180
181 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
182 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
183 }
184
185 return true;
186}
187
188static void ar9002_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
189 bool is_firstseg, bool is_lastseg,
190 const void *ds0, dma_addr_t buf_addr,
191 unsigned int qcu)
192{
193 struct ar5416_desc *ads = AR5416DESC(ds);
194
195 ads->ds_data = buf_addr;
196
197 if (is_firstseg) {
198 ads->ds_ctl1 |= seglen | (is_lastseg ? 0 : AR_TxMore);
199 } else if (is_lastseg) {
200 ads->ds_ctl0 = 0;
201 ads->ds_ctl1 = seglen;
202 ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
203 ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
204 } else {
205 ads->ds_ctl0 = 0;
206 ads->ds_ctl1 = seglen | AR_TxMore;
207 ads->ds_ctl2 = 0;
208 ads->ds_ctl3 = 0;
209 }
210 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
211 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
212 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
213 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
214 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
215}
216
217static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
218 struct ath_tx_status *ts)
219{
220 struct ar5416_desc *ads = AR5416DESC(ds);
221
222 if ((ads->ds_txstatus9 & AR_TxDone) == 0)
223 return -EINPROGRESS;
224
225 ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
226 ts->ts_tstamp = ads->AR_SendTimestamp;
227 ts->ts_status = 0;
228 ts->ts_flags = 0;
229
230 if (ads->ds_txstatus1 & AR_FrmXmitOK)
231 ts->ts_status |= ATH9K_TX_ACKED;
232 if (ads->ds_txstatus1 & AR_ExcessiveRetries)
233 ts->ts_status |= ATH9K_TXERR_XRETRY;
234 if (ads->ds_txstatus1 & AR_Filtered)
235 ts->ts_status |= ATH9K_TXERR_FILT;
236 if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
237 ts->ts_status |= ATH9K_TXERR_FIFO;
238 ath9k_hw_updatetxtriglevel(ah, true);
239 }
240 if (ads->ds_txstatus9 & AR_TxOpExceeded)
241 ts->ts_status |= ATH9K_TXERR_XTXOP;
242 if (ads->ds_txstatus1 & AR_TxTimerExpired)
243 ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
244
245 if (ads->ds_txstatus1 & AR_DescCfgErr)
246 ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
247 if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
248 ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
249 ath9k_hw_updatetxtriglevel(ah, true);
250 }
251 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
252 ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
253 ath9k_hw_updatetxtriglevel(ah, true);
254 }
255 if (ads->ds_txstatus0 & AR_TxBaStatus) {
256 ts->ts_flags |= ATH9K_TX_BA;
257 ts->ba_low = ads->AR_BaBitmapLow;
258 ts->ba_high = ads->AR_BaBitmapHigh;
259 }
260
261 ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
262 switch (ts->ts_rateindex) {
263 case 0:
264 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
265 break;
266 case 1:
267 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
268 break;
269 case 2:
270 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
271 break;
272 case 3:
273 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
274 break;
275 }
276
277 ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
278 ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
279 ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
280 ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
281 ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
282 ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
283 ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
284 ts->evm0 = ads->AR_TxEVM0;
285 ts->evm1 = ads->AR_TxEVM1;
286 ts->evm2 = ads->AR_TxEVM2;
287 ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
288 ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
289 ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
290 ts->ts_antenna = 0;
291
292 return 0;
293}
294
295static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
296 u32 pktLen, enum ath9k_pkt_type type,
297 u32 txPower, u32 keyIx,
298 enum ath9k_key_type keyType, u32 flags)
299{
300 struct ar5416_desc *ads = AR5416DESC(ds);
301
302 txPower += ah->txpower_indexoffset;
303 if (txPower > 63)
304 txPower = 63;
305
306 ads->ds_ctl0 = (pktLen & AR_FrameLen)
307 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
308 | SM(txPower, AR_XmitPower)
309 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
310 | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
311 | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
312 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
313
314 ads->ds_ctl1 =
315 (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
316 | SM(type, AR_FrameType)
317 | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
318 | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
319 | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
320
321 ads->ds_ctl6 = SM(keyType, AR_EncrType);
322
323 if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
324 ads->ds_ctl8 = 0;
325 ads->ds_ctl9 = 0;
326 ads->ds_ctl10 = 0;
327 ads->ds_ctl11 = 0;
328 }
329}
330
331static void ar9002_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
332 void *lastds,
333 u32 durUpdateEn, u32 rtsctsRate,
334 u32 rtsctsDuration,
335 struct ath9k_11n_rate_series series[],
336 u32 nseries, u32 flags)
337{
338 struct ar5416_desc *ads = AR5416DESC(ds);
339 struct ar5416_desc *last_ads = AR5416DESC(lastds);
340 u32 ds_ctl0;
341
342 if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
343 ds_ctl0 = ads->ds_ctl0;
344
345 if (flags & ATH9K_TXDESC_RTSENA) {
346 ds_ctl0 &= ~AR_CTSEnable;
347 ds_ctl0 |= AR_RTSEnable;
348 } else {
349 ds_ctl0 &= ~AR_RTSEnable;
350 ds_ctl0 |= AR_CTSEnable;
351 }
352
353 ads->ds_ctl0 = ds_ctl0;
354 } else {
355 ads->ds_ctl0 =
356 (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
357 }
358
359 ads->ds_ctl2 = set11nTries(series, 0)
360 | set11nTries(series, 1)
361 | set11nTries(series, 2)
362 | set11nTries(series, 3)
363 | (durUpdateEn ? AR_DurUpdateEna : 0)
364 | SM(0, AR_BurstDur);
365
366 ads->ds_ctl3 = set11nRate(series, 0)
367 | set11nRate(series, 1)
368 | set11nRate(series, 2)
369 | set11nRate(series, 3);
370
371 ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
372 | set11nPktDurRTSCTS(series, 1);
373
374 ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
375 | set11nPktDurRTSCTS(series, 3);
376
377 ads->ds_ctl7 = set11nRateFlags(series, 0)
378 | set11nRateFlags(series, 1)
379 | set11nRateFlags(series, 2)
380 | set11nRateFlags(series, 3)
381 | SM(rtsctsRate, AR_RTSCTSRate);
382 last_ads->ds_ctl2 = ads->ds_ctl2;
383 last_ads->ds_ctl3 = ads->ds_ctl3;
384}
385
386static void ar9002_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
387 u32 aggrLen)
388{
389 struct ar5416_desc *ads = AR5416DESC(ds);
390
391 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
392 ads->ds_ctl6 &= ~AR_AggrLen;
393 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
394}
395
396static void ar9002_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
397 u32 numDelims)
398{
399 struct ar5416_desc *ads = AR5416DESC(ds);
400 unsigned int ctl6;
401
402 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
403
404 ctl6 = ads->ds_ctl6;
405 ctl6 &= ~AR_PadDelim;
406 ctl6 |= SM(numDelims, AR_PadDelim);
407 ads->ds_ctl6 = ctl6;
408}
409
410static void ar9002_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
411{
412 struct ar5416_desc *ads = AR5416DESC(ds);
413
414 ads->ds_ctl1 |= AR_IsAggr;
415 ads->ds_ctl1 &= ~AR_MoreAggr;
416 ads->ds_ctl6 &= ~AR_PadDelim;
417}
418
419static void ar9002_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
420{
421 struct ar5416_desc *ads = AR5416DESC(ds);
422
423 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
424}
425
426static void ar9002_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
427 u32 burstDuration)
428{
429 struct ar5416_desc *ads = AR5416DESC(ds);
430
431 ads->ds_ctl2 &= ~AR_BurstDur;
432 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
433}
434
435static void ar9002_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
436 u32 vmf)
437{
438 struct ar5416_desc *ads = AR5416DESC(ds);
439
440 if (vmf)
441 ads->ds_ctl0 |= AR_VirtMoreFrag;
442 else
443 ads->ds_ctl0 &= ~AR_VirtMoreFrag;
444}
445
446void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
447 u32 size, u32 flags)
448{
449 struct ar5416_desc *ads = AR5416DESC(ds);
450 struct ath9k_hw_capabilities *pCap = &ah->caps;
451
452 ads->ds_ctl1 = size & AR_BufLen;
453 if (flags & ATH9K_RXDESC_INTREQ)
454 ads->ds_ctl1 |= AR_RxIntrReq;
455
456 ads->ds_rxstatus8 &= ~AR_RxDone;
457 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
458 memset(&(ads->u), 0, sizeof(ads->u));
459}
460EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
461
462void ar9002_hw_attach_mac_ops(struct ath_hw *ah)
463{
464 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
465
466 ops->rx_enable = ar9002_hw_rx_enable;
467 ops->set_desc_link = ar9002_hw_set_desc_link;
468 ops->get_desc_link = ar9002_hw_get_desc_link;
469 ops->get_isr = ar9002_hw_get_isr;
470 ops->fill_txdesc = ar9002_hw_fill_txdesc;
471 ops->proc_txdesc = ar9002_hw_proc_txdesc;
472 ops->set11n_txdesc = ar9002_hw_set11n_txdesc;
473 ops->set11n_ratescenario = ar9002_hw_set11n_ratescenario;
474 ops->set11n_aggr_first = ar9002_hw_set11n_aggr_first;
475 ops->set11n_aggr_middle = ar9002_hw_set11n_aggr_middle;
476 ops->set11n_aggr_last = ar9002_hw_set11n_aggr_last;
477 ops->clr11n_aggr = ar9002_hw_clr11n_aggr;
478 ops->set11n_burstduration = ar9002_hw_set11n_burstduration;
479 ops->set11n_virtualmorefrag = ar9002_hw_set11n_virtualmorefrag;
480}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
new file mode 100644
index 000000000000..ed314e89bfe1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -0,0 +1,535 @@
1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/**
18 * DOC: Programming Atheros 802.11n analog front end radios
19 *
 * AR5416 MAC based PCI devices and AR5418 MAC based PCI-Express
21 * devices have either an external AR2133 analog front end radio for single
22 * band 2.4 GHz communication or an AR5133 analog front end radio for dual
23 * band 2.4 GHz / 5 GHz communication.
24 *
25 * All devices after the AR5416 and AR5418 family starting with the AR9280
26 * have their analog front radios, MAC/BB and host PCIe/USB interface embedded
27 * into a single-chip and require less programming.
28 *
29 * The following single-chips exist with a respective embedded radio:
30 *
31 * AR9280 - 11n dual-band 2x2 MIMO for PCIe
32 * AR9281 - 11n single-band 1x2 MIMO for PCIe
33 * AR9285 - 11n single-band 1x1 for PCIe
34 * AR9287 - 11n single-band 2x2 MIMO for PCIe
35 *
36 * AR9220 - 11n dual-band 2x2 MIMO for PCI
37 * AR9223 - 11n single-band 2x2 MIMO for PCI
38 *
39 * AR9287 - 11n single-band 1x1 MIMO for USB
40 */
41
42#include "hw.h"
43#include "ar9002_phy.h"
44
45/**
46 * ar9002_hw_set_channel - set channel on single-chip device
47 * @ah: atheros hardware structure
 * @chan: the channel to tune the synthesizer to
49 *
50 * This is the function to change channel on single-chip devices, that is
51 * all devices after ar9280.
52 *
53 * This function takes the channel value in MHz and sets
54 * hardware channel value. Assumes writes have been enabled to analog bus.
55 *
56 * Actual Expression,
57 *
58 * For 2GHz channel,
59 * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
60 * (freq_ref = 40MHz)
61 *
62 * For 5GHz channel,
63 * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
64 * (freq_ref = 40MHz/(24>>amodeRefSel))
65 */
static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
{
	u16 bMode, fracMode, aModeRefSel = 0;
	u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
	struct chan_centers centers;
	u32 refDivA = 24;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = centers.synth_center;

	/* Keep only the top two bits of the current synth control word;
	 * everything else is recomputed below. */
	reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
	reg32 &= 0xc0000000;

	if (freq < 4800) { /* 2 GHz, fractional mode */
		u32 txctl;
		int regWrites = 0;

		bMode = 1;
		fracMode = 1;
		aModeRefSel = 0;
		channelSel = CHANSEL_2G(freq);

		/* Japan channel 14 needs CCK spur/spreading handling; the
		 * mechanism differs before and after AR9287 v1.1. */
		if (AR_SREV_9287_11_OR_LATER(ah)) {
			if (freq == 2484) {
				/* Enable channel spreading for channel 14 */
				REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
						1, regWrites);
			} else {
				REG_WRITE_ARRAY(&ah->iniCckfirNormal,
						1, regWrites);
			}
		} else {
			txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
			if (freq == 2484) {
				/* Enable channel spreading for channel 14 */
				REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
					  txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
			} else {
				REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
					  txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
			}
		}
	} else {
		/* 5 GHz: integer-N by default; fall back to fractional
		 * mode for 5 MHz-spaced channels per the EEPROM setting. */
		bMode = 0;
		fracMode = 0;

		switch (ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
		case 0:
			if ((freq % 20) == 0)
				aModeRefSel = 3;
			else if ((freq % 10) == 0)
				aModeRefSel = 2;
			if (aModeRefSel)
				break;
			/* fall through - 5 MHz spaced channel, needs
			 * fractional mode like case 1 */
		case 1:
		default:
			aModeRefSel = 0;
			/*
			 * Enable 2G (fractional) mode for channels
			 * which are 5MHz spaced.
			 */
			fracMode = 1;
			refDivA = 1;
			channelSel = CHANSEL_5G(freq);

			/* RefDivA setting */
			REG_RMW_FIELD(ah, AR_AN_SYNTH9,
				      AR_AN_SYNTH9_REFDIVA, refDivA);

		}

		if (!fracMode) {
			/* Integer-N mode: split the divider into a 9-bit
			 * integer part and a fractional part. */
			ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
			channelSel = ndiv & 0x1ff;
			channelFrac = (ndiv & 0xfffffe00) * 2;
			channelSel = (channelSel << 17) | channelFrac;
		}
	}

	/* Compose and program the final synthesizer control word. */
	reg32 = reg32 |
	    (bMode << 29) |
	    (fracMode << 28) | (aModeRefSel << 26) | (channelSel);

	REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);

	ah->curchan = chan;
	ah->curchan_rad_index = -1;

	return 0;
}
156
157/**
158 * ar9002_hw_spur_mitigate - convert baseband spur frequency
159 * @ah: atheros hardware structure
 * @chan: the channel currently being operated on
161 *
162 * For single-chip solutions. Converts to baseband spur frequency given the
163 * input channel frequency and compute register settings below.
164 */
static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
				    struct ath9k_channel *chan)
{
	int bb_spur = AR_NO_SPUR;
	int freq;
	int bin, cur_bin;
	int bb_spur_off, spur_subchannel_sd;
	int spur_freq_sd;
	int spur_delta_phase;
	int denominator;
	int upper, lower, cur_vit_mask;
	int tmp, newVal;
	int i;
	int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
			  AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
	};
	int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
			 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
	};
	int inc[4] = { 0, 100, 0, 0 };
	struct chan_centers centers;

	int8_t mask_m[123];
	int8_t mask_p[123];
	int8_t mask_amt;
	int tmp_mask;
	int cur_bb_spur;
	bool is2GHz = IS_CHAN_2GHZ(chan);

	memset(&mask_m, 0, sizeof(int8_t) * 123);
	memset(&mask_p, 0, sizeof(int8_t) * 123);

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = centers.synth_center;

	ah->config.spurmode = SPUR_ENABLE_EEPROM;

	/*
	 * Scan the EEPROM modal spur table for the first spur close
	 * enough to the tuned channel to matter (bounds differ for
	 * HT20 vs HT40).  bb_spur ends up as the spur's offset from
	 * the channel center, or AR_NO_SPUR if none was found.
	 */
	for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
		cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);

		if (is2GHz)
			cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
		else
			cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;

		if (AR_NO_SPUR == cur_bb_spur)
			break;
		cur_bb_spur = cur_bb_spur - freq;

		if (IS_CHAN_HT40(chan)) {
			if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
			    (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
				bb_spur = cur_bb_spur;
				break;
			}
		} else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
			   (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
			bb_spur = cur_bb_spur;
			break;
		}
	}

	/*
	 * NOTE(review): both branches clear the same bit, so finding a
	 * spur currently has no effect on this register.  One might
	 * expect a REG_SET_BIT in the else path — confirm against the
	 * vendor HAL before changing.
	 */
	if (AR_NO_SPUR == bb_spur) {
		REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
			    AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
		return;
	} else {
		REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
			    AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
	}

	bin = bb_spur * 320;

	tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));

	ENABLE_REGWRITE_BUFFER(ah);

	/* Turn on spur RSSI, spur filtering and both bin masks. */
	newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
			AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
			AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
			AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
	REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);

	newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
		  AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
		  AR_PHY_SPUR_REG_MASK_RATE_SELECT |
		  AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
		  SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
	REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);

	/*
	 * For HT40 the self-correlator operates on a 10 MHz-offset
	 * subchannel, so pick the subchannel and shift the spur offset
	 * accordingly; HT20 uses the spur offset unchanged.
	 */
	if (IS_CHAN_HT40(chan)) {
		if (bb_spur < 0) {
			spur_subchannel_sd = 1;
			bb_spur_off = bb_spur + 10;
		} else {
			spur_subchannel_sd = 0;
			bb_spur_off = bb_spur - 10;
		}
	} else {
		spur_subchannel_sd = 0;
		bb_spur_off = bb_spur;
	}

	/* Delta phase uses a different fixed-point scale for HT40. */
	if (IS_CHAN_HT40(chan))
		spur_delta_phase =
			((bb_spur * 262144) /
			 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
	else
		spur_delta_phase =
			((bb_spur * 524288) /
			 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;

	denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
	spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;

	newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
		  SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
		  SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
	REG_WRITE(ah, AR_PHY_TIMING11, newVal);

	newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
	REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);

	/*
	 * Build the pilot and channel masks: step through bins (100
	 * units apart, starting at -6000) and set a bit for every bin
	 * strictly within +/-100 of the spur bin.
	 */
	cur_bin = -6000;
	upper = bin + 100;
	lower = bin - 100;

	for (i = 0; i < 4; i++) {
		int pilot_mask = 0;
		int chan_mask = 0;
		int bp = 0;
		for (bp = 0; bp < 30; bp++) {
			if ((cur_bin > lower) && (cur_bin < upper)) {
				pilot_mask = pilot_mask | 0x1 << bp;
				chan_mask = chan_mask | 0x1 << bp;
			}
			cur_bin += 100;
		}
		cur_bin += inc[i];
		REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
		REG_WRITE(ah, chan_mask_reg[i], chan_mask);
	}

	/*
	 * Viterbi mask: walk down from +6100 and, for bins within
	 * +/-120 of the spur, record a 1 when the distance to the spur
	 * bin is under 75.  Negative bins land in mask_m, positive in
	 * mask_p.
	 */
	cur_vit_mask = 6100;
	upper = bin + 120;
	lower = bin - 120;

	for (i = 0; i < 123; i++) {
		if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {

			/* workaround for gcc bug #37014 */
			volatile int tmp_v = abs(cur_vit_mask - bin);

			if (tmp_v < 75)
				mask_amt = 1;
			else
				mask_amt = 0;
			if (cur_vit_mask < 0)
				mask_m[abs(cur_vit_mask / 100)] = mask_amt;
			else
				mask_p[cur_vit_mask / 100] = mask_amt;
		}
		cur_vit_mask -= 100;
	}

	/* Pack the mask_m/mask_p bits into the viterbi mask registers,
	 * two bits per entry. */
	tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
		| (mask_m[48] << 26) | (mask_m[49] << 24)
		| (mask_m[50] << 22) | (mask_m[51] << 20)
		| (mask_m[52] << 18) | (mask_m[53] << 16)
		| (mask_m[54] << 14) | (mask_m[55] << 12)
		| (mask_m[56] << 10) | (mask_m[57] << 8)
		| (mask_m[58] << 6) | (mask_m[59] << 4)
		| (mask_m[60] << 2) | (mask_m[61] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
	REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);

	/*
	 * NOTE(review): mask_m[48] below breaks the otherwise monotone
	 * 31..45 sequence (mask_m[38] would be expected at shift 14).
	 * This matches the original vendor code — confirm against the
	 * hardware documentation before "fixing".
	 */
	tmp_mask = (mask_m[31] << 28)
		| (mask_m[32] << 26) | (mask_m[33] << 24)
		| (mask_m[34] << 22) | (mask_m[35] << 20)
		| (mask_m[36] << 18) | (mask_m[37] << 16)
		| (mask_m[48] << 14) | (mask_m[39] << 12)
		| (mask_m[40] << 10) | (mask_m[41] << 8)
		| (mask_m[42] << 6) | (mask_m[43] << 4)
		| (mask_m[44] << 2) | (mask_m[45] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);

	/*
	 * NOTE(review): indices 16, 18, 20, 22 and 24 are each used
	 * twice while 17, 19, 21 and 23 are never read — again as in
	 * the original vendor code; verify before changing.
	 */
	tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
		| (mask_m[18] << 26) | (mask_m[18] << 24)
		| (mask_m[20] << 22) | (mask_m[20] << 20)
		| (mask_m[22] << 18) | (mask_m[22] << 16)
		| (mask_m[24] << 14) | (mask_m[24] << 12)
		| (mask_m[25] << 10) | (mask_m[26] << 8)
		| (mask_m[27] << 6) | (mask_m[28] << 4)
		| (mask_m[29] << 2) | (mask_m[30] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);

	tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
		| (mask_m[2] << 26) | (mask_m[3] << 24)
		| (mask_m[4] << 22) | (mask_m[5] << 20)
		| (mask_m[6] << 18) | (mask_m[7] << 16)
		| (mask_m[8] << 14) | (mask_m[9] << 12)
		| (mask_m[10] << 10) | (mask_m[11] << 8)
		| (mask_m[12] << 6) | (mask_m[13] << 4)
		| (mask_m[14] << 2) | (mask_m[15] << 0);
	REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);

	tmp_mask = (mask_p[15] << 28)
		| (mask_p[14] << 26) | (mask_p[13] << 24)
		| (mask_p[12] << 22) | (mask_p[11] << 20)
		| (mask_p[10] << 18) | (mask_p[9] << 16)
		| (mask_p[8] << 14) | (mask_p[7] << 12)
		| (mask_p[6] << 10) | (mask_p[5] << 8)
		| (mask_p[4] << 6) | (mask_p[3] << 4)
		| (mask_p[2] << 2) | (mask_p[1] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);

	tmp_mask = (mask_p[30] << 28)
		| (mask_p[29] << 26) | (mask_p[28] << 24)
		| (mask_p[27] << 22) | (mask_p[26] << 20)
		| (mask_p[25] << 18) | (mask_p[24] << 16)
		| (mask_p[23] << 14) | (mask_p[22] << 12)
		| (mask_p[21] << 10) | (mask_p[20] << 8)
		| (mask_p[19] << 6) | (mask_p[18] << 4)
		| (mask_p[17] << 2) | (mask_p[16] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);

	tmp_mask = (mask_p[45] << 28)
		| (mask_p[44] << 26) | (mask_p[43] << 24)
		| (mask_p[42] << 22) | (mask_p[41] << 20)
		| (mask_p[40] << 18) | (mask_p[39] << 16)
		| (mask_p[38] << 14) | (mask_p[37] << 12)
		| (mask_p[36] << 10) | (mask_p[35] << 8)
		| (mask_p[34] << 6) | (mask_p[33] << 4)
		| (mask_p[32] << 2) | (mask_p[31] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);

	tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
		| (mask_p[59] << 26) | (mask_p[58] << 24)
		| (mask_p[57] << 22) | (mask_p[56] << 20)
		| (mask_p[55] << 18) | (mask_p[54] << 16)
		| (mask_p[53] << 14) | (mask_p[52] << 12)
		| (mask_p[51] << 10) | (mask_p[50] << 8)
		| (mask_p[49] << 6) | (mask_p[48] << 4)
		| (mask_p[47] << 2) | (mask_p[46] << 0);
	REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
	REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);

	REGWRITE_BUFFER_FLUSH(ah);
	DISABLE_REGWRITE_BUFFER(ah);
}
420
421static void ar9002_olc_init(struct ath_hw *ah)
422{
423 u32 i;
424
425 if (!OLC_FOR_AR9280_20_LATER)
426 return;
427
428 if (OLC_FOR_AR9287_10_LATER) {
429 REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
430 AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
431 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
432 AR9287_AN_TXPC0_TXPCMODE,
433 AR9287_AN_TXPC0_TXPCMODE_S,
434 AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
435 udelay(100);
436 } else {
437 for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
438 ah->originalGain[i] =
439 MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
440 AR_PHY_TX_GAIN);
441 ah->PDADCdelta = 0;
442 }
443}
444
445static u32 ar9002_hw_compute_pll_control(struct ath_hw *ah,
446 struct ath9k_channel *chan)
447{
448 u32 pll;
449
450 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
451
452 if (chan && IS_CHAN_HALF_RATE(chan))
453 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
454 else if (chan && IS_CHAN_QUARTER_RATE(chan))
455 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
456
457 if (chan && IS_CHAN_5GHZ(chan)) {
458 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
459 pll = 0x142c;
460 else if (AR_SREV_9280_20(ah))
461 pll = 0x2850;
462 else
463 pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
464 } else {
465 pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
466 }
467
468 return pll;
469}
470
471static void ar9002_hw_do_getnf(struct ath_hw *ah,
472 int16_t nfarray[NUM_NF_READINGS])
473{
474 struct ath_common *common = ath9k_hw_common(ah);
475 int16_t nf;
476
477 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
478
479 if (nf & 0x100)
480 nf = 0 - ((nf ^ 0x1ff) + 1);
481 ath_print(common, ATH_DBG_CALIBRATE,
482 "NF calibrated [ctl] [chain 0] is %d\n", nf);
483
484 if (AR_SREV_9271(ah) && (nf >= -114))
485 nf = -116;
486
487 nfarray[0] = nf;
488
489 if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
490 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
491 AR9280_PHY_CH1_MINCCA_PWR);
492
493 if (nf & 0x100)
494 nf = 0 - ((nf ^ 0x1ff) + 1);
495 ath_print(common, ATH_DBG_CALIBRATE,
496 "NF calibrated [ctl] [chain 1] is %d\n", nf);
497 nfarray[1] = nf;
498 }
499
500 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
501 if (nf & 0x100)
502 nf = 0 - ((nf ^ 0x1ff) + 1);
503 ath_print(common, ATH_DBG_CALIBRATE,
504 "NF calibrated [ext] [chain 0] is %d\n", nf);
505
506 if (AR_SREV_9271(ah) && (nf >= -114))
507 nf = -116;
508
509 nfarray[3] = nf;
510
511 if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
512 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
513 AR9280_PHY_CH1_EXT_MINCCA_PWR);
514
515 if (nf & 0x100)
516 nf = 0 - ((nf ^ 0x1ff) + 1);
517 ath_print(common, ATH_DBG_CALIBRATE,
518 "NF calibrated [ext] [chain 1] is %d\n", nf);
519 nfarray[4] = nf;
520 }
521}
522
523void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
524{
525 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
526
527 priv_ops->set_rf_regs = NULL;
528 priv_ops->rf_alloc_ext_banks = NULL;
529 priv_ops->rf_free_ext_banks = NULL;
530 priv_ops->rf_set_freq = ar9002_hw_set_channel;
531 priv_ops->spur_mitigate_freq = ar9002_hw_spur_mitigate;
532 priv_ops->olc_init = ar9002_olc_init;
533 priv_ops->compute_pll_control = ar9002_hw_compute_pll_control;
534 priv_ops->do_getnf = ar9002_hw_do_getnf;
535}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
new file mode 100644
index 000000000000..81bf6e5840e1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -0,0 +1,572 @@
1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#ifndef AR9002_PHY_H
17#define AR9002_PHY_H
18
19#define AR_PHY_TEST 0x9800
20#define PHY_AGC_CLR 0x10000000
21#define RFSILENT_BB 0x00002000
22
23#define AR_PHY_TURBO 0x9804
24#define AR_PHY_FC_TURBO_MODE 0x00000001
25#define AR_PHY_FC_TURBO_SHORT 0x00000002
26#define AR_PHY_FC_DYN2040_EN 0x00000004
27#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
28#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
29/* For 25 MHz channel spacing -- not used but supported by hw */
30#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
31#define AR_PHY_FC_HT_EN 0x00000040
32#define AR_PHY_FC_SHORT_GI_40 0x00000080
33#define AR_PHY_FC_WALSH 0x00000100
34#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
35#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
36
37#define AR_PHY_TEST2 0x9808
38
39#define AR_PHY_TIMING2 0x9810
40#define AR_PHY_TIMING3 0x9814
41#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
42#define AR_PHY_TIMING3_DSC_MAN_S 17
43#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
44#define AR_PHY_TIMING3_DSC_EXP_S 13
45
46#define AR_PHY_CHIP_ID_REV_0 0x80
47#define AR_PHY_CHIP_ID_REV_1 0x81
48#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
49
50#define AR_PHY_ACTIVE 0x981C
51#define AR_PHY_ACTIVE_EN 0x00000001
52#define AR_PHY_ACTIVE_DIS 0x00000000
53
54#define AR_PHY_RF_CTL2 0x9824
55#define AR_PHY_TX_END_DATA_START 0x000000FF
56#define AR_PHY_TX_END_DATA_START_S 0
57#define AR_PHY_TX_END_PA_ON 0x0000FF00
58#define AR_PHY_TX_END_PA_ON_S 8
59
60#define AR_PHY_RF_CTL3 0x9828
61#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
62#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
63
64#define AR_PHY_ADC_CTL 0x982C
65#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
66#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
67#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
68#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
69#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
70#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
71#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
72
73#define AR_PHY_ADC_SERIAL_CTL 0x9830
74#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
75#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
76
77#define AR_PHY_RF_CTL4 0x9834
78#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
79#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
80#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
81#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
82#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
83#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
84#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
85#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
86
87#define AR_PHY_TSTDAC_CONST 0x983c
88
89#define AR_PHY_SETTLING 0x9844
90#define AR_PHY_SETTLING_SWITCH 0x00003F80
91#define AR_PHY_SETTLING_SWITCH_S 7
92
93#define AR_PHY_RXGAIN 0x9848
94#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
95#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
96#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
97#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
98#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
99#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
100#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
101#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
102
103#define AR_PHY_DESIRED_SZ 0x9850
104#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
105#define AR_PHY_DESIRED_SZ_ADC_S 0
106#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
107#define AR_PHY_DESIRED_SZ_PGA_S 8
108#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
109#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
110
111#define AR_PHY_FIND_SIG 0x9858
112#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
113#define AR_PHY_FIND_SIG_FIRSTEP_S 12
114#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
115#define AR_PHY_FIND_SIG_FIRPWR_S 18
116
117#define AR_PHY_AGC_CTL1 0x985C
118#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
119#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
120#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
121#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
122
123#define AR_PHY_CCA 0x9864
124#define AR_PHY_MINCCA_PWR 0x0FF80000
125#define AR_PHY_MINCCA_PWR_S 19
126#define AR_PHY_CCA_THRESH62 0x0007F000
127#define AR_PHY_CCA_THRESH62_S 12
128#define AR9280_PHY_MINCCA_PWR 0x1FF00000
129#define AR9280_PHY_MINCCA_PWR_S 20
130#define AR9280_PHY_CCA_THRESH62 0x000FF000
131#define AR9280_PHY_CCA_THRESH62_S 12
132
133#define AR_PHY_SFCORR_LOW 0x986C
134#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
135#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
136#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
137#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
138#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
139#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
140#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
141
142#define AR_PHY_SFCORR 0x9868
143#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
144#define AR_PHY_SFCORR_M2COUNT_THR_S 0
145#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
146#define AR_PHY_SFCORR_M1_THRESH_S 17
147#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
148#define AR_PHY_SFCORR_M2_THRESH_S 24
149
150#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
151#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
152#define AR_PHY_SYNTH_CONTROL 0x9874
153#define AR_PHY_SLEEP_SCAL 0x9878
154
155#define AR_PHY_PLL_CTL 0x987c
156#define AR_PHY_PLL_CTL_40 0xaa
157#define AR_PHY_PLL_CTL_40_5413 0x04
158#define AR_PHY_PLL_CTL_44 0xab
159#define AR_PHY_PLL_CTL_44_2133 0xeb
160#define AR_PHY_PLL_CTL_40_2133 0xea
161
162#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
163#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
164#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
165#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
166#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
167#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
168#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
169#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
170#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
171#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
172#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
173#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
174#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
175#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
176
177#define AR_PHY_RX_DELAY 0x9914
178#define AR_PHY_SEARCH_START_DELAY 0x9918
179#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
180
181#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
182#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
183#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
184#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
185#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
186#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
187#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
188#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
189#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
190
191#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
192#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
193#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
194#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
195
196#define AR_PHY_TIMING5 0x9924
197#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
198#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
199
200#define AR_PHY_POWER_TX_RATE1 0x9934
201#define AR_PHY_POWER_TX_RATE2 0x9938
202#define AR_PHY_POWER_TX_RATE_MAX 0x993c
203#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
204
205#define AR_PHY_FRAME_CTL 0x9944
206#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
207#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
208
209#define AR_PHY_TXPWRADJ 0x994C
210#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
211#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
212#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
213#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
214
215#define AR_PHY_RADAR_EXT 0x9940
216#define AR_PHY_RADAR_EXT_ENA 0x00004000
217
218#define AR_PHY_RADAR_0 0x9954
219#define AR_PHY_RADAR_0_ENA 0x00000001
220#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
221#define AR_PHY_RADAR_0_INBAND 0x0000003e
222#define AR_PHY_RADAR_0_INBAND_S 1
223#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
224#define AR_PHY_RADAR_0_PRSSI_S 6
225#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
226#define AR_PHY_RADAR_0_HEIGHT_S 12
227#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
228#define AR_PHY_RADAR_0_RRSSI_S 18
229#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
230#define AR_PHY_RADAR_0_FIRPWR_S 24
231
232#define AR_PHY_RADAR_1 0x9958
233#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
234#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
235#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
236#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
237#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
238#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
239#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
240#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
241#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
242#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
243#define AR_PHY_RADAR_1_MAXLEN_S 0
244
245#define AR_PHY_SWITCH_CHAIN_0 0x9960
246#define AR_PHY_SWITCH_COM 0x9964
247
248#define AR_PHY_SIGMA_DELTA 0x996C
249#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
250#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
251#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
252#define AR_PHY_SIGMA_DELTA_FILT2_S 3
253#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
254#define AR_PHY_SIGMA_DELTA_FILT1_S 8
255#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
256#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
257
258#define AR_PHY_RESTART 0x9970
259#define AR_PHY_RESTART_DIV_GC 0x001C0000
260#define AR_PHY_RESTART_DIV_GC_S 18
261
262#define AR_PHY_RFBUS_REQ 0x997C
263#define AR_PHY_RFBUS_REQ_EN 0x00000001
264
265#define AR_PHY_TIMING7 0x9980
266#define AR_PHY_TIMING8 0x9984
267#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
268#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
269
270#define AR_PHY_BIN_MASK2_1 0x9988
271#define AR_PHY_BIN_MASK2_2 0x998c
272#define AR_PHY_BIN_MASK2_3 0x9990
273#define AR_PHY_BIN_MASK2_4 0x9994
274
275#define AR_PHY_BIN_MASK_1 0x9900
276#define AR_PHY_BIN_MASK_2 0x9904
277#define AR_PHY_BIN_MASK_3 0x9908
278
279#define AR_PHY_MASK_CTL 0x990c
280
281#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
282#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
283
284#define AR_PHY_TIMING9 0x9998
285#define AR_PHY_TIMING10 0x999c
286#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
287#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
288
289#define AR_PHY_TIMING11 0x99a0
290#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
291#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
292#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
293#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
294
295#define AR_PHY_RX_CHAINMASK 0x99a4
296#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
297#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
298#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
299
300#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
301#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
302#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
303#define AR_PHY_9285_ANT_DIV_CTL_S 24
304#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
305#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
306#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
307#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
308#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
309#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
310#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
311#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
312#define AR_PHY_9285_ANT_DIV_LNA1 2
313#define AR_PHY_9285_ANT_DIV_LNA2 1
314#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
315#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
316#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
317#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
318
319#define AR_PHY_EXT_CCA0 0x99b8
320#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
321#define AR_PHY_EXT_CCA0_THRESH62_S 0
322
323#define AR_PHY_EXT_CCA 0x99bc
324#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
325#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
326#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
327#define AR_PHY_EXT_CCA_THRESH62_S 16
328#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
329#define AR_PHY_EXT_MINCCA_PWR_S 23
330#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
331#define AR9280_PHY_EXT_MINCCA_PWR_S 16
332
333#define AR_PHY_SFCORR_EXT 0x99c0
334#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
335#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
336#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
337#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
338#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
339#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
340#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
341#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
342#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
343
344#define AR_PHY_HALFGI 0x99D0
345#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
346#define AR_PHY_HALFGI_DSC_MAN_S 4
347#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
348#define AR_PHY_HALFGI_DSC_EXP_S 0
349
350#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
351#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
352
353#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
354
355#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
356#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
357
358#define AR_PHY_M_SLEEP 0x99f0
359#define AR_PHY_REFCLKDLY 0x99f4
360#define AR_PHY_REFCLKPD 0x99f8
361
362#define AR_PHY_CALMODE 0x99f0
363
364#define AR_PHY_CALMODE_IQ 0x00000000
365#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
366#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
367#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
368
369#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
370#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
371#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
372#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
373
374#define AR_PHY_CURRENT_RSSI 0x9c1c
375#define AR9280_PHY_CURRENT_RSSI 0x9c3c
376
377#define AR_PHY_RFBUS_GRANT 0x9C20
378#define AR_PHY_RFBUS_GRANT_EN 0x00000001
379
380#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
381#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
382
383#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
384
385#define AR_PHY_MODE 0xA200
386#define AR_PHY_MODE_ASYNCFIFO 0x80
387#define AR_PHY_MODE_AR2133 0x08
388#define AR_PHY_MODE_AR5111 0x00
389#define AR_PHY_MODE_AR5112 0x08
390#define AR_PHY_MODE_DYNAMIC 0x04
391#define AR_PHY_MODE_RF2GHZ 0x02
392#define AR_PHY_MODE_RF5GHZ 0x00
393#define AR_PHY_MODE_CCK 0x01
394#define AR_PHY_MODE_OFDM 0x00
395#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
396
397#define AR_PHY_CCK_TX_CTRL 0xA204
398#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
399#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C
400#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
401
402#define AR_PHY_CCK_DETECT 0xA208
403#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
404#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
405/* [12:6] settling time for antenna switch */
406#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
407#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
408#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
409#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
410
411#define AR_PHY_GAIN_2GHZ 0xA20C
412#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
413#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
414#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
415#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
416#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
417#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
418
419#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
420#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
421#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
422#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
423#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
424#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
425#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
426#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
427
428#define AR_PHY_CCK_RXCTRL4 0xA21C
429#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
430#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
431
432#define AR_PHY_DAG_CTRLCCK 0xA228
433#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
434#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
435#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
436
437#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
438#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
439
440#define AR_PHY_POWER_TX_RATE3 0xA234
441#define AR_PHY_POWER_TX_RATE4 0xA238
442
443#define AR_PHY_SCRM_SEQ_XR 0xA23C
444#define AR_PHY_HEADER_DETECT_XR 0xA240
445#define AR_PHY_CHIRP_DETECTED_XR 0xA244
446#define AR_PHY_BLUETOOTH 0xA254
447
448#define AR_PHY_TPCRG1 0xA258
449#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
450#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
451
452#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
453#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
454#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
455#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
456#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
457#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
458
459#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
460#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
461
462#define AR_PHY_TX_PWRCTRL4 0xa264
463#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
464#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
465#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE
466#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
467
468#define AR_PHY_TX_PWRCTRL6_0 0xa270
469#define AR_PHY_TX_PWRCTRL6_1 0xb270
470#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
471#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
472
473#define AR_PHY_TX_PWRCTRL7 0xa274
474#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
475#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
476
477#define AR_PHY_TX_PWRCTRL9 0xa27C
478#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
479#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
480#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
481#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
482
483#define AR_PHY_TX_GAIN_TBL1 0xa300
484#define AR_PHY_TX_GAIN 0x0007F000
485#define AR_PHY_TX_GAIN_S 12
486
487#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
488#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
489#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
490#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
491
492#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
493#define AR_PHY_MASK2_M_31_45 0xa3a4
494#define AR_PHY_MASK2_M_16_30 0xa3a8
495#define AR_PHY_MASK2_M_00_15 0xa3ac
496#define AR_PHY_MASK2_P_15_01 0xa3b8
497#define AR_PHY_MASK2_P_30_16 0xa3bc
498#define AR_PHY_MASK2_P_45_31 0xa3c0
499#define AR_PHY_MASK2_P_61_45 0xa3c4
500#define AR_PHY_SPUR_REG 0x994c
501
502#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
503#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
504
505#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
506#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
507#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
508#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
509#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
510#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
511
512#define AR_PHY_PILOT_MASK_01_30 0xa3b0
513#define AR_PHY_PILOT_MASK_31_60 0xa3b4
514
515#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
516#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
517
518#define AR_PHY_ANALOG_SWAP 0xa268
519#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
520
521#define AR_PHY_TPCRG5 0xA26C
522#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
523#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
524#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
525#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
526#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
527#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
528#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
529#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
530#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
531#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
532
533/* Carrier leak calibration control, do it after AGC calibration */
534#define AR_PHY_CL_CAL_CTL 0xA358
535#define AR_PHY_CL_CAL_ENABLE 0x00000002
536#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
537
538#define AR_PHY_POWER_TX_RATE5 0xA38C
539#define AR_PHY_POWER_TX_RATE6 0xA390
540
541#define AR_PHY_CAL_CHAINMASK 0xA39C
542
543#define AR_PHY_POWER_TX_SUB 0xA3C8
544#define AR_PHY_POWER_TX_RATE7 0xA3CC
545#define AR_PHY_POWER_TX_RATE8 0xA3D0
546#define AR_PHY_POWER_TX_RATE9 0xA3D4
547
548#define AR_PHY_XPA_CFG 0xA3D8
549#define AR_PHY_FORCE_XPA_CFG 0x000000001
550#define AR_PHY_FORCE_XPA_CFG_S 0
551
552#define AR_PHY_CH1_CCA 0xa864
553#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
554#define AR_PHY_CH1_MINCCA_PWR_S 19
555#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
556#define AR9280_PHY_CH1_MINCCA_PWR_S 20
557
558#define AR_PHY_CH2_CCA 0xb864
559#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
560#define AR_PHY_CH2_MINCCA_PWR_S 19
561
562#define AR_PHY_CH1_EXT_CCA 0xa9bc
563#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
564#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
565#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
566#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
567
568#define AR_PHY_CH2_EXT_CCA 0xb9bc
569#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
570#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
571
572#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
new file mode 100644
index 000000000000..56a9e5fa6d66
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -0,0 +1,802 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "hw-ops.h"
19#include "ar9003_phy.h"
20
/*
 * Program the baseband to start one periodic calibration pass.
 *
 * @ah:      hardware state
 * @currCal: the calibration list entry to kick off; its calData
 *           selects which calibration type is started.
 *
 * IQ mismatch cal is started by arming AR_PHY_TIMING4_DO_CAL;
 * completion is detected elsewhere by polling that same bit
 * (see ar9003_hw_per_calibration). Temperature compensation only
 * arms the thermometer; ADC cals are not implemented for this chip.
 */
static void ar9003_hw_setup_calibration(struct ath_hw *ah,
					struct ath9k_cal_list *currCal)
{
	struct ath_common *common = ath9k_hw_common(ah);

	/* Select calibration to run */
	switch (currCal->calData->calType) {
	case IQ_MISMATCH_CAL:
		/*
		 * Start calibration with
		 * 2^(INIT_IQCAL_LOG_COUNT_MAX+1) samples
		 */
		REG_RMW_FIELD(ah, AR_PHY_TIMING4,
			      AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX,
			      currCal->calData->calCountMax);
		/* Route the cal engine into IQ-mismatch mode */
		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);

		ath_print(common, ATH_DBG_CALIBRATE,
			  "starting IQ Mismatch Calibration\n");

		/* Kick-off cal */
		REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
		break;
	case TEMP_COMP_CAL:
		/* Arm the on-die thermometer: local mode, then start */
		REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
			      AR_PHY_65NM_CH0_THERM_LOCAL, 1);
		REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
			      AR_PHY_65NM_CH0_THERM_START, 1);

		ath_print(common, ATH_DBG_CALIBRATE,
			  "starting Temperature Compensation Calibration\n");
		break;
	case ADC_DC_INIT_CAL:
	case ADC_GAIN_CAL:
	case ADC_DC_CAL:
		/* Not yet */
		break;
	}
}
60
61/*
62 * Generic calibration routine.
63 * Recalibrate the lower PHY chips to account for temperature/environment
64 * changes.
65 */
/*
 * Generic calibration routine.
 * Recalibrate the lower PHY chips to account for temperature/environment
 * changes.
 *
 * @ah:          hardware state
 * @ichan:       channel whose CalValid mask is updated on completion
 * @rxchainmask: bitmask of active rx chains; sets how many chains'
 *               results are post-processed
 * @currCal:     the calibration being advanced
 *
 * Returns true once the calibration has collected enough samples and
 * its results have been applied; false while still in progress.
 */
static bool ar9003_hw_per_calibration(struct ath_hw *ah,
				      struct ath9k_channel *ichan,
				      u8 rxchainmask,
				      struct ath9k_cal_list *currCal)
{
	/* Cal is assumed not done until explicitly set below */
	bool iscaldone = false;

	/* Calibration in progress. */
	if (currCal->calState == CAL_RUNNING) {
		/*
		 * Check to see if it has finished: hardware clears
		 * DO_CAL when the measurement pass completes.
		 */
		if (!(REG_READ(ah, AR_PHY_TIMING4) & AR_PHY_TIMING4_DO_CAL)) {
			/*
			 * Accumulate cal measures for active chains
			 */
			currCal->calData->calCollect(ah);
			ah->cal_samples++;

			if (ah->cal_samples >=
			    currCal->calData->calNumSamples) {
				unsigned int i, numChains = 0;
				/* Count chains enabled in rxchainmask */
				for (i = 0; i < AR9300_MAX_CHAINS; i++) {
					if (rxchainmask & (1 << i))
						numChains++;
				}

				/*
				 * Process accumulated data
				 */
				currCal->calData->calPostProc(ah, numChains);

				/* Calibration has finished. */
				ichan->CalValid |= currCal->calData->calType;
				currCal->calState = CAL_DONE;
				iscaldone = true;
			} else {
				/*
				 * Set-up collection of another sub-sample until we
				 * get desired number
				 */
				ar9003_hw_setup_calibration(ah, currCal);
			}
		}
	} else if (!(ichan->CalValid & currCal->calData->calType)) {
		/* If current cal is marked invalid in channel, kick it off */
		ath9k_hw_reset_calibration(ah, currCal);
	}

	return iscaldone;
}
116
117static bool ar9003_hw_calibrate(struct ath_hw *ah,
118 struct ath9k_channel *chan,
119 u8 rxchainmask,
120 bool longcal)
121{
122 bool iscaldone = true;
123 struct ath9k_cal_list *currCal = ah->cal_list_curr;
124
125 /*
126 * For given calibration:
127 * 1. Call generic cal routine
128 * 2. When this cal is done (isCalDone) if we have more cals waiting
129 * (eg after reset), mask this to upper layers by not propagating
130 * isCalDone if it is set to TRUE.
131 * Instead, change isCalDone to FALSE and setup the waiting cal(s)
132 * to be run.
133 */
134 if (currCal &&
135 (currCal->calState == CAL_RUNNING ||
136 currCal->calState == CAL_WAITING)) {
137 iscaldone = ar9003_hw_per_calibration(ah, chan,
138 rxchainmask, currCal);
139 if (iscaldone) {
140 ah->cal_list_curr = currCal = currCal->calNext;
141
142 if (currCal->calState == CAL_WAITING) {
143 iscaldone = false;
144 ath9k_hw_reset_calibration(ah, currCal);
145 }
146 }
147 }
148
149 /* Do NF cal only at longer intervals */
150 if (longcal) {
151 /*
152 * Load the NF from history buffer of the current channel.
153 * NF is slow time-variant, so it is OK to use a historical
154 * value.
155 */
156 ath9k_hw_loadnf(ah, ah->curchan);
157
158 /* start NF calibration, without updating BB NF register */
159 ath9k_hw_start_nfcal(ah);
160 }
161
162 return iscaldone;
163}
164
165static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
166{
167 int i;
168
169 /* Accumulate IQ cal measures for active chains */
170 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
171 ah->totalPowerMeasI[i] +=
172 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
173 ah->totalPowerMeasQ[i] +=
174 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
175 ah->totalIqCorrMeas[i] +=
176 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
177 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
178 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
179 ah->cal_samples, i, ah->totalPowerMeasI[i],
180 ah->totalPowerMeasQ[i],
181 ah->totalIqCorrMeas[i]);
182 }
183}
184
185static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
186{
187 struct ath_common *common = ath9k_hw_common(ah);
188 u32 powerMeasQ, powerMeasI, iqCorrMeas;
189 u32 qCoffDenom, iCoffDenom;
190 int32_t qCoff, iCoff;
191 int iqCorrNeg, i;
192 const u_int32_t offset_array[3] = {
193 AR_PHY_RX_IQCAL_CORR_B0,
194 AR_PHY_RX_IQCAL_CORR_B1,
195 AR_PHY_RX_IQCAL_CORR_B2,
196 };
197
198 for (i = 0; i < numChains; i++) {
199 powerMeasI = ah->totalPowerMeasI[i];
200 powerMeasQ = ah->totalPowerMeasQ[i];
201 iqCorrMeas = ah->totalIqCorrMeas[i];
202
203 ath_print(common, ATH_DBG_CALIBRATE,
204 "Starting IQ Cal and Correction for Chain %d\n",
205 i);
206
207 ath_print(common, ATH_DBG_CALIBRATE,
208 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
209 i, ah->totalIqCorrMeas[i]);
210
211 iqCorrNeg = 0;
212
213 if (iqCorrMeas > 0x80000000) {
214 iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
215 iqCorrNeg = 1;
216 }
217
218 ath_print(common, ATH_DBG_CALIBRATE,
219 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
220 ath_print(common, ATH_DBG_CALIBRATE,
221 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
222 ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
223 iqCorrNeg);
224
225 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
226 qCoffDenom = powerMeasQ / 64;
227
228 if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
229 iCoff = iqCorrMeas / iCoffDenom;
230 qCoff = powerMeasI / qCoffDenom - 64;
231 ath_print(common, ATH_DBG_CALIBRATE,
232 "Chn %d iCoff = 0x%08x\n", i, iCoff);
233 ath_print(common, ATH_DBG_CALIBRATE,
234 "Chn %d qCoff = 0x%08x\n", i, qCoff);
235
236 /* Force bounds on iCoff */
237 if (iCoff >= 63)
238 iCoff = 63;
239 else if (iCoff <= -63)
240 iCoff = -63;
241
242 /* Negate iCoff if iqCorrNeg == 0 */
243 if (iqCorrNeg == 0x0)
244 iCoff = -iCoff;
245
246 /* Force bounds on qCoff */
247 if (qCoff >= 63)
248 qCoff = 63;
249 else if (qCoff <= -63)
250 qCoff = -63;
251
252 iCoff = iCoff & 0x7f;
253 qCoff = qCoff & 0x7f;
254
255 ath_print(common, ATH_DBG_CALIBRATE,
256 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
257 i, iCoff, qCoff);
258 ath_print(common, ATH_DBG_CALIBRATE,
259 "Register offset (0x%04x) "
260 "before update = 0x%x\n",
261 offset_array[i],
262 REG_READ(ah, offset_array[i]));
263
264 REG_RMW_FIELD(ah, offset_array[i],
265 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
266 iCoff);
267 REG_RMW_FIELD(ah, offset_array[i],
268 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
269 qCoff);
270 ath_print(common, ATH_DBG_CALIBRATE,
271 "Register offset (0x%04x) QI COFF "
272 "(bitfields 0x%08x) after update = 0x%x\n",
273 offset_array[i],
274 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
275 REG_READ(ah, offset_array[i]));
276 ath_print(common, ATH_DBG_CALIBRATE,
277 "Register offset (0x%04x) QQ COFF "
278 "(bitfields 0x%08x) after update = 0x%x\n",
279 offset_array[i],
280 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
281 REG_READ(ah, offset_array[i]));
282
283 ath_print(common, ATH_DBG_CALIBRATE,
284 "IQ Cal and Correction done for Chain %d\n",
285 i);
286 }
287 }
288
289 REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
290 AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
291 ath_print(common, ATH_DBG_CALIBRATE,
292 "IQ Cal and Correction (offset 0x%04x) enabled "
293 "(bit position 0x%08x). New Value 0x%08x\n",
294 (unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
295 AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
296 REG_READ(ah, AR_PHY_RX_IQCAL_CORR_B0));
297}
298
299static const struct ath9k_percal_data iq_cal_single_sample = {
300 IQ_MISMATCH_CAL,
301 MIN_CAL_SAMPLES,
302 PER_MAX_LOG_COUNT,
303 ar9003_hw_iqcal_collect,
304 ar9003_hw_iqcalibrate
305};
306
/*
 * Initialize calibration settings (priv_ops->init_cal_settings):
 * attach the IQ-mismatch descriptor and advertise which calibration
 * types this hardware supports.
 */
static void ar9003_hw_init_cal_settings(struct ath_hw *ah)
{
	ah->iq_caldata.calData = &iq_cal_single_sample;
	/* Only IQ mismatch is advertised here; TEMP_COMP is handled
	 * separately in ar9003_hw_iscal_supported(). */
	ah->supp_cals = IQ_MISMATCH_CAL;
}
312
313static bool ar9003_hw_iscal_supported(struct ath_hw *ah,
314 enum ath9k_cal_types calType)
315{
316 switch (calType & ah->supp_cals) {
317 case IQ_MISMATCH_CAL:
318 /*
319 * XXX: Run IQ Mismatch for non-CCK only
320 * Note that CHANNEL_B is never set though.
321 */
322 return true;
323 case ADC_GAIN_CAL:
324 case ADC_DC_CAL:
325 return false;
326 case TEMP_COMP_CAL:
327 return true;
328 }
329
330 return false;
331}
332
333/*
334 * solve 4x4 linear equation used in loopback iq cal.
335 */
/*
 * solve 4x4 linear equation used in loopback iq cal.
 *
 * Inputs are Q15 fixed-point sin/cos terms for the two phase settings
 * plus the chain's magnitude/phase measurements; on success the four
 * solutions (tx mag, tx phase, rx mag, rx phase) are written to
 * solved_eq[0..3]. Returns false if the determinant-like term f2 is
 * zero (would divide by zero).
 */
static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
				   s32 sin_2phi_1,
				   s32 cos_2phi_1,
				   s32 sin_2phi_2,
				   s32 cos_2phi_2,
				   s32 mag_a0_d0,
				   s32 phs_a0_d0,
				   s32 mag_a1_d0,
				   s32 phs_a1_d0,
				   s32 solved_eq[])
{
	s32 f1 = cos_2phi_1 - cos_2phi_2,
	    f3 = sin_2phi_1 - sin_2phi_2,
	    f2;
	s32 mag_tx, phs_tx, mag_rx, phs_rx;
	/* Q15 scale used throughout the fixed-point math */
	const s32 result_shift = 1 << 15;
	struct ath_common *common = ath9k_hw_common(ah);

	f2 = (f1 * f1 + f3 * f3) / result_shift;

	if (!f2) {
		ath_print(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
		return false;
	}

	/* mag mismatch, tx */
	mag_tx = f1 * (mag_a0_d0 - mag_a1_d0) + f3 * (phs_a0_d0 - phs_a1_d0);
	/* phs mismatch, tx */
	phs_tx = f3 * (-mag_a0_d0 + mag_a1_d0) + f1 * (phs_a0_d0 - phs_a1_d0);

	mag_tx = (mag_tx / f2);
	phs_tx = (phs_tx / f2);

	/* mag mismatch, rx */
	mag_rx = mag_a0_d0 - (cos_2phi_1 * mag_tx + sin_2phi_1 * phs_tx) /
		 result_shift;
	/* phs mismatch, rx */
	phs_rx = phs_a0_d0 + (sin_2phi_1 * mag_tx - cos_2phi_1 * phs_tx) /
		 result_shift;

	solved_eq[0] = mag_tx;
	solved_eq[1] = phs_tx;
	solved_eq[2] = mag_rx;
	solved_eq[3] = phs_rx;

	return true;
}
383
384static s32 ar9003_hw_find_mag_approx(struct ath_hw *ah, s32 in_re, s32 in_im)
385{
386 s32 abs_i = abs(in_re),
387 abs_q = abs(in_im),
388 max_abs, min_abs;
389
390 if (abs_i > abs_q) {
391 max_abs = abs_i;
392 min_abs = abs_q;
393 } else {
394 max_abs = abs_q;
395 min_abs = abs_i;
396 }
397
398 return max_abs - (max_abs / 32) + (min_abs / 8) + (min_abs / 4);
399}
400
401#define DELPT 32
402
403static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
404 s32 chain_idx,
405 const s32 iq_res[],
406 s32 iqc_coeff[])
407{
408 s32 i2_m_q2_a0_d0, i2_p_q2_a0_d0, iq_corr_a0_d0,
409 i2_m_q2_a0_d1, i2_p_q2_a0_d1, iq_corr_a0_d1,
410 i2_m_q2_a1_d0, i2_p_q2_a1_d0, iq_corr_a1_d0,
411 i2_m_q2_a1_d1, i2_p_q2_a1_d1, iq_corr_a1_d1;
412 s32 mag_a0_d0, mag_a1_d0, mag_a0_d1, mag_a1_d1,
413 phs_a0_d0, phs_a1_d0, phs_a0_d1, phs_a1_d1,
414 sin_2phi_1, cos_2phi_1,
415 sin_2phi_2, cos_2phi_2;
416 s32 mag_tx, phs_tx, mag_rx, phs_rx;
417 s32 solved_eq[4], mag_corr_tx, phs_corr_tx, mag_corr_rx, phs_corr_rx,
418 q_q_coff, q_i_coff;
419 const s32 res_scale = 1 << 15;
420 const s32 delpt_shift = 1 << 8;
421 s32 mag1, mag2;
422 struct ath_common *common = ath9k_hw_common(ah);
423
424 i2_m_q2_a0_d0 = iq_res[0] & 0xfff;
425 i2_p_q2_a0_d0 = (iq_res[0] >> 12) & 0xfff;
426 iq_corr_a0_d0 = ((iq_res[0] >> 24) & 0xff) + ((iq_res[1] & 0xf) << 8);
427
428 if (i2_m_q2_a0_d0 > 0x800)
429 i2_m_q2_a0_d0 = -((0xfff - i2_m_q2_a0_d0) + 1);
430
431 if (i2_p_q2_a0_d0 > 0x800)
432 i2_p_q2_a0_d0 = -((0xfff - i2_p_q2_a0_d0) + 1);
433
434 if (iq_corr_a0_d0 > 0x800)
435 iq_corr_a0_d0 = -((0xfff - iq_corr_a0_d0) + 1);
436
437 i2_m_q2_a0_d1 = (iq_res[1] >> 4) & 0xfff;
438 i2_p_q2_a0_d1 = (iq_res[2] & 0xfff);
439 iq_corr_a0_d1 = (iq_res[2] >> 12) & 0xfff;
440
441 if (i2_m_q2_a0_d1 > 0x800)
442 i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
443
444 if (i2_p_q2_a0_d1 > 0x800)
445 i2_p_q2_a0_d1 = -((0xfff - i2_p_q2_a0_d1) + 1);
446
447 if (iq_corr_a0_d1 > 0x800)
448 iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
449
450 i2_m_q2_a1_d0 = ((iq_res[2] >> 24) & 0xff) + ((iq_res[3] & 0xf) << 8);
451 i2_p_q2_a1_d0 = (iq_res[3] >> 4) & 0xfff;
452 iq_corr_a1_d0 = iq_res[4] & 0xfff;
453
454 if (i2_m_q2_a1_d0 > 0x800)
455 i2_m_q2_a1_d0 = -((0xfff - i2_m_q2_a1_d0) + 1);
456
457 if (i2_p_q2_a1_d0 > 0x800)
458 i2_p_q2_a1_d0 = -((0xfff - i2_p_q2_a1_d0) + 1);
459
460 if (iq_corr_a1_d0 > 0x800)
461 iq_corr_a1_d0 = -((0xfff - iq_corr_a1_d0) + 1);
462
463 i2_m_q2_a1_d1 = (iq_res[4] >> 12) & 0xfff;
464 i2_p_q2_a1_d1 = ((iq_res[4] >> 24) & 0xff) + ((iq_res[5] & 0xf) << 8);
465 iq_corr_a1_d1 = (iq_res[5] >> 4) & 0xfff;
466
467 if (i2_m_q2_a1_d1 > 0x800)
468 i2_m_q2_a1_d1 = -((0xfff - i2_m_q2_a1_d1) + 1);
469
470 if (i2_p_q2_a1_d1 > 0x800)
471 i2_p_q2_a1_d1 = -((0xfff - i2_p_q2_a1_d1) + 1);
472
473 if (iq_corr_a1_d1 > 0x800)
474 iq_corr_a1_d1 = -((0xfff - iq_corr_a1_d1) + 1);
475
476 if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
477 (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
478 ath_print(common, ATH_DBG_CALIBRATE,
479 "Divide by 0:\na0_d0=%d\n"
480 "a0_d1=%d\na2_d0=%d\na1_d1=%d\n",
481 i2_p_q2_a0_d0, i2_p_q2_a0_d1,
482 i2_p_q2_a1_d0, i2_p_q2_a1_d1);
483 return false;
484 }
485
486 mag_a0_d0 = (i2_m_q2_a0_d0 * res_scale) / i2_p_q2_a0_d0;
487 phs_a0_d0 = (iq_corr_a0_d0 * res_scale) / i2_p_q2_a0_d0;
488
489 mag_a0_d1 = (i2_m_q2_a0_d1 * res_scale) / i2_p_q2_a0_d1;
490 phs_a0_d1 = (iq_corr_a0_d1 * res_scale) / i2_p_q2_a0_d1;
491
492 mag_a1_d0 = (i2_m_q2_a1_d0 * res_scale) / i2_p_q2_a1_d0;
493 phs_a1_d0 = (iq_corr_a1_d0 * res_scale) / i2_p_q2_a1_d0;
494
495 mag_a1_d1 = (i2_m_q2_a1_d1 * res_scale) / i2_p_q2_a1_d1;
496 phs_a1_d1 = (iq_corr_a1_d1 * res_scale) / i2_p_q2_a1_d1;
497
498 /* w/o analog phase shift */
499 sin_2phi_1 = (((mag_a0_d0 - mag_a0_d1) * delpt_shift) / DELPT);
500 /* w/o analog phase shift */
501 cos_2phi_1 = (((phs_a0_d1 - phs_a0_d0) * delpt_shift) / DELPT);
502 /* w/ analog phase shift */
503 sin_2phi_2 = (((mag_a1_d0 - mag_a1_d1) * delpt_shift) / DELPT);
504 /* w/ analog phase shift */
505 cos_2phi_2 = (((phs_a1_d1 - phs_a1_d0) * delpt_shift) / DELPT);
506
507 /*
508 * force sin^2 + cos^2 = 1;
509 * find magnitude by approximation
510 */
511 mag1 = ar9003_hw_find_mag_approx(ah, cos_2phi_1, sin_2phi_1);
512 mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
513
514 if ((mag1 == 0) || (mag2 == 0)) {
515 ath_print(common, ATH_DBG_CALIBRATE,
516 "Divide by 0: mag1=%d, mag2=%d\n",
517 mag1, mag2);
518 return false;
519 }
520
521 /* normalization sin and cos by mag */
522 sin_2phi_1 = (sin_2phi_1 * res_scale / mag1);
523 cos_2phi_1 = (cos_2phi_1 * res_scale / mag1);
524 sin_2phi_2 = (sin_2phi_2 * res_scale / mag2);
525 cos_2phi_2 = (cos_2phi_2 * res_scale / mag2);
526
527 /* calculate IQ mismatch */
528 if (!ar9003_hw_solve_iq_cal(ah,
529 sin_2phi_1, cos_2phi_1,
530 sin_2phi_2, cos_2phi_2,
531 mag_a0_d0, phs_a0_d0,
532 mag_a1_d0,
533 phs_a1_d0, solved_eq)) {
534 ath_print(common, ATH_DBG_CALIBRATE,
535 "Call to ar9003_hw_solve_iq_cal() failed.\n");
536 return false;
537 }
538
539 mag_tx = solved_eq[0];
540 phs_tx = solved_eq[1];
541 mag_rx = solved_eq[2];
542 phs_rx = solved_eq[3];
543
544 ath_print(common, ATH_DBG_CALIBRATE,
545 "chain %d: mag mismatch=%d phase mismatch=%d\n",
546 chain_idx, mag_tx/res_scale, phs_tx/res_scale);
547
548 if (res_scale == mag_tx) {
549 ath_print(common, ATH_DBG_CALIBRATE,
550 "Divide by 0: mag_tx=%d, res_scale=%d\n",
551 mag_tx, res_scale);
552 return false;
553 }
554
555 /* calculate and quantize Tx IQ correction factor */
556 mag_corr_tx = (mag_tx * res_scale) / (res_scale - mag_tx);
557 phs_corr_tx = -phs_tx;
558
559 q_q_coff = (mag_corr_tx * 128 / res_scale);
560 q_i_coff = (phs_corr_tx * 256 / res_scale);
561
562 ath_print(common, ATH_DBG_CALIBRATE,
563 "tx chain %d: mag corr=%d phase corr=%d\n",
564 chain_idx, q_q_coff, q_i_coff);
565
566 if (q_i_coff < -63)
567 q_i_coff = -63;
568 if (q_i_coff > 63)
569 q_i_coff = 63;
570 if (q_q_coff < -63)
571 q_q_coff = -63;
572 if (q_q_coff > 63)
573 q_q_coff = 63;
574
575 iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
576
577 ath_print(common, ATH_DBG_CALIBRATE,
578 "tx chain %d: iq corr coeff=%x\n",
579 chain_idx, iqc_coeff[0]);
580
581 if (-mag_rx == res_scale) {
582 ath_print(common, ATH_DBG_CALIBRATE,
583 "Divide by 0: mag_rx=%d, res_scale=%d\n",
584 mag_rx, res_scale);
585 return false;
586 }
587
588 /* calculate and quantize Rx IQ correction factors */
589 mag_corr_rx = (-mag_rx * res_scale) / (res_scale + mag_rx);
590 phs_corr_rx = -phs_rx;
591
592 q_q_coff = (mag_corr_rx * 128 / res_scale);
593 q_i_coff = (phs_corr_rx * 256 / res_scale);
594
595 ath_print(common, ATH_DBG_CALIBRATE,
596 "rx chain %d: mag corr=%d phase corr=%d\n",
597 chain_idx, q_q_coff, q_i_coff);
598
599 if (q_i_coff < -63)
600 q_i_coff = -63;
601 if (q_i_coff > 63)
602 q_i_coff = 63;
603 if (q_q_coff < -63)
604 q_q_coff = -63;
605 if (q_q_coff > 63)
606 q_q_coff = 63;
607
608 iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
609
610 ath_print(common, ATH_DBG_CALIBRATE,
611 "rx chain %d: iq corr coeff=%x\n",
612 chain_idx, iqc_coeff[1]);
613
614 return true;
615}
616
/*
 * Run the hardware tx IQ calibration, read back the per-chain capture
 * results, compute correction coefficients via
 * ar9003_hw_calc_iq_corr(), program them, and enable tx/rx loopback
 * IQ correction. On any failure the routine bails out via the
 * TX_IQ_CAL_FAILED label and leaves correction disabled.
 */
static void ar9003_hw_tx_iq_cal(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	/* Per-chain status/coefficient/correction registers */
	const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
		AR_PHY_TX_IQCAL_STATUS_B0,
		AR_PHY_TX_IQCAL_STATUS_B1,
		AR_PHY_TX_IQCAL_STATUS_B2,
	};
	const u32 tx_corr_coeff[AR9300_MAX_CHAINS] = {
		AR_PHY_TX_IQCAL_CORR_COEFF_01_B0,
		AR_PHY_TX_IQCAL_CORR_COEFF_01_B1,
		AR_PHY_TX_IQCAL_CORR_COEFF_01_B2,
	};
	const u32 rx_corr[AR9300_MAX_CHAINS] = {
		AR_PHY_RX_IQCAL_CORR_B0,
		AR_PHY_RX_IQCAL_CORR_B1,
		AR_PHY_RX_IQCAL_CORR_B2,
	};
	const u_int32_t chan_info_tab[] = {
		AR_PHY_CHAN_INFO_TAB_0,
		AR_PHY_CHAN_INFO_TAB_1,
		AR_PHY_CHAN_INFO_TAB_2,
	};
	s32 iq_res[6];		/* six packed capture words per chain */
	s32 iqc_coeff[2];	/* [0] = tx coeff, [1] = rx coeff */
	s32 i, j;
	u32 num_chains = 0;

	/* Count active tx chains */
	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
		if (ah->txchainmask & (1 << i))
			num_chains++;
	}

	/*
	 * NOTE(review): macro name is spelled "IQCAQL" — presumably a
	 * typo carried over from the register header where it is
	 * defined; confirm before renaming there.
	 */
	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
		      AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
		      DELPT);
	/* Kick off the hardware cal */
	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
		      AR_PHY_TX_IQCAL_START_DO_CAL,
		      AR_PHY_TX_IQCAL_START_DO_CAL);

	/* Wait for hardware to clear DO_CAL (cal complete) */
	if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
			   AR_PHY_TX_IQCAL_START_DO_CAL,
			   0, AH_WAIT_TIMEOUT)) {
		ath_print(common, ATH_DBG_CALIBRATE,
			  "Tx IQ Cal not complete.\n");
		goto TX_IQ_CAL_FAILED;
	}

	for (i = 0; i < num_chains; i++) {
		ath_print(common, ATH_DBG_CALIBRATE,
			  "Doing Tx IQ Cal for chain %d.\n", i);

		if (REG_READ(ah, txiqcal_status[i]) &
		    AR_PHY_TX_IQCAL_STATUS_FAILED) {
			ath_print(common, ATH_DBG_CALIBRATE,
				  "Tx IQ Cal failed for chain %d.\n", i);
			goto TX_IQ_CAL_FAILED;
		}

		/*
		 * Each of the three table entries yields one 32-bit and
		 * one 16-bit word, selected by the S2_READ toggle, giving
		 * the six words ar9003_hw_calc_iq_corr() expects.
		 */
		for (j = 0; j < 3; j++) {
			u_int8_t idx = 2 * j,
			offset = 4 * j;

			REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
				      AR_PHY_CHAN_INFO_TAB_S2_READ, 0);

			/* 32 bits */
			iq_res[idx] = REG_READ(ah, chan_info_tab[i] + offset);

			REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
				      AR_PHY_CHAN_INFO_TAB_S2_READ, 1);

			/* 16 bits */
			iq_res[idx+1] = 0xffff & REG_READ(ah,
							  chan_info_tab[i] +
							  offset);

			ath_print(common, ATH_DBG_CALIBRATE,
				  "IQ RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
				  idx, iq_res[idx], idx+1, iq_res[idx+1]);
		}

		if (!ar9003_hw_calc_iq_corr(ah, i, iq_res, iqc_coeff)) {
			ath_print(common, ATH_DBG_CALIBRATE,
				  "Failed in calculation of IQ correction.\n");
			goto TX_IQ_CAL_FAILED;
		}

		ath_print(common, ATH_DBG_CALIBRATE,
			  "IQ_COEFF[0] = 0x%x IQ_COEFF[1] = 0x%x\n",
			  iqc_coeff[0], iqc_coeff[1]);

		/* Program tx coefficient and the split rx coefficient */
		REG_RMW_FIELD(ah, tx_corr_coeff[i],
			      AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
			      iqc_coeff[0]);
		REG_RMW_FIELD(ah, rx_corr[i],
			      AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF,
			      iqc_coeff[1] >> 7);
		REG_RMW_FIELD(ah, rx_corr[i],
			      AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF,
			      iqc_coeff[1]);
	}

	/* Enable tx and rx-loopback IQ correction */
	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
		      AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
		      AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);

	return;

TX_IQ_CAL_FAILED:
	ath_print(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
}
730
/*
 * Initial calibration after reset (priv_ops->init_cal): run AGC
 * (and optionally tx IQ) calibration with all chains enabled, then
 * build the periodic calibration list for this channel.
 *
 * Returns false if the AGC offset calibration does not complete
 * within the poll timeout; true otherwise.
 */
static bool ar9003_hw_init_cal(struct ath_hw *ah,
			       struct ath9k_channel *chan)
{
	struct ath_common *common = ath9k_hw_common(ah);

	/*
	 * 0x7 = 0b111 , AR9003 needs to be configured for 3-chain mode before
	 * running AGC/TxIQ cals
	 */
	ar9003_hw_set_chain_masks(ah, 0x7, 0x7);

	/* Calibrate the AGC */
	REG_WRITE(ah, AR_PHY_AGC_CONTROL,
		  REG_READ(ah, AR_PHY_AGC_CONTROL) |
		  AR_PHY_AGC_CONTROL_CAL);

	/* Poll for offset calibration complete */
	if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
			   0, AH_WAIT_TIMEOUT)) {
		ath_print(common, ATH_DBG_CALIBRATE,
			  "offset calibration failed to "
			  "complete in 1ms; noisy environment?\n");
		return false;
	}

	/* Do Tx IQ Calibration */
	if (ah->config.tx_iq_calibration)
		ar9003_hw_tx_iq_cal(ah);

	/* Revert chainmasks to their original values before NF cal */
	ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);

	/* Initialize list pointers */
	ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;

	/* Queue each supported periodic calibration */
	if (ar9003_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
		INIT_CAL(&ah->iq_caldata);
		INSERT_CAL(ah, &ah->iq_caldata);
		ath_print(common, ATH_DBG_CALIBRATE,
			  "enabling IQ Calibration.\n");
	}

	if (ar9003_hw_iscal_supported(ah, TEMP_COMP_CAL)) {
		INIT_CAL(&ah->tempCompCalData);
		INSERT_CAL(ah, &ah->tempCompCalData);
		ath_print(common, ATH_DBG_CALIBRATE,
			  "enabling Temperature Compensation Calibration.\n");
	}

	/* Initialize current pointer to first element in list */
	ah->cal_list_curr = ah->cal_list;

	/* Kick off the first queued calibration, if any */
	if (ah->cal_list_curr)
		ath9k_hw_reset_calibration(ah, ah->cal_list_curr);

	/* All cals are invalid for the fresh channel until they run */
	chan->CalValid = 0;

	return true;
}
790
/*
 * Wire up the AR9003 calibration callbacks into the hardware ops
 * tables. Called once during hardware attach; after this, the generic
 * ath9k code dispatches calibration through these pointers.
 */
void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
	struct ath_hw_ops *ops = ath9k_hw_ops(ah);

	priv_ops->init_cal_settings = ar9003_hw_init_cal_settings;
	priv_ops->init_cal = ar9003_hw_init_cal;
	priv_ops->setup_calibration = ar9003_hw_setup_calibration;
	priv_ops->iscal_supported = ar9003_hw_iscal_supported;

	ops->calibrate = ar9003_hw_calibrate;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
new file mode 100644
index 000000000000..23eb60ea5455
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -0,0 +1,1838 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "ar9003_phy.h"
19#include "ar9003_eeprom.h"
20
21#define COMP_HDR_LEN 4
22#define COMP_CKSUM_LEN 2
23
24#define AR_CH0_TOP (0x00016288)
25#define AR_CH0_TOP_XPABIASLVL (0x3)
26#define AR_CH0_TOP_XPABIASLVL_S (8)
27
28#define AR_CH0_THERM (0x00016290)
29#define AR_CH0_THERM_SPARE (0x3f)
30#define AR_CH0_THERM_SPARE_S (0)
31
32#define AR_SWITCH_TABLE_COM_ALL (0xffff)
33#define AR_SWITCH_TABLE_COM_ALL_S (0)
34
35#define AR_SWITCH_TABLE_COM2_ALL (0xffffff)
36#define AR_SWITCH_TABLE_COM2_ALL_S (0)
37
38#define AR_SWITCH_TABLE_ALL (0xfff)
39#define AR_SWITCH_TABLE_ALL_S (0)
40
41#define LE16(x) __constant_cpu_to_le16(x)
42#define LE32(x) __constant_cpu_to_le32(x)
43
44static const struct ar9300_eeprom ar9300_default = {
45 .eepromVersion = 2,
46 .templateVersion = 2,
47 .macAddr = {1, 2, 3, 4, 5, 6},
48 .custData = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
49 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
50 .baseEepHeader = {
51 .regDmn = { LE16(0), LE16(0x1f) },
52 .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
53 .opCapFlags = {
54 .opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
55 .eepMisc = 0,
56 },
57 .rfSilent = 0,
58 .blueToothOptions = 0,
59 .deviceCap = 0,
60 .deviceType = 5, /* takes lower byte in eeprom location */
61 .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
62 .params_for_tuning_caps = {0, 0},
63 .featureEnable = 0x0c,
64 /*
65 * bit0 - enable tx temp comp - disabled
66 * bit1 - enable tx volt comp - disabled
67 * bit2 - enable fastClock - enabled
68 * bit3 - enable doubling - enabled
69 * bit4 - enable internal regulator - disabled
70 */
71 .miscConfiguration = 0, /* bit0 - turn down drivestrength */
72 .eepromWriteEnableGpio = 3,
73 .wlanDisableGpio = 0,
74 .wlanLedGpio = 8,
75 .rxBandSelectGpio = 0xff,
76 .txrxgain = 0,
77 .swreg = 0,
78 },
79 .modalHeader2G = {
80 /* ar9300_modal_eep_header 2g */
81 /* 4 idle,t1,t2,b(4 bits per setting) */
82 .antCtrlCommon = LE32(0x110),
83 /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
84 .antCtrlCommon2 = LE32(0x22222),
85
86 /*
87 * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
88 * rx1, rx12, b (2 bits each)
89 */
90 .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
91
92 /*
93 * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
94 * for ar9280 (0xa20c/b20c 5:0)
95 */
96 .xatten1DB = {0, 0, 0},
97
98 /*
99 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
100 * for ar9280 (0xa20c/b20c 16:12
101 */
102 .xatten1Margin = {0, 0, 0},
103 .tempSlope = 36,
104 .voltSlope = 0,
105
106 /*
107 * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
108 * channels in usual fbin coding format
109 */
110 .spurChans = {0, 0, 0, 0, 0},
111
112 /*
113 * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
114 * if the register is per chain
115 */
116 .noiseFloorThreshCh = {-1, 0, 0},
117 .ob = {1, 1, 1},/* 3 chain */
118 .db_stage2 = {1, 1, 1}, /* 3 chain */
119 .db_stage3 = {0, 0, 0},
120 .db_stage4 = {0, 0, 0},
121 .xpaBiasLvl = 0,
122 .txFrameToDataStart = 0x0e,
123 .txFrameToPaOn = 0x0e,
124 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
125 .antennaGain = 0,
126 .switchSettling = 0x2c,
127 .adcDesiredSize = -30,
128 .txEndToXpaOff = 0,
129 .txEndToRxOn = 0x2,
130 .txFrameToXpaOn = 0xe,
131 .thresh62 = 28,
132 .futureModal = { /* [32] */
133 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
134 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
135 },
136 },
137 .calFreqPier2G = {
138 FREQ2FBIN(2412, 1),
139 FREQ2FBIN(2437, 1),
140 FREQ2FBIN(2472, 1),
141 },
142 /* ar9300_cal_data_per_freq_op_loop 2g */
143 .calPierData2G = {
144 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
145 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
146 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
147 },
148 .calTarget_freqbin_Cck = {
149 FREQ2FBIN(2412, 1),
150 FREQ2FBIN(2484, 1),
151 },
152 .calTarget_freqbin_2G = {
153 FREQ2FBIN(2412, 1),
154 FREQ2FBIN(2437, 1),
155 FREQ2FBIN(2472, 1)
156 },
157 .calTarget_freqbin_2GHT20 = {
158 FREQ2FBIN(2412, 1),
159 FREQ2FBIN(2437, 1),
160 FREQ2FBIN(2472, 1)
161 },
162 .calTarget_freqbin_2GHT40 = {
163 FREQ2FBIN(2412, 1),
164 FREQ2FBIN(2437, 1),
165 FREQ2FBIN(2472, 1)
166 },
167 .calTargetPowerCck = {
168 /* 1L-5L,5S,11L,11S */
169 { {36, 36, 36, 36} },
170 { {36, 36, 36, 36} },
171 },
172 .calTargetPower2G = {
173 /* 6-24,36,48,54 */
174 { {32, 32, 28, 24} },
175 { {32, 32, 28, 24} },
176 { {32, 32, 28, 24} },
177 },
178 .calTargetPower2GHT20 = {
179 { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
180 { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
181 { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
182 },
183 .calTargetPower2GHT40 = {
184 { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
185 { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
186 { {32, 32, 32, 32, 28, 20, 32, 32, 28, 20, 32, 32, 28, 20} },
187 },
188 .ctlIndex_2G = {
189 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
190 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
191 },
192 .ctl_freqbin_2G = {
193 {
194 FREQ2FBIN(2412, 1),
195 FREQ2FBIN(2417, 1),
196 FREQ2FBIN(2457, 1),
197 FREQ2FBIN(2462, 1)
198 },
199 {
200 FREQ2FBIN(2412, 1),
201 FREQ2FBIN(2417, 1),
202 FREQ2FBIN(2462, 1),
203 0xFF,
204 },
205
206 {
207 FREQ2FBIN(2412, 1),
208 FREQ2FBIN(2417, 1),
209 FREQ2FBIN(2462, 1),
210 0xFF,
211 },
212 {
213 FREQ2FBIN(2422, 1),
214 FREQ2FBIN(2427, 1),
215 FREQ2FBIN(2447, 1),
216 FREQ2FBIN(2452, 1)
217 },
218
219 {
220 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
221 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
222 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
223 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
224 },
225
226 {
227 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
228 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
229 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
230 0,
231 },
232
233 {
234 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
235 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
236 FREQ2FBIN(2472, 1),
237 0,
238 },
239
240 {
241 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
242 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
243 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
244 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
245 },
246
247 {
248 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
249 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
250 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
251 },
252
253 {
254 /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
255 /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
256 /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
257 0
258 },
259
260 {
261 /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
262 /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
263 /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
264 0
265 },
266
267 {
268 /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
269 /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
270 /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
271 /* Data[11].ctlEdges[3].bChannel */
272 FREQ2FBIN(2462, 1),
273 }
274 },
275 .ctlPowerData_2G = {
276 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
277 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
278 { { {60, 1}, {60, 0}, {60, 0}, {60, 1} } },
279
280 { { {60, 1}, {60, 0}, {0, 0}, {0, 0} } },
281 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
282 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
283
284 { { {60, 0}, {60, 1}, {60, 1}, {60, 0} } },
285 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
286 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
287
288 { { {60, 0}, {60, 1}, {60, 0}, {60, 0} } },
289 { { {60, 0}, {60, 1}, {60, 1}, {60, 1} } },
290 },
291 .modalHeader5G = {
292 /* 4 idle,t1,t2,b (4 bits per setting) */
293 .antCtrlCommon = LE32(0x110),
294 /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
295 .antCtrlCommon2 = LE32(0x22222),
296 /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
297 .antCtrlChain = {
298 LE16(0x000), LE16(0x000), LE16(0x000),
299 },
300 /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
301 .xatten1DB = {0, 0, 0},
302
303 /*
304 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
305 * for merlin (0xa20c/b20c 16:12
306 */
307 .xatten1Margin = {0, 0, 0},
308 .tempSlope = 68,
309 .voltSlope = 0,
310 /* spurChans spur channels in usual fbin coding format */
311 .spurChans = {0, 0, 0, 0, 0},
312 /* noiseFloorThreshCh Check if the register is per chain */
313 .noiseFloorThreshCh = {-1, 0, 0},
314 .ob = {3, 3, 3}, /* 3 chain */
315 .db_stage2 = {3, 3, 3}, /* 3 chain */
316 .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
317 .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
318 .xpaBiasLvl = 0,
319 .txFrameToDataStart = 0x0e,
320 .txFrameToPaOn = 0x0e,
321 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
322 .antennaGain = 0,
323 .switchSettling = 0x2d,
324 .adcDesiredSize = -30,
325 .txEndToXpaOff = 0,
326 .txEndToRxOn = 0x2,
327 .txFrameToXpaOn = 0xe,
328 .thresh62 = 28,
329 .futureModal = {
330 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
331 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
332 },
333 },
334 .calFreqPier5G = {
335 FREQ2FBIN(5180, 0),
336 FREQ2FBIN(5220, 0),
337 FREQ2FBIN(5320, 0),
338 FREQ2FBIN(5400, 0),
339 FREQ2FBIN(5500, 0),
340 FREQ2FBIN(5600, 0),
341 FREQ2FBIN(5725, 0),
342 FREQ2FBIN(5825, 0)
343 },
344 .calPierData5G = {
345 {
346 {0, 0, 0, 0, 0},
347 {0, 0, 0, 0, 0},
348 {0, 0, 0, 0, 0},
349 {0, 0, 0, 0, 0},
350 {0, 0, 0, 0, 0},
351 {0, 0, 0, 0, 0},
352 {0, 0, 0, 0, 0},
353 {0, 0, 0, 0, 0},
354 },
355 {
356 {0, 0, 0, 0, 0},
357 {0, 0, 0, 0, 0},
358 {0, 0, 0, 0, 0},
359 {0, 0, 0, 0, 0},
360 {0, 0, 0, 0, 0},
361 {0, 0, 0, 0, 0},
362 {0, 0, 0, 0, 0},
363 {0, 0, 0, 0, 0},
364 },
365 {
366 {0, 0, 0, 0, 0},
367 {0, 0, 0, 0, 0},
368 {0, 0, 0, 0, 0},
369 {0, 0, 0, 0, 0},
370 {0, 0, 0, 0, 0},
371 {0, 0, 0, 0, 0},
372 {0, 0, 0, 0, 0},
373 {0, 0, 0, 0, 0},
374 },
375
376 },
377 .calTarget_freqbin_5G = {
378 FREQ2FBIN(5180, 0),
379 FREQ2FBIN(5220, 0),
380 FREQ2FBIN(5320, 0),
381 FREQ2FBIN(5400, 0),
382 FREQ2FBIN(5500, 0),
383 FREQ2FBIN(5600, 0),
384 FREQ2FBIN(5725, 0),
385 FREQ2FBIN(5825, 0)
386 },
387 .calTarget_freqbin_5GHT20 = {
388 FREQ2FBIN(5180, 0),
389 FREQ2FBIN(5240, 0),
390 FREQ2FBIN(5320, 0),
391 FREQ2FBIN(5500, 0),
392 FREQ2FBIN(5700, 0),
393 FREQ2FBIN(5745, 0),
394 FREQ2FBIN(5725, 0),
395 FREQ2FBIN(5825, 0)
396 },
397 .calTarget_freqbin_5GHT40 = {
398 FREQ2FBIN(5180, 0),
399 FREQ2FBIN(5240, 0),
400 FREQ2FBIN(5320, 0),
401 FREQ2FBIN(5500, 0),
402 FREQ2FBIN(5700, 0),
403 FREQ2FBIN(5745, 0),
404 FREQ2FBIN(5725, 0),
405 FREQ2FBIN(5825, 0)
406 },
407 .calTargetPower5G = {
408 /* 6-24,36,48,54 */
409 { {20, 20, 20, 10} },
410 { {20, 20, 20, 10} },
411 { {20, 20, 20, 10} },
412 { {20, 20, 20, 10} },
413 { {20, 20, 20, 10} },
414 { {20, 20, 20, 10} },
415 { {20, 20, 20, 10} },
416 { {20, 20, 20, 10} },
417 },
418 .calTargetPower5GHT20 = {
419 /*
420 * 0_8_16,1-3_9-11_17-19,
421 * 4,5,6,7,12,13,14,15,20,21,22,23
422 */
423 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
424 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
425 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
426 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
427 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
428 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
429 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
430 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
431 },
432 .calTargetPower5GHT40 = {
433 /*
434 * 0_8_16,1-3_9-11_17-19,
435 * 4,5,6,7,12,13,14,15,20,21,22,23
436 */
437 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
438 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
439 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
440 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
441 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
442 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
443 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
444 { {20, 20, 10, 10, 0, 0, 10, 10, 0, 0, 10, 10, 0, 0} },
445 },
446 .ctlIndex_5G = {
447 0x10, 0x16, 0x18, 0x40, 0x46,
448 0x48, 0x30, 0x36, 0x38
449 },
450 .ctl_freqbin_5G = {
451 {
452 /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
453 /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
454 /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
455 /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
456 /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
457 /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
458 /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
459 /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
460 },
461 {
462 /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
463 /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
464 /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
465 /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
466 /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
467 /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
468 /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
469 /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
470 },
471
472 {
473 /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
474 /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
475 /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
476 /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
477 /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
478 /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
479 /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
480 /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
481 },
482
483 {
484 /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
485 /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
486 /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
487 /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
488 /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
489 /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
490 /* Data[3].ctlEdges[6].bChannel */ 0xFF,
491 /* Data[3].ctlEdges[7].bChannel */ 0xFF,
492 },
493
494 {
495 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
496 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
497 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
498 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
499 /* Data[4].ctlEdges[4].bChannel */ 0xFF,
500 /* Data[4].ctlEdges[5].bChannel */ 0xFF,
501 /* Data[4].ctlEdges[6].bChannel */ 0xFF,
502 /* Data[4].ctlEdges[7].bChannel */ 0xFF,
503 },
504
505 {
506 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
507 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
508 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
509 /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
510 /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
511 /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
512 /* Data[5].ctlEdges[6].bChannel */ 0xFF,
513 /* Data[5].ctlEdges[7].bChannel */ 0xFF
514 },
515
516 {
517 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
518 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
519 /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
520 /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
521 /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
522 /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
523 /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
524 /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
525 },
526
527 {
528 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
529 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
530 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
531 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
532 /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
533 /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
534 /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
535 /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
536 },
537
538 {
539 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
540 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
541 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
542 /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
543 /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
544 /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
545 /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
546 /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
547 }
548 },
549 .ctlPowerData_5G = {
550 {
551 {
552 {60, 1}, {60, 1}, {60, 1}, {60, 1},
553 {60, 1}, {60, 1}, {60, 1}, {60, 0},
554 }
555 },
556 {
557 {
558 {60, 1}, {60, 1}, {60, 1}, {60, 1},
559 {60, 1}, {60, 1}, {60, 1}, {60, 0},
560 }
561 },
562 {
563 {
564 {60, 0}, {60, 1}, {60, 0}, {60, 1},
565 {60, 1}, {60, 1}, {60, 1}, {60, 1},
566 }
567 },
568 {
569 {
570 {60, 0}, {60, 1}, {60, 1}, {60, 0},
571 {60, 1}, {60, 0}, {60, 0}, {60, 0},
572 }
573 },
574 {
575 {
576 {60, 1}, {60, 1}, {60, 1}, {60, 0},
577 {60, 0}, {60, 0}, {60, 0}, {60, 0},
578 }
579 },
580 {
581 {
582 {60, 1}, {60, 1}, {60, 1}, {60, 1},
583 {60, 1}, {60, 0}, {60, 0}, {60, 0},
584 }
585 },
586 {
587 {
588 {60, 1}, {60, 1}, {60, 1}, {60, 1},
589 {60, 1}, {60, 1}, {60, 1}, {60, 1},
590 }
591 },
592 {
593 {
594 {60, 1}, {60, 1}, {60, 0}, {60, 1},
595 {60, 1}, {60, 1}, {60, 1}, {60, 0},
596 }
597 },
598 {
599 {
600 {60, 1}, {60, 0}, {60, 1}, {60, 1},
601 {60, 1}, {60, 1}, {60, 0}, {60, 1},
602 }
603 },
604 }
605};
606
/*
 * Validate the AR9300 eeprom image.
 * No sanity checks are implemented yet; always reports success.
 */
static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
{
	return 0;
}
611
612static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
613 enum eeprom_param param)
614{
615 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
616 struct ar9300_base_eep_hdr *pBase = &eep->baseEepHeader;
617
618 switch (param) {
619 case EEP_MAC_LSW:
620 return eep->macAddr[0] << 8 | eep->macAddr[1];
621 case EEP_MAC_MID:
622 return eep->macAddr[2] << 8 | eep->macAddr[3];
623 case EEP_MAC_MSW:
624 return eep->macAddr[4] << 8 | eep->macAddr[5];
625 case EEP_REG_0:
626 return le16_to_cpu(pBase->regDmn[0]);
627 case EEP_REG_1:
628 return le16_to_cpu(pBase->regDmn[1]);
629 case EEP_OP_CAP:
630 return pBase->deviceCap;
631 case EEP_OP_MODE:
632 return pBase->opCapFlags.opFlags;
633 case EEP_RF_SILENT:
634 return pBase->rfSilent;
635 case EEP_TX_MASK:
636 return (pBase->txrxMask >> 4) & 0xf;
637 case EEP_RX_MASK:
638 return pBase->txrxMask & 0xf;
639 case EEP_DRIVE_STRENGTH:
640#define AR9300_EEP_BASE_DRIV_STRENGTH 0x1
641 return pBase->miscConfiguration & AR9300_EEP_BASE_DRIV_STRENGTH;
642 case EEP_INTERNAL_REGULATOR:
643 /* Bit 4 is internal regulator flag */
644 return (pBase->featureEnable & 0x10) >> 4;
645 case EEP_SWREG:
646 return le32_to_cpu(pBase->swreg);
647 default:
648 return 0;
649 }
650}
651
652static bool ar9300_eeprom_read_byte(struct ath_common *common, int address,
653 u8 *buffer)
654{
655 u16 val;
656
657 if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val)))
658 return false;
659
660 *buffer = (val >> (8 * (address % 2))) & 0xff;
661 return true;
662}
663
664static bool ar9300_eeprom_read_word(struct ath_common *common, int address,
665 u8 *buffer)
666{
667 u16 val;
668
669 if (unlikely(!ath9k_hw_nvram_read(common, address / 2, &val)))
670 return false;
671
672 buffer[0] = val >> 8;
673 buffer[1] = val & 0xff;
674
675 return true;
676}
677
678static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
679 int count)
680{
681 struct ath_common *common = ath9k_hw_common(ah);
682 int i;
683
684 if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) {
685 ath_print(common, ATH_DBG_EEPROM,
686 "eeprom address not in range\n");
687 return false;
688 }
689
690 /*
691 * Since we're reading the bytes in reverse order from a little-endian
692 * word stream, an even address means we only use the lower half of
693 * the 16-bit word at that address
694 */
695 if (address % 2 == 0) {
696 if (!ar9300_eeprom_read_byte(common, address--, buffer++))
697 goto error;
698
699 count--;
700 }
701
702 for (i = 0; i < count / 2; i++) {
703 if (!ar9300_eeprom_read_word(common, address, buffer))
704 goto error;
705
706 address -= 2;
707 buffer += 2;
708 }
709
710 if (count % 2)
711 if (!ar9300_eeprom_read_byte(common, address, buffer))
712 goto error;
713
714 return true;
715
716error:
717 ath_print(common, ATH_DBG_EEPROM,
718 "unable to read eeprom region at offset %d\n", address);
719 return false;
720}
721
722static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference,
723 int *length, int *major, int *minor)
724{
725 unsigned long value[4];
726
727 value[0] = best[0];
728 value[1] = best[1];
729 value[2] = best[2];
730 value[3] = best[3];
731 *code = ((value[0] >> 5) & 0x0007);
732 *reference = (value[0] & 0x001f) | ((value[1] >> 2) & 0x0020);
733 *length = ((value[1] << 4) & 0x07f0) | ((value[2] >> 4) & 0x000f);
734 *major = (value[2] & 0x000f);
735 *minor = (value[3] & 0x00ff);
736}
737
738static u16 ar9300_comp_cksum(u8 *data, int dsize)
739{
740 int it, checksum = 0;
741
742 for (it = 0; it < dsize; it++) {
743 checksum += data[it];
744 checksum &= 0xffff;
745 }
746
747 return checksum;
748}
749
750static bool ar9300_uncompress_block(struct ath_hw *ah,
751 u8 *mptr,
752 int mdataSize,
753 u8 *block,
754 int size)
755{
756 int it;
757 int spot;
758 int offset;
759 int length;
760 struct ath_common *common = ath9k_hw_common(ah);
761
762 spot = 0;
763
764 for (it = 0; it < size; it += (length+2)) {
765 offset = block[it];
766 offset &= 0xff;
767 spot += offset;
768 length = block[it+1];
769 length &= 0xff;
770
771 if (length > 0 && spot >= 0 && spot+length < mdataSize) {
772 ath_print(common, ATH_DBG_EEPROM,
773 "Restore at %d: spot=%d "
774 "offset=%d length=%d\n",
775 it, spot, offset, length);
776 memcpy(&mptr[spot], &block[it+2], length);
777 spot += length;
778 } else if (length > 0) {
779 ath_print(common, ATH_DBG_EEPROM,
780 "Bad restore at %d: spot=%d "
781 "offset=%d length=%d\n",
782 it, spot, offset, length);
783 return false;
784 }
785 }
786 return true;
787}
788
789static int ar9300_compress_decision(struct ath_hw *ah,
790 int it,
791 int code,
792 int reference,
793 u8 *mptr,
794 u8 *word, int length, int mdata_size)
795{
796 struct ath_common *common = ath9k_hw_common(ah);
797 u8 *dptr;
798
799 switch (code) {
800 case _CompressNone:
801 if (length != mdata_size) {
802 ath_print(common, ATH_DBG_EEPROM,
803 "EEPROM structure size mismatch"
804 "memory=%d eeprom=%d\n", mdata_size, length);
805 return -1;
806 }
807 memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
808 ath_print(common, ATH_DBG_EEPROM, "restored eeprom %d:"
809 " uncompressed, length %d\n", it, length);
810 break;
811 case _CompressBlock:
812 if (reference == 0) {
813 dptr = mptr;
814 } else {
815 if (reference != 2) {
816 ath_print(common, ATH_DBG_EEPROM,
817 "cant find reference eeprom"
818 "struct %d\n", reference);
819 return -1;
820 }
821 memcpy(mptr, &ar9300_default, mdata_size);
822 }
823 ath_print(common, ATH_DBG_EEPROM,
824 "restore eeprom %d: block, reference %d,"
825 " length %d\n", it, reference, length);
826 ar9300_uncompress_block(ah, mptr, mdata_size,
827 (u8 *) (word + COMP_HDR_LEN), length);
828 break;
829 default:
830 ath_print(common, ATH_DBG_EEPROM, "unknown compression"
831 " code %d\n", code);
832 return -1;
833 }
834 return 0;
835}
836
837/*
838 * Read the configuration data from the eeprom.
839 * The data can be put in any specified memory buffer.
840 *
841 * Returns -1 on error.
842 * Returns address of next memory location on success.
843 */
844static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
845 u8 *mptr, int mdata_size)
846{
847#define MDEFAULT 15
848#define MSTATE 100
849 int cptr;
850 u8 *word;
851 int code;
852 int reference, length, major, minor;
853 int osize;
854 int it;
855 u16 checksum, mchecksum;
856 struct ath_common *common = ath9k_hw_common(ah);
857
858 word = kzalloc(2048, GFP_KERNEL);
859 if (!word)
860 return -1;
861
862 memcpy(mptr, &ar9300_default, mdata_size);
863
864 cptr = AR9300_BASE_ADDR;
865 for (it = 0; it < MSTATE; it++) {
866 if (!ar9300_read_eeprom(ah, cptr, word, COMP_HDR_LEN))
867 goto fail;
868
869 if ((word[0] == 0 && word[1] == 0 && word[2] == 0 &&
870 word[3] == 0) || (word[0] == 0xff && word[1] == 0xff
871 && word[2] == 0xff && word[3] == 0xff))
872 break;
873
874 ar9300_comp_hdr_unpack(word, &code, &reference,
875 &length, &major, &minor);
876 ath_print(common, ATH_DBG_EEPROM,
877 "Found block at %x: code=%d ref=%d"
878 "length=%d major=%d minor=%d\n", cptr, code,
879 reference, length, major, minor);
880 if (length >= 1024) {
881 ath_print(common, ATH_DBG_EEPROM,
882 "Skipping bad header\n");
883 cptr -= COMP_HDR_LEN;
884 continue;
885 }
886
887 osize = length;
888 ar9300_read_eeprom(ah, cptr, word,
889 COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
890 checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
891 mchecksum = word[COMP_HDR_LEN + osize] |
892 (word[COMP_HDR_LEN + osize + 1] << 8);
893 ath_print(common, ATH_DBG_EEPROM,
894 "checksum %x %x\n", checksum, mchecksum);
895 if (checksum == mchecksum) {
896 ar9300_compress_decision(ah, it, code, reference, mptr,
897 word, length, mdata_size);
898 } else {
899 ath_print(common, ATH_DBG_EEPROM,
900 "skipping block with bad checksum\n");
901 }
902 cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
903 }
904
905 kfree(word);
906 return cptr;
907
908fail:
909 kfree(word);
910 return -1;
911}
912
913/*
914 * Restore the configuration structure by reading the eeprom.
915 * This function destroys any existing in-memory structure
916 * content.
917 */
918static bool ath9k_hw_ar9300_fill_eeprom(struct ath_hw *ah)
919{
920 u8 *mptr = (u8 *) &ah->eeprom.ar9300_eep;
921
922 if (ar9300_eeprom_restore_internal(ah, mptr,
923 sizeof(struct ar9300_eeprom)) < 0)
924 return false;
925
926 return true;
927}
928
929/* XXX: review hardware docs */
930static int ath9k_hw_ar9300_get_eeprom_ver(struct ath_hw *ah)
931{
932 return ah->eeprom.ar9300_eep.eepromVersion;
933}
934
/* XXX: could be read from the eepromVersion, not sure yet */
static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
{
	/* revision is not tracked yet for this eeprom layout */
	return 0;
}
940
941static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
942 enum ieee80211_band freq_band)
943{
944 return 1;
945}
946
947static u16 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
948 struct ath9k_channel *chan)
949{
950 return -EINVAL;
951}
952
953static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
954{
955 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
956
957 if (is2ghz)
958 return eep->modalHeader2G.xpaBiasLvl;
959 else
960 return eep->modalHeader5G.xpaBiasLvl;
961}
962
963static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
964{
965 int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
966 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, (bias & 0x3));
967 REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_SPARE,
968 ((bias >> 2) & 0x3));
969}
970
971static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
972{
973 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
974 __le32 val;
975
976 if (is2ghz)
977 val = eep->modalHeader2G.antCtrlCommon;
978 else
979 val = eep->modalHeader5G.antCtrlCommon;
980 return le32_to_cpu(val);
981}
982
983static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
984{
985 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
986 __le32 val;
987
988 if (is2ghz)
989 val = eep->modalHeader2G.antCtrlCommon2;
990 else
991 val = eep->modalHeader5G.antCtrlCommon2;
992 return le32_to_cpu(val);
993}
994
995static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah,
996 int chain,
997 bool is2ghz)
998{
999 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1000 __le16 val = 0;
1001
1002 if (chain >= 0 && chain < AR9300_MAX_CHAINS) {
1003 if (is2ghz)
1004 val = eep->modalHeader2G.antCtrlChain[chain];
1005 else
1006 val = eep->modalHeader5G.antCtrlChain[chain];
1007 }
1008
1009 return le16_to_cpu(val);
1010}
1011
1012static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
1013{
1014 u32 value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
1015 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM, AR_SWITCH_TABLE_COM_ALL, value);
1016
1017 value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
1018 REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
1019
1020 value = ar9003_hw_ant_ctrl_chain_get(ah, 0, is2ghz);
1021 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_0, AR_SWITCH_TABLE_ALL, value);
1022
1023 value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
1024 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_1, AR_SWITCH_TABLE_ALL, value);
1025
1026 value = ar9003_hw_ant_ctrl_chain_get(ah, 2, is2ghz);
1027 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_2, AR_SWITCH_TABLE_ALL, value);
1028}
1029
1030static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
1031{
1032 int drive_strength;
1033 unsigned long reg;
1034
1035 drive_strength = ath9k_hw_ar9300_get_eeprom(ah, EEP_DRIVE_STRENGTH);
1036
1037 if (!drive_strength)
1038 return;
1039
1040 reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS1);
1041 reg &= ~0x00ffffc0;
1042 reg |= 0x5 << 21;
1043 reg |= 0x5 << 18;
1044 reg |= 0x5 << 15;
1045 reg |= 0x5 << 12;
1046 reg |= 0x5 << 9;
1047 reg |= 0x5 << 6;
1048 REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS1, reg);
1049
1050 reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS2);
1051 reg &= ~0xffffffe0;
1052 reg |= 0x5 << 29;
1053 reg |= 0x5 << 26;
1054 reg |= 0x5 << 23;
1055 reg |= 0x5 << 20;
1056 reg |= 0x5 << 17;
1057 reg |= 0x5 << 14;
1058 reg |= 0x5 << 11;
1059 reg |= 0x5 << 8;
1060 reg |= 0x5 << 5;
1061 REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS2, reg);
1062
1063 reg = REG_READ(ah, AR_PHY_65NM_CH0_BIAS4);
1064 reg &= ~0xff800000;
1065 reg |= 0x5 << 29;
1066 reg |= 0x5 << 26;
1067 reg |= 0x5 << 23;
1068 REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg);
1069}
1070
1071static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
1072{
1073 int internal_regulator =
1074 ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
1075
1076 if (internal_regulator) {
1077 /* Internal regulator is ON. Write swreg register. */
1078 int swreg = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
1079 REG_WRITE(ah, AR_RTC_REG_CONTROL1,
1080 REG_READ(ah, AR_RTC_REG_CONTROL1) &
1081 (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
1082 REG_WRITE(ah, AR_RTC_REG_CONTROL0, swreg);
1083 /* Set REG_CONTROL1.SWREG_PROGRAM */
1084 REG_WRITE(ah, AR_RTC_REG_CONTROL1,
1085 REG_READ(ah,
1086 AR_RTC_REG_CONTROL1) |
1087 AR_RTC_REG_CONTROL1_SWREG_PROGRAM);
1088 } else {
1089 REG_WRITE(ah, AR_RTC_SLEEP_CLK,
1090 (REG_READ(ah,
1091 AR_RTC_SLEEP_CLK) |
1092 AR_RTC_FORCE_SWREG_PRD));
1093 }
1094}
1095
1096static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
1097 struct ath9k_channel *chan)
1098{
1099 ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
1100 ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
1101 ar9003_hw_drive_strength_apply(ah);
1102 ar9003_hw_internal_regulator_apply(ah);
1103}
1104
/* Intentionally empty: AR9300 needs no ADDAC programming here; the
 * callback exists to satisfy the eeprom ops interface. */
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
				      struct ath9k_channel *chan)
{
}
1109
/*
 * Returns the interpolated y value corresponding to the specified x value
 * from the np ordered pairs of data (px,py).
 * The pairs do not have to be in any order.
 * If the specified x value is less than any of the px,
 * the returned y value is equal to the py for the lowest px.
 * If the specified x value is greater than any of the px,
 * the returned y value is equal to the py for the highest px.
 */
static int ar9003_hw_power_interpolate(int32_t x,
				       int32_t *px, int32_t *py, u_int16_t np)
{
	bool have_lo = false, have_hi = false;
	int32_t lo_x = 0, lo_y = 0;
	int32_t hi_x = 0, hi_y = 0;
	int i;

	/* Find the tightest measurements bracketing x from below and above. */
	for (i = 0; i < np; i++) {
		int32_t delta = x - px[i];

		/* candidate upper bound: at or above x, closer than before */
		if (delta <= 0 && (!have_hi || delta > (x - hi_x))) {
			hi_x = px[i];
			hi_y = py[i];
			have_hi = true;
		}
		/* candidate lower bound: at or below x, closer than before */
		if (delta >= 0 && (!have_lo || delta < (x - lo_x))) {
			lo_x = px[i];
			lo_y = py[i];
			have_lo = true;
		}
	}

	if (have_lo && have_hi) {
		if (hi_x == lo_x) /* exact hit, nothing to interpolate */
			return lo_y;
		return lo_y + ((x - lo_x) * (hi_y - lo_y)) / (hi_x - lo_x);
	}
	if (have_lo) /* x above every sample: clamp to the highest */
		return lo_y;
	if (have_hi) /* x below every sample: clamp to the lowest */
		return hi_y;

	/* no data points at all; should only happen when np == 0 */
	return -(1 << 30);
}
1172
/*
 * Look up the legacy-OFDM target power (in half-dB units, tPow2x) for
 * @rateIndex at @freq by interpolating between the target-power piers
 * stored in the EEPROM. Local arrays are sized for the 5G pier count,
 * which is >= the 2G count.
 */
1173static u8 ar9003_hw_eeprom_get_tgt_pwr(struct ath_hw *ah,
1174				       u16 rateIndex, u16 freq, bool is2GHz)
1175{
1176	u16 numPiers, i;
1177	s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
1178	s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
1179	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1180	struct cal_tgt_pow_legacy *pEepromTargetPwr;
1181	u8 *pFreqBin;
1182
1183	if (is2GHz) {
1184		numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
1185		pEepromTargetPwr = eep->calTargetPower2G;
1186		pFreqBin = eep->calTarget_freqbin_2G;
1187	} else {
1188		numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
1189		pEepromTargetPwr = eep->calTargetPower5G;
1190		pFreqBin = eep->calTarget_freqbin_5G;
1191	}
1192
1193	/*
1194	 * create array of channels and targetpower from
1195	 * targetpower piers stored on eeprom
1196	 */
1197	for (i = 0; i < numPiers; i++) {
1198		freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
1199		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
1200	}
1201
1202	/* interpolate to get target power for given frequency */
1203	return (u8) ar9003_hw_power_interpolate((s32) freq,
1204						freqArray,
1205						targetPowerArray, numPiers);
1206}
1207
/*
 * Same as ar9003_hw_eeprom_get_tgt_pwr() but for HT20 MCS rates:
 * interpolate the HT20 target-power piers for @rateIndex at @freq.
 */
1208static u8 ar9003_hw_eeprom_get_ht20_tgt_pwr(struct ath_hw *ah,
1209					    u16 rateIndex,
1210					    u16 freq, bool is2GHz)
1211{
1212	u16 numPiers, i;
1213	s32 targetPowerArray[AR9300_NUM_5G_20_TARGET_POWERS];
1214	s32 freqArray[AR9300_NUM_5G_20_TARGET_POWERS];
1215	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1216	struct cal_tgt_pow_ht *pEepromTargetPwr;
1217	u8 *pFreqBin;
1218
1219	if (is2GHz) {
1220		numPiers = AR9300_NUM_2G_20_TARGET_POWERS;
1221		pEepromTargetPwr = eep->calTargetPower2GHT20;
1222		pFreqBin = eep->calTarget_freqbin_2GHT20;
1223	} else {
1224		numPiers = AR9300_NUM_5G_20_TARGET_POWERS;
1225		pEepromTargetPwr = eep->calTargetPower5GHT20;
1226		pFreqBin = eep->calTarget_freqbin_5GHT20;
1227	}
1228
1229	/*
1230	 * create array of channels and targetpower
1231	 * from targetpower piers stored on eeprom
1232	 */
1233	for (i = 0; i < numPiers; i++) {
1234		freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
1235		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
1236	}
1237
1238	/* interpolate to get target power for given frequency */
1239	return (u8) ar9003_hw_power_interpolate((s32) freq,
1240						freqArray,
1241						targetPowerArray, numPiers);
1242}
1243
/*
 * Same as ar9003_hw_eeprom_get_tgt_pwr() but for HT40 MCS rates:
 * interpolate the HT40 target-power piers for @rateIndex at @freq.
 */
1244static u8 ar9003_hw_eeprom_get_ht40_tgt_pwr(struct ath_hw *ah,
1245					    u16 rateIndex,
1246					    u16 freq, bool is2GHz)
1247{
1248	u16 numPiers, i;
1249	s32 targetPowerArray[AR9300_NUM_5G_40_TARGET_POWERS];
1250	s32 freqArray[AR9300_NUM_5G_40_TARGET_POWERS];
1251	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1252	struct cal_tgt_pow_ht *pEepromTargetPwr;
1253	u8 *pFreqBin;
1254
1255	if (is2GHz) {
1256		numPiers = AR9300_NUM_2G_40_TARGET_POWERS;
1257		pEepromTargetPwr = eep->calTargetPower2GHT40;
1258		pFreqBin = eep->calTarget_freqbin_2GHT40;
1259	} else {
1260		numPiers = AR9300_NUM_5G_40_TARGET_POWERS;
1261		pEepromTargetPwr = eep->calTargetPower5GHT40;
1262		pFreqBin = eep->calTarget_freqbin_5GHT40;
1263	}
1264
1265	/*
1266	 * create array of channels and targetpower from
1267	 * targetpower piers stored on eeprom
1268	 */
1269	for (i = 0; i < numPiers; i++) {
1270		freqArray[i] = FBIN2FREQ(pFreqBin[i], is2GHz);
1271		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
1272	}
1273
1274	/* interpolate to get target power for given frequency */
1275	return (u8) ar9003_hw_power_interpolate((s32) freq,
1276						freqArray,
1277						targetPowerArray, numPiers);
1278}
1279
/*
 * Look up the CCK target power for @rateIndex at @freq. CCK only exists
 * on 2.4 GHz, hence the hard-coded is2GHz=1 in FBIN2FREQ below.
 */
1280static u8 ar9003_hw_eeprom_get_cck_tgt_pwr(struct ath_hw *ah,
1281					   u16 rateIndex, u16 freq)
1282{
1283	u16 numPiers = AR9300_NUM_2G_CCK_TARGET_POWERS, i;
1284	s32 targetPowerArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
1285	s32 freqArray[AR9300_NUM_2G_CCK_TARGET_POWERS];
1286	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1287	struct cal_tgt_pow_legacy *pEepromTargetPwr = eep->calTargetPowerCck;
1288	u8 *pFreqBin = eep->calTarget_freqbin_Cck;
1289
1290	/*
1291	 * create array of channels and targetpower from
1292	 * targetpower piers stored on eeprom
1293	 */
1294	for (i = 0; i < numPiers; i++) {
1295		freqArray[i] = FBIN2FREQ(pFreqBin[i], 1);
1296		targetPowerArray[i] = pEepromTargetPwr[i].tPow2x[rateIndex];
1297	}
1298
1299	/* interpolate to get target power for given frequency */
1300	return (u8) ar9003_hw_power_interpolate((s32) freq,
1301						freqArray,
1302						targetPowerArray, numPiers);
1303}
1304
/* Set tx power registers to array of values passed in */
/*
 * Pack the per-rate power table (half-dB units, indexed by enum
 * ar9300_Rates) into the per-rate tx power registers, four 6-bit power
 * fields per 32-bit register. Register addresses are raw offsets
 * (0xa3c0..0xa3ec); presumably per-rate TPC registers on AR9003 —
 * TODO confirm against the register map. Always returns 0.
 */
1306static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
1307{
	/* Pack one 6-bit power value at bit offset _s of a 32-bit word */
1308#define POW_SM(_r, _s)	(((_r) & 0x3f) << (_s))
1309	/* make sure forced gain is not set */
1310	REG_WRITE(ah, 0xa458, 0);
1311
1312	/* Write the OFDM power per rate set */
1313
1314	/* 6 (LSB), 9, 12, 18 (MSB) */
1315	REG_WRITE(ah, 0xa3c0,
1316		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 24) |
1317		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 16) |
1318		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 8) |
1319		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
1320
1321	/* 24 (LSB), 36, 48, 54 (MSB) */
1322	REG_WRITE(ah, 0xa3c4,
1323		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_54], 24) |
1324		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_48], 16) |
1325		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_36], 8) |
1326		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_6_24], 0));
1327
1328	/* Write the CCK power per rate set */
1329
1330	/* 1L (LSB), reserved, 2L, 2S (MSB) */
1331	REG_WRITE(ah, 0xa3c8,
1332		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 24) |
1333		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 16) |
1334		  /* POW_SM(txPowerTimes2, 8) | this is reserved for AR9003 */
1335		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0));
1336
1337	/* 5.5L (LSB), 5.5S, 11L, 11S (MSB) */
1338	REG_WRITE(ah, 0xa3cc,
1339		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_11S], 24) |
1340		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_11L], 16) |
1341		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_5S], 8) |
1342		  POW_SM(pPwrArray[ALL_TARGET_LEGACY_1L_5L], 0)
1343	    );
1344
1345	/* Write the HT20 power per rate set */
1346
1347	/* 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB) */
1348	REG_WRITE(ah, 0xa3d0,
1349		  POW_SM(pPwrArray[ALL_TARGET_HT20_5], 24) |
1350		  POW_SM(pPwrArray[ALL_TARGET_HT20_4], 16) |
1351		  POW_SM(pPwrArray[ALL_TARGET_HT20_1_3_9_11_17_19], 8) |
1352		  POW_SM(pPwrArray[ALL_TARGET_HT20_0_8_16], 0)
1353	    );
1354
1355	/* 6 (LSB), 7, 12, 13 (MSB) */
1356	REG_WRITE(ah, 0xa3d4,
1357		  POW_SM(pPwrArray[ALL_TARGET_HT20_13], 24) |
1358		  POW_SM(pPwrArray[ALL_TARGET_HT20_12], 16) |
1359		  POW_SM(pPwrArray[ALL_TARGET_HT20_7], 8) |
1360		  POW_SM(pPwrArray[ALL_TARGET_HT20_6], 0)
1361	    );
1362
1363	/* 14 (LSB), 15, 20, 21 */
1364	REG_WRITE(ah, 0xa3e4,
1365		  POW_SM(pPwrArray[ALL_TARGET_HT20_21], 24) |
1366		  POW_SM(pPwrArray[ALL_TARGET_HT20_20], 16) |
1367		  POW_SM(pPwrArray[ALL_TARGET_HT20_15], 8) |
1368		  POW_SM(pPwrArray[ALL_TARGET_HT20_14], 0)
1369	    );
1370
1371	/* Mixed HT20 and HT40 rates */
1372
1373	/* HT20 22 (LSB), HT20 23, HT40 22, HT40 23 (MSB) */
1374	REG_WRITE(ah, 0xa3e8,
1375		  POW_SM(pPwrArray[ALL_TARGET_HT40_23], 24) |
1376		  POW_SM(pPwrArray[ALL_TARGET_HT40_22], 16) |
1377		  POW_SM(pPwrArray[ALL_TARGET_HT20_23], 8) |
1378		  POW_SM(pPwrArray[ALL_TARGET_HT20_22], 0)
1379	    );
1380
1381	/*
1382	 * Write the HT40 power per rate set
1383	 * correct PAR difference between HT40 and HT20/LEGACY
1384	 * 0/8/16 (LSB), 1-3/9-11/17-19, 4, 5 (MSB)
1385	 */
1386	REG_WRITE(ah, 0xa3d8,
1387		  POW_SM(pPwrArray[ALL_TARGET_HT40_5], 24) |
1388		  POW_SM(pPwrArray[ALL_TARGET_HT40_4], 16) |
1389		  POW_SM(pPwrArray[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
1390		  POW_SM(pPwrArray[ALL_TARGET_HT40_0_8_16], 0)
1391	    );
1392
1393	/* 6 (LSB), 7, 12, 13 (MSB) */
1394	REG_WRITE(ah, 0xa3dc,
1395		  POW_SM(pPwrArray[ALL_TARGET_HT40_13], 24) |
1396		  POW_SM(pPwrArray[ALL_TARGET_HT40_12], 16) |
1397		  POW_SM(pPwrArray[ALL_TARGET_HT40_7], 8) |
1398		  POW_SM(pPwrArray[ALL_TARGET_HT40_6], 0)
1399	    );
1400
1401	/* 14 (LSB), 15, 20, 21 */
1402	REG_WRITE(ah, 0xa3ec,
1403		  POW_SM(pPwrArray[ALL_TARGET_HT40_21], 24) |
1404		  POW_SM(pPwrArray[ALL_TARGET_HT40_20], 16) |
1405		  POW_SM(pPwrArray[ALL_TARGET_HT40_15], 8) |
1406		  POW_SM(pPwrArray[ALL_TARGET_HT40_14], 0)
1407	    );
1408
1409	return 0;
1410#undef POW_SM
1411}
1412
/*
 * Build the full per-rate target power table (targetPowerValT2, indexed by
 * enum ar9300_Rates) for @freq by interpolating the EEPROM target-power
 * piers for each rate group, then write it to the tx power registers.
 * Frequencies below 4000 MHz are treated as 2.4 GHz.
 */
1413static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq)
1414{
1415	u8 targetPowerValT2[ar9300RateSize];
1416	/* XXX: hard code for now, need to get from eeprom struct */
1417	u8 ht40PowerIncForPdadc = 0;
1418	bool is2GHz = false;
1419	unsigned int i = 0;
1420	struct ath_common *common = ath9k_hw_common(ah);
1421
1422	if (freq < 4000)
1423		is2GHz = true;
1424
1425	targetPowerValT2[ALL_TARGET_LEGACY_6_24] =
1426	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_6_24, freq,
1427					 is2GHz);
1428	targetPowerValT2[ALL_TARGET_LEGACY_36] =
1429	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_36, freq,
1430					 is2GHz);
1431	targetPowerValT2[ALL_TARGET_LEGACY_48] =
1432	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_48, freq,
1433					 is2GHz);
1434	targetPowerValT2[ALL_TARGET_LEGACY_54] =
1435	    ar9003_hw_eeprom_get_tgt_pwr(ah, LEGACY_TARGET_RATE_54, freq,
1436					 is2GHz);
1437	targetPowerValT2[ALL_TARGET_LEGACY_1L_5L] =
1438	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_1L_5L,
1439					     freq);
1440	targetPowerValT2[ALL_TARGET_LEGACY_5S] =
1441	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_5S, freq);
1442	targetPowerValT2[ALL_TARGET_LEGACY_11L] =
1443	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11L, freq);
1444	targetPowerValT2[ALL_TARGET_LEGACY_11S] =
1445	    ar9003_hw_eeprom_get_cck_tgt_pwr(ah, LEGACY_TARGET_RATE_11S, freq);
1446	targetPowerValT2[ALL_TARGET_HT20_0_8_16] =
1447	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
1448					      is2GHz);
1449	targetPowerValT2[ALL_TARGET_HT20_1_3_9_11_17_19] =
1450	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
1451					      freq, is2GHz);
1452	targetPowerValT2[ALL_TARGET_HT20_4] =
1453	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
1454					      is2GHz);
1455	targetPowerValT2[ALL_TARGET_HT20_5] =
1456	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
1457					      is2GHz);
1458	targetPowerValT2[ALL_TARGET_HT20_6] =
1459	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
1460					      is2GHz);
1461	targetPowerValT2[ALL_TARGET_HT20_7] =
1462	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
1463					      is2GHz);
1464	targetPowerValT2[ALL_TARGET_HT20_12] =
1465	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
1466					      is2GHz);
1467	targetPowerValT2[ALL_TARGET_HT20_13] =
1468	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
1469					      is2GHz);
1470	targetPowerValT2[ALL_TARGET_HT20_14] =
1471	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
1472					      is2GHz);
1473	targetPowerValT2[ALL_TARGET_HT20_15] =
1474	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
1475					      is2GHz);
1476	targetPowerValT2[ALL_TARGET_HT20_20] =
1477	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
1478					      is2GHz);
1479	targetPowerValT2[ALL_TARGET_HT20_21] =
1480	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
1481					      is2GHz);
1482	targetPowerValT2[ALL_TARGET_HT20_22] =
1483	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
1484					      is2GHz);
1485	targetPowerValT2[ALL_TARGET_HT20_23] =
1486	    ar9003_hw_eeprom_get_ht20_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
1487					      is2GHz);
1488	targetPowerValT2[ALL_TARGET_HT40_0_8_16] =
1489	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_0_8_16, freq,
1490					      is2GHz) + ht40PowerIncForPdadc;
1491	targetPowerValT2[ALL_TARGET_HT40_1_3_9_11_17_19] =
1492	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_1_3_9_11_17_19,
1493					      freq,
1494					      is2GHz) + ht40PowerIncForPdadc;
1495	targetPowerValT2[ALL_TARGET_HT40_4] =
1496	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_4, freq,
1497					      is2GHz) + ht40PowerIncForPdadc;
1498	targetPowerValT2[ALL_TARGET_HT40_5] =
1499	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_5, freq,
1500					      is2GHz) + ht40PowerIncForPdadc;
1501	targetPowerValT2[ALL_TARGET_HT40_6] =
1502	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_6, freq,
1503					      is2GHz) + ht40PowerIncForPdadc;
1504	targetPowerValT2[ALL_TARGET_HT40_7] =
1505	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_7, freq,
1506					      is2GHz) + ht40PowerIncForPdadc;
1507	targetPowerValT2[ALL_TARGET_HT40_12] =
1508	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_12, freq,
1509					      is2GHz) + ht40PowerIncForPdadc;
1510	targetPowerValT2[ALL_TARGET_HT40_13] =
1511	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_13, freq,
1512					      is2GHz) + ht40PowerIncForPdadc;
1513	targetPowerValT2[ALL_TARGET_HT40_14] =
1514	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_14, freq,
1515					      is2GHz) + ht40PowerIncForPdadc;
1516	targetPowerValT2[ALL_TARGET_HT40_15] =
1517	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_15, freq,
1518					      is2GHz) + ht40PowerIncForPdadc;
1519	targetPowerValT2[ALL_TARGET_HT40_20] =
1520	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_20, freq,
1521					      is2GHz) + ht40PowerIncForPdadc;
1522	targetPowerValT2[ALL_TARGET_HT40_21] =
1523	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_21, freq,
1524					      is2GHz) + ht40PowerIncForPdadc;
1525	targetPowerValT2[ALL_TARGET_HT40_22] =
1526	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_22, freq,
1527					      is2GHz) + ht40PowerIncForPdadc;
1528	targetPowerValT2[ALL_TARGET_HT40_23] =
1529	    ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
1530					      is2GHz) + ht40PowerIncForPdadc;
1531
	/*
	 * Debug dump, four entries per output line; relies on
	 * ar9300RateSize being a multiple of 4.
	 */
1532	while (i < ar9300RateSize) {
1533		ath_print(common, ATH_DBG_EEPROM,
1534			  "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
1535		i++;
1536
1537		ath_print(common, ATH_DBG_EEPROM,
1538			  "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
1539		i++;
1540
1541		ath_print(common, ATH_DBG_EEPROM,
1542			  "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
1543		i++;
1544
1545		ath_print(common, ATH_DBG_EEPROM,
1546			  "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
1547		i++;
1548	}
1549
1550	/* Write target power array to registers */
1551	ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
1552}
1553
/*
 * Fetch one calibration pier for @ichain in the band selected by @mode
 * (non-zero = 5 GHz). On success, returns 0 and fills in the pier's
 * frequency (MHz), reference-power correction, temperature and voltage
 * measurements. Returns -1 for an out-of-range chain or pier index.
 */
1554static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
1555				  int mode,
1556				  int ipier,
1557				  int ichain,
1558				  int *pfrequency,
1559				  int *pcorrection,
1560				  int *ptemperature, int *pvoltage)
1561{
1562	u8 *pCalPier;
1563	struct ar9300_cal_data_per_freq_op_loop *pCalPierStruct;
1564	int is2GHz;
1565	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1566	struct ath_common *common = ath9k_hw_common(ah);
1567
1568	if (ichain >= AR9300_MAX_CHAINS) {
1569		ath_print(common, ATH_DBG_EEPROM,
1570			  "Invalid chain index, must be less than %d\n",
1571			  AR9300_MAX_CHAINS);
1572		return -1;
1573	}
1574
1575	if (mode) {		/* 5GHz */
1576		if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
1577			ath_print(common, ATH_DBG_EEPROM,
1578				  "Invalid 5GHz cal pier index, must "
1579				  "be less than %d\n",
1580				  AR9300_NUM_5G_CAL_PIERS);
1581			return -1;
1582		}
1583		pCalPier = &(eep->calFreqPier5G[ipier]);
1584		pCalPierStruct = &(eep->calPierData5G[ichain][ipier]);
1585		is2GHz = 0;
1586	} else {
1587		if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
1588			ath_print(common, ATH_DBG_EEPROM,
1589				  "Invalid 2GHz cal pier index, must "
1590				  "be less than %d\n", AR9300_NUM_2G_CAL_PIERS);
1591			return -1;
1592		}
1593
1594		pCalPier = &(eep->calFreqPier2G[ipier]);
1595		pCalPierStruct = &(eep->calPierData2G[ichain][ipier]);
1596		is2GHz = 1;
1597	}
1598
1599	*pfrequency = FBIN2FREQ(*pCalPier, is2GHz);
1600	*pcorrection = pCalPierStruct->refPower;
1601	*ptemperature = pCalPierStruct->tempMeas;
1602	*pvoltage = pCalPierStruct->voltMeas;
1603
1604	return 0;
1605}
1606
/*
 * Program open-loop power control: write the per-chain gain-delta
 * corrections, enable OLPC error-estimate mode on all three chains, and
 * set the band-specific temperature-compensation slope.
 * NOTE(review): @voltage is currently unused and only temperature[0] is
 * written — per-chain temperature/voltage appear reserved for later use.
 * Always returns 0.
 */
1607static int ar9003_hw_power_control_override(struct ath_hw *ah,
1608					    int frequency,
1609					    int *correction,
1610					    int *voltage, int *temperature)
1611{
1612	int tempSlope = 0;
1613	struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1614
1615	REG_RMW(ah, AR_PHY_TPC_11_B0,
1616		(correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
1617		AR_PHY_TPC_OLPC_GAIN_DELTA);
1618	REG_RMW(ah, AR_PHY_TPC_11_B1,
1619		(correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
1620		AR_PHY_TPC_OLPC_GAIN_DELTA);
1621	REG_RMW(ah, AR_PHY_TPC_11_B2,
1622		(correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
1623		AR_PHY_TPC_OLPC_GAIN_DELTA);
1624
1625	/* enable open loop power control on chip */
1626	REG_RMW(ah, AR_PHY_TPC_6_B0,
1627		(3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
1628		AR_PHY_TPC_6_ERROR_EST_MODE);
1629	REG_RMW(ah, AR_PHY_TPC_6_B1,
1630		(3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
1631		AR_PHY_TPC_6_ERROR_EST_MODE);
1632	REG_RMW(ah, AR_PHY_TPC_6_B2,
1633		(3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
1634		AR_PHY_TPC_6_ERROR_EST_MODE);
1635
1636	/*
1637	 * enable temperature compensation
1638	 * Need to use register names
1639	 */
1640	if (frequency < 4000)
1641		tempSlope = eep->modalHeader2G.tempSlope;
1642	else
1643		tempSlope = eep->modalHeader5G.tempSlope;
1644
1645	REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
1646	REG_RMW_FIELD(ah, AR_PHY_TPC_18, AR_PHY_TPC_18_THERM_CAL_VALUE,
1647		      temperature[0]);
1648
1649	return 0;
1650}
1651
/* Apply the recorded correction values. */
/*
 * For each chain, pick the calibration piers that bracket @frequency
 * (MHz), interpolate correction/temperature/voltage between them when
 * both are within 1000 MHz, clamp to the single good pier otherwise,
 * and push the result to the OLPC registers. lfrequency==0 and
 * hfrequency==100000 act as "no pier found yet" sentinels. Always
 * returns 0.
 */
1653static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
1654{
1655	int ichain, ipier, npier;
1656	int mode;
1657	int lfrequency[AR9300_MAX_CHAINS],
1658	    lcorrection[AR9300_MAX_CHAINS],
1659	    ltemperature[AR9300_MAX_CHAINS], lvoltage[AR9300_MAX_CHAINS];
1660	int hfrequency[AR9300_MAX_CHAINS],
1661	    hcorrection[AR9300_MAX_CHAINS],
1662	    htemperature[AR9300_MAX_CHAINS], hvoltage[AR9300_MAX_CHAINS];
1663	int fdiff;
1664	int correction[AR9300_MAX_CHAINS],
1665	    voltage[AR9300_MAX_CHAINS], temperature[AR9300_MAX_CHAINS];
1666	int pfrequency, pcorrection, ptemperature, pvoltage;
1667	struct ath_common *common = ath9k_hw_common(ah);
1668
	/* Band select: >= 4000 MHz means 5 GHz pier tables */
1669	mode = (frequency >= 4000);
1670	if (mode)
1671		npier = AR9300_NUM_5G_CAL_PIERS;
1672	else
1673		npier = AR9300_NUM_2G_CAL_PIERS;
1674
1675	for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
1676		lfrequency[ichain] = 0;
1677		hfrequency[ichain] = 100000;
1678	}
1679	/* identify best lower and higher frequency calibration measurement */
1680	for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
1681		for (ipier = 0; ipier < npier; ipier++) {
1682			if (!ar9003_hw_cal_pier_get(ah, mode, ipier, ichain,
1683						    &pfrequency, &pcorrection,
1684						    &ptemperature, &pvoltage)) {
1685				fdiff = frequency - pfrequency;
1686
1687				/*
1688				 * this measurement is higher than
1689				 * our desired frequency
1690				 */
1691				if (fdiff <= 0) {
1692					if (hfrequency[ichain] <= 0 ||
1693					    hfrequency[ichain] >= 100000 ||
1694					    fdiff >
1695					    (frequency - hfrequency[ichain])) {
1696						/*
1697						 * new best higher
1698						 * frequency measurement
1699						 */
1700						hfrequency[ichain] = pfrequency;
1701						hcorrection[ichain] =
1702						    pcorrection;
1703						htemperature[ichain] =
1704						    ptemperature;
1705						hvoltage[ichain] = pvoltage;
1706					}
1707				}
1708				if (fdiff >= 0) {
1709					if (lfrequency[ichain] <= 0
1710					    || fdiff <
1711					    (frequency - lfrequency[ichain])) {
1712						/*
1713						 * new best lower
1714						 * frequency measurement
1715						 */
1716						lfrequency[ichain] = pfrequency;
1717						lcorrection[ichain] =
1718						    pcorrection;
1719						ltemperature[ichain] =
1720						    ptemperature;
1721						lvoltage[ichain] = pvoltage;
1722					}
1723				}
1724			}
1725		}
1726	}
1727
1728	/* interpolate */
1729	for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
1730		ath_print(common, ATH_DBG_EEPROM,
1731			  "ch=%d f=%d low=%d %d h=%d %d\n",
1732			  ichain, frequency, lfrequency[ichain],
1733			  lcorrection[ichain], hfrequency[ichain],
1734			  hcorrection[ichain]);
1735		/* they're the same, so just pick one */
1736		if (hfrequency[ichain] == lfrequency[ichain]) {
1737			correction[ichain] = lcorrection[ichain];
1738			voltage[ichain] = lvoltage[ichain];
1739			temperature[ichain] = ltemperature[ichain];
1740		}
1741		/* the low frequency is good */
1742		else if (frequency - lfrequency[ichain] < 1000) {
1743			/* so is the high frequency, interpolate */
1744			if (hfrequency[ichain] - frequency < 1000) {
1745
1746				correction[ichain] = lcorrection[ichain] +
1747				    (((frequency - lfrequency[ichain]) *
1748				      (hcorrection[ichain] -
1749				       lcorrection[ichain])) /
1750				     (hfrequency[ichain] - lfrequency[ichain]));
1751
1752				temperature[ichain] = ltemperature[ichain] +
1753				    (((frequency - lfrequency[ichain]) *
1754				      (htemperature[ichain] -
1755				       ltemperature[ichain])) /
1756				     (hfrequency[ichain] - lfrequency[ichain]));
1757
1758				voltage[ichain] =
1759				    lvoltage[ichain] +
1760				    (((frequency -
1761				       lfrequency[ichain]) * (hvoltage[ichain] -
1762							      lvoltage[ichain]))
1763				     / (hfrequency[ichain] -
1764					lfrequency[ichain]));
1765			}
1766			/* only low is good, use it */
1767			else {
1768				correction[ichain] = lcorrection[ichain];
1769				temperature[ichain] = ltemperature[ichain];
1770				voltage[ichain] = lvoltage[ichain];
1771			}
1772		}
1773		/* only high is good, use it */
1774		else if (hfrequency[ichain] - frequency < 1000) {
1775			correction[ichain] = hcorrection[ichain];
1776			temperature[ichain] = htemperature[ichain];
1777			voltage[ichain] = hvoltage[ichain];
1778		} else {	/* nothing is good, presume 0???? */
1779			correction[ichain] = 0;
1780			temperature[ichain] = 0;
1781			voltage[ichain] = 0;
1782		}
1783	}
1784
1785	ar9003_hw_power_control_override(ah, frequency, correction, voltage,
1786					 temperature);
1787
1788	ath_print(common, ATH_DBG_EEPROM,
1789		  "for frequency=%d, calibration correction = %d %d %d\n",
1790		  frequency, correction[0], correction[1], correction[2]);
1791
1792	return 0;
1793}
1794
/*
 * eeprom_ops hook: record the requested power limit and program the
 * channel's target powers and calibration corrections.
 * NOTE(review): cfgCtl and the regulatory/antenna reduction arguments
 * are not applied here yet — TODO confirm conformance-test-limit
 * handling is done elsewhere.
 */
1795static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
1796					struct ath9k_channel *chan, u16 cfgCtl,
1797					u8 twiceAntennaReduction,
1798					u8 twiceMaxRegulatoryPower,
1799					u8 powerLimit)
1800{
1801	ah->txpower_limit = powerLimit;
1802	ar9003_hw_set_target_power_eeprom(ah, chan->channel);
1803	ar9003_hw_calibration_apply(ah, chan->channel);
1804}
1805
/* eeprom_ops hook: AR9300 reports no spur channels via this interface. */
1806static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
1807					    u16 i, bool is2GHz)
1808{
1809	return AR_NO_SPUR;
1810}
1811
1812s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah)
1813{
1814 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1815
1816 return (eep->baseEepHeader.txrxgain >> 4) & 0xf; /* bits 7:4 */
1817}
1818
1819s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah)
1820{
1821 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
1822
1823 return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */
1824}
1825
/* eeprom_ops vtable for the AR9300 EEPROM format. */
1826const struct eeprom_ops eep_ar9300_ops = {
1827	.check_eeprom = ath9k_hw_ar9300_check_eeprom,
1828	.get_eeprom = ath9k_hw_ar9300_get_eeprom,
1829	.fill_eeprom = ath9k_hw_ar9300_fill_eeprom,
1830	.get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver,
1831	.get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev,
1832	.get_num_ant_config = ath9k_hw_ar9300_get_num_ant_config,
1833	.get_eeprom_antenna_cfg = ath9k_hw_ar9300_get_eeprom_antenna_cfg,
1834	.set_board_values = ath9k_hw_ar9300_set_board_values,
1835	.set_addac = ath9k_hw_ar9300_set_addac,
1836	.set_txpower = ath9k_hw_ar9300_set_txpower,
1837	.get_spur_channel = ath9k_hw_ar9300_get_spur_channel
1838};
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
new file mode 100644
index 000000000000..23fb353c3bba
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -0,0 +1,323 @@
1#ifndef AR9003_EEPROM_H
2#define AR9003_EEPROM_H
3
4#include <linux/types.h>
5
/* EEPROM format version (major in high nibble, minor masked below) */
6#define AR9300_EEP_VER 0xD000
7#define AR9300_EEP_VER_MINOR_MASK 0xFFF
8#define AR9300_EEP_MINOR_VER_1 0x1
9#define AR9300_EEP_MINOR_VER AR9300_EEP_MINOR_VER_1
10
11/* 16-bit offset location start of calibration struct */
12#define AR9300_EEP_START_LOC 256
/* Calibration pier and target-power table sizes per band/mode */
13#define AR9300_NUM_5G_CAL_PIERS 8
14#define AR9300_NUM_2G_CAL_PIERS 3
15#define AR9300_NUM_5G_20_TARGET_POWERS 8
16#define AR9300_NUM_5G_40_TARGET_POWERS 8
17#define AR9300_NUM_2G_CCK_TARGET_POWERS 2
18#define AR9300_NUM_2G_20_TARGET_POWERS 3
19#define AR9300_NUM_2G_40_TARGET_POWERS 3
20/* #define AR9300_NUM_CTLS 21 */
21#define AR9300_NUM_CTLS_5G 9
22#define AR9300_NUM_CTLS_2G 12
23#define AR9300_CTL_MODE_M 0xF
24#define AR9300_NUM_BAND_EDGES_5G 8
25#define AR9300_NUM_BAND_EDGES_2G 4
26#define AR9300_NUM_PD_GAINS 4
27#define AR9300_PD_GAINS_IN_MASK 4
28#define AR9300_PD_GAIN_ICEPTS 5
29#define AR9300_EEPROM_MODAL_SPURS 5
30#define AR9300_MAX_RATE_POWER 63
31#define AR9300_NUM_PDADC_VALUES 128
32#define AR9300_NUM_RATES 16
33#define AR9300_BCHAN_UNUSED 0xFF
34#define AR9300_MAX_PWR_RANGE_IN_HALF_DB 64
/* opCapFlags.opFlags bits: supported bands and channel widths */
35#define AR9300_OPFLAGS_11A 0x01
36#define AR9300_OPFLAGS_11G 0x02
37#define AR9300_OPFLAGS_5G_HT40 0x04
38#define AR9300_OPFLAGS_2G_HT40 0x08
39#define AR9300_OPFLAGS_5G_HT20 0x10
40#define AR9300_OPFLAGS_2G_HT20 0x20
41#define AR9300_EEPMISC_BIG_ENDIAN 0x01
42#define AR9300_EEPMISC_WOW 0x02
43#define AR9300_CUSTOMER_DATA_SIZE 20
44
/*
 * Convert between a channel frequency in MHz and the compact frequency
 * bin stored in the EEPROM: 2.4 GHz bins are MHz-2300, 5 GHz bins are
 * (MHz-4800)/5. All macro arguments are fully parenthesized so that
 * expression arguments (e.g. FBIN2FREQ(b + 1, 0)) expand correctly —
 * the previous FBIN2FREQ left 'x' bare, so '5 * x' bound only the first
 * term of an expression argument.
 */
#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
#define FBIN2FREQ(x, y) ((y) ? (2300 + (x)) : (4800 + 5 * (x)))
47#define AR9300_MAX_CHAINS 3
48#define AR9300_ANT_16S 25
49#define AR9300_FUTURE_MODAL_SZ 6
50
/* Antenna control field layout for AR9300 */
51#define AR9300_NUM_ANT_CHAIN_FIELDS 7
52#define AR9300_NUM_ANT_COMMON_FIELDS 4
53#define AR9300_SIZE_ANT_CHAIN_FIELD 3
54#define AR9300_SIZE_ANT_COMMON_FIELD 4
55#define AR9300_ANT_CHAIN_MASK 0x7
56#define AR9300_ANT_COMMON_MASK 0xf
57#define AR9300_CHAIN_0_IDX 0
58#define AR9300_CHAIN_1_IDX 1
59#define AR9300_CHAIN_2_IDX 2
60
/* Legacy AR928x antenna control field layout, kept for comparison */
61#define AR928X_NUM_ANT_CHAIN_FIELDS 6
62#define AR928X_SIZE_ANT_CHAIN_FIELD 2
63#define AR928X_ANT_CHAIN_MASK 0x3
64
65/* Delta from which to start power to pdadc table */
66/* This offset is used in both open loop and closed loop power control
67 * schemes. In open loop power control, it is not really needed, but for
68 * the "sake of consistency" it was kept. For certain AP designs, this
69 * value is overwritten by the value in the flag "pwrTableOffset" just
70 * before writing the pdadc vs pwr into the chip registers.
71 */
72#define AR9300_PWR_TABLE_OFFSET 0
73
74/* enable flags for voltage and temp compensation */
75#define ENABLE_TEMP_COMPENSATION 0x01
76#define ENABLE_VOLT_COMPENSATION 0x02
77/* byte addressable */
78#define AR9300_EEPROM_SIZE (16*1024)
79#define FIXED_CCA_THRESHOLD 15
80
81#define AR9300_BASE_ADDR 0x3ff
/* Row indices into cal_tgt_pow_ht.tPow2x (HT MCS rate groups) */
83enum targetPowerHTRates {
84	HT_TARGET_RATE_0_8_16,
85	HT_TARGET_RATE_1_3_9_11_17_19,
86	HT_TARGET_RATE_4,
87	HT_TARGET_RATE_5,
88	HT_TARGET_RATE_6,
89	HT_TARGET_RATE_7,
90	HT_TARGET_RATE_12,
91	HT_TARGET_RATE_13,
92	HT_TARGET_RATE_14,
93	HT_TARGET_RATE_15,
94	HT_TARGET_RATE_20,
95	HT_TARGET_RATE_21,
96	HT_TARGET_RATE_22,
97	HT_TARGET_RATE_23
98};
99
/* Row indices into cal_tgt_pow_legacy.tPow2x (legacy OFDM rates) */
100enum targetPowerLegacyRates {
101	LEGACY_TARGET_RATE_6_24,
102	LEGACY_TARGET_RATE_36,
103	LEGACY_TARGET_RATE_48,
104	LEGACY_TARGET_RATE_54
105};
106
/* Row indices into cal_tgt_pow_legacy.tPow2x for CCK target powers */
107enum targetPowerCckRates {
108	LEGACY_TARGET_RATE_1L_5L,
109	LEGACY_TARGET_RATE_5S,
110	LEGACY_TARGET_RATE_11L,
111	LEGACY_TARGET_RATE_11S
112};
113
/* Flat index space over all rate groups; ar9300RateSize is the count */
114enum ar9300_Rates {
115	ALL_TARGET_LEGACY_6_24,
116	ALL_TARGET_LEGACY_36,
117	ALL_TARGET_LEGACY_48,
118	ALL_TARGET_LEGACY_54,
119	ALL_TARGET_LEGACY_1L_5L,
120	ALL_TARGET_LEGACY_5S,
121	ALL_TARGET_LEGACY_11L,
122	ALL_TARGET_LEGACY_11S,
123	ALL_TARGET_HT20_0_8_16,
124	ALL_TARGET_HT20_1_3_9_11_17_19,
125	ALL_TARGET_HT20_4,
126	ALL_TARGET_HT20_5,
127	ALL_TARGET_HT20_6,
128	ALL_TARGET_HT20_7,
129	ALL_TARGET_HT20_12,
130	ALL_TARGET_HT20_13,
131	ALL_TARGET_HT20_14,
132	ALL_TARGET_HT20_15,
133	ALL_TARGET_HT20_20,
134	ALL_TARGET_HT20_21,
135	ALL_TARGET_HT20_22,
136	ALL_TARGET_HT20_23,
137	ALL_TARGET_HT40_0_8_16,
138	ALL_TARGET_HT40_1_3_9_11_17_19,
139	ALL_TARGET_HT40_4,
140	ALL_TARGET_HT40_5,
141	ALL_TARGET_HT40_6,
142	ALL_TARGET_HT40_7,
143	ALL_TARGET_HT40_12,
144	ALL_TARGET_HT40_13,
145	ALL_TARGET_HT40_14,
146	ALL_TARGET_HT40_15,
147	ALL_TARGET_HT40_20,
148	ALL_TARGET_HT40_21,
149	ALL_TARGET_HT40_22,
150	ALL_TARGET_HT40_23,
151	ar9300RateSize,
152};
153
154
155struct eepFlags {
156	u8 opFlags;
157	u8 eepMisc;
158} __packed;
159
160enum CompressAlgorithm {
161	_CompressNone = 0,
162	_CompressLzma,
163	_CompressPairs,
164	_CompressBlock,
165	_Compress4,
166	_Compress5,
167	_Compress6,
168	_Compress7,
169};
169};
170
/*
 * On-EEPROM layout structures. All are __packed and field order matches
 * the serialized EEPROM image exactly — do not reorder or resize fields.
 */
171struct ar9300_base_eep_hdr {
172	__le16 regDmn[2];
173	/* 4 bits tx and 4 bits rx */
174	u8 txrxMask;
175	struct eepFlags opCapFlags;
176	u8 rfSilent;
177	u8 blueToothOptions;
178	u8 deviceCap;
179	/* takes lower byte in eeprom location */
180	u8 deviceType;
181	/* offset in dB to be added to beginning
182	 * of pdadc table in calibration
183	 */
184	int8_t pwrTableOffset;
185	u8 params_for_tuning_caps[2];
186	/*
187	 * bit0 - enable tx temp comp
188	 * bit1 - enable tx volt comp
189	 * bit2 - enable fastClock - default to 1
190	 * bit3 - enable doubling - default to 1
191	 * bit4 - enable internal regulator - default to 1
192	 */
193	u8 featureEnable;
194	/* misc flags: bit0 - turn down drivestrength */
195	u8 miscConfiguration;
196	u8 eepromWriteEnableGpio;
197	u8 wlanDisableGpio;
198	u8 wlanLedGpio;
199	u8 rxBandSelectGpio;
200	u8 txrxgain;
201	/* SW controlled internal regulator fields */
202	__le32 swreg;
203} __packed;
204
/* Per-band modal settings; one instance each for 2G and 5G */
205struct ar9300_modal_eep_header {
206	/* 4 idle, t1, t2, b (4 bits per setting) */
207	__le32 antCtrlCommon;
208	/* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
209	__le32 antCtrlCommon2;
210	/* 6 idle, t, r, rx1, rx12, b (2 bits each) */
211	__le16 antCtrlChain[AR9300_MAX_CHAINS];
212	/* 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
213	u8 xatten1DB[AR9300_MAX_CHAINS];
214	/* 3 xatten1_margin for merlin (0xa20c/b20c 16:12 */
215	u8 xatten1Margin[AR9300_MAX_CHAINS];
216	int8_t tempSlope;
217	int8_t voltSlope;
218	/* spur channels in usual fbin coding format */
219	u8 spurChans[AR9300_EEPROM_MODAL_SPURS];
220	/* 3 Check if the register is per chain */
221	int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
222	u8 ob[AR9300_MAX_CHAINS];
223	u8 db_stage2[AR9300_MAX_CHAINS];
224	u8 db_stage3[AR9300_MAX_CHAINS];
225	u8 db_stage4[AR9300_MAX_CHAINS];
226	u8 xpaBiasLvl;
227	u8 txFrameToDataStart;
228	u8 txFrameToPaOn;
229	u8 txClip;
230	int8_t antennaGain;
231	u8 switchSettling;
232	int8_t adcDesiredSize;
233	u8 txEndToXpaOff;
234	u8 txEndToRxOn;
235	u8 txFrameToXpaOn;
236	u8 thresh62;
237	u8 futureModal[32];
238} __packed;
239
/* One open-loop calibration measurement at a single frequency pier */
240struct ar9300_cal_data_per_freq_op_loop {
241	int8_t refPower;
242	/* pdadc voltage at power measurement */
243	u8 voltMeas;
244	/* pcdac used for power measurement */
245	u8 tempMeas;
246	/* range is -60 to -127 create a mapping equation 1db resolution */
247	int8_t rxNoisefloorCal;
248	/*range is same as noisefloor */
249	int8_t rxNoisefloorPower;
250	/* temp measured when noisefloor cal was performed */
251	u8 rxTempMeas;
252} __packed;
253
/* Target powers in half-dB units (hence "tPow2x") */
254struct cal_tgt_pow_legacy {
255	u8 tPow2x[4];
256} __packed;
257
258struct cal_tgt_pow_ht {
259	u8 tPow2x[14];
260} __packed;
261
262struct cal_ctl_edge_pwr {
263	u8 tPower:6,
264	   flag:2;
265} __packed;
266
267struct cal_ctl_data_2g {
268	struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_2G];
269} __packed;
270
271struct cal_ctl_data_5g {
272	struct cal_ctl_edge_pwr ctlEdges[AR9300_NUM_BAND_EDGES_5G];
273} __packed;
274
/* Complete serialized AR9300 EEPROM image */
275struct ar9300_eeprom {
276	u8 eepromVersion;
277	u8 templateVersion;
278	u8 macAddr[6];
279	u8 custData[AR9300_CUSTOMER_DATA_SIZE];
280
281	struct ar9300_base_eep_hdr baseEepHeader;
282
283	struct ar9300_modal_eep_header modalHeader2G;
284	u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS];
285	struct ar9300_cal_data_per_freq_op_loop
286	 calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS];
287	u8 calTarget_freqbin_Cck[AR9300_NUM_2G_CCK_TARGET_POWERS];
288	u8 calTarget_freqbin_2G[AR9300_NUM_2G_20_TARGET_POWERS];
289	u8 calTarget_freqbin_2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
290	u8 calTarget_freqbin_2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
291	struct cal_tgt_pow_legacy
292	 calTargetPowerCck[AR9300_NUM_2G_CCK_TARGET_POWERS];
293	struct cal_tgt_pow_legacy
294	 calTargetPower2G[AR9300_NUM_2G_20_TARGET_POWERS];
295	struct cal_tgt_pow_ht
296	 calTargetPower2GHT20[AR9300_NUM_2G_20_TARGET_POWERS];
297	struct cal_tgt_pow_ht
298	 calTargetPower2GHT40[AR9300_NUM_2G_40_TARGET_POWERS];
299	u8 ctlIndex_2G[AR9300_NUM_CTLS_2G];
300	u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G];
301	struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G];
302	struct ar9300_modal_eep_header modalHeader5G;
303	u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS];
304	struct ar9300_cal_data_per_freq_op_loop
305	 calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS];
306	u8 calTarget_freqbin_5G[AR9300_NUM_5G_20_TARGET_POWERS];
307	u8 calTarget_freqbin_5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
308	u8 calTarget_freqbin_5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
309	struct cal_tgt_pow_legacy
310	 calTargetPower5G[AR9300_NUM_5G_20_TARGET_POWERS];
311	struct cal_tgt_pow_ht
312	 calTargetPower5GHT20[AR9300_NUM_5G_20_TARGET_POWERS];
313	struct cal_tgt_pow_ht
314	 calTargetPower5GHT40[AR9300_NUM_5G_40_TARGET_POWERS];
315	u8 ctlIndex_5G[AR9300_NUM_CTLS_5G];
316	u8 ctl_freqbin_5G[AR9300_NUM_CTLS_5G][AR9300_NUM_BAND_EDGES_5G];
317	struct cal_ctl_data_5g ctlPowerData_5G[AR9300_NUM_CTLS_5G];
318} __packed;
319
320s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
321s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
322
323#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
new file mode 100644
index 000000000000..b15309caf1da
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -0,0 +1,205 @@
1/*
2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "ar9003_mac.h"
19#include "ar9003_initvals.h"
20
/* General hardware code for the AR9003 hardware family */
22
23static bool ar9003_hw_macversion_supported(u32 macversion)
24{
25 switch (macversion) {
26 case AR_SREV_VERSION_9300:
27 return true;
28 default:
29 break;
30 }
31 return false;
32}
33
34/* AR9003 2.0 - new INI format (pre, core, post arrays per subsystem) */
35/*
36 * XXX: move TX/RX gain INI to its own init_mode_gain_regs after
37 * ensuring it does not affect hardware bring up
38 */
/*
 * Point the per-subsystem INI arrays at the AR9300 2.0 initvals tables.
 *
 * Each subsystem (mac/bb/radio/soc) has pre, core and post stages; stages
 * with no table for this chip are registered as empty (NULL, 0, 0).
 * The trailing integer of INIT_INI_ARRAY is the column count of the
 * table: 2 = {addr, allmodes}, 3 = {addr, 5G_HT20, 5G_HT40},
 * 5 = {addr, 5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20}.
 */
static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
{
	/* mac: core + per-mode postamble, no preamble */
	INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
	INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
			ar9300_2p0_mac_core,
			ARRAY_SIZE(ar9300_2p0_mac_core), 2);
	INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
			ar9300_2p0_mac_postamble,
			ARRAY_SIZE(ar9300_2p0_mac_postamble), 5);

	/* bb: core + per-mode postamble, no preamble */
	INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
	INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
			ar9300_2p0_baseband_core,
			ARRAY_SIZE(ar9300_2p0_baseband_core), 2);
	INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
			ar9300_2p0_baseband_postamble,
			ARRAY_SIZE(ar9300_2p0_baseband_postamble), 5);

	/* radio: core + per-mode postamble, no preamble */
	INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
	INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
			ar9300_2p0_radio_core,
			ARRAY_SIZE(ar9300_2p0_radio_core), 2);
	INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
			ar9300_2p0_radio_postamble,
			ARRAY_SIZE(ar9300_2p0_radio_postamble), 5);

	/* soc: preamble + per-mode postamble, no core stage */
	INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
			ar9300_2p0_soc_preamble,
			ARRAY_SIZE(ar9300_2p0_soc_preamble), 2);
	INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
	INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
			ar9300_2p0_soc_postamble,
			ARRAY_SIZE(ar9300_2p0_soc_postamble), 5);

	/*
	 * rx/tx gain: defaults installed here; may later be replaced by
	 * ar9003_hw_init_mode_gain_regs() based on EEPROM gain indices.
	 */
	INIT_INI_ARRAY(&ah->iniModesRxGain,
			ar9300Common_rx_gain_table_2p0,
			ARRAY_SIZE(ar9300Common_rx_gain_table_2p0), 2);
	INIT_INI_ARRAY(&ah->iniModesTxGain,
			ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
			ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
			5);

	/* Load PCIE SERDES settings from INI */

	/* Awake Setting */

	INIT_INI_ARRAY(&ah->iniPcieSerdes,
			ar9300PciePhy_pll_on_clkreq_disable_L1_2p0,
			ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p0),
			2);

	/* Sleep Setting */

	INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
			ar9300PciePhy_clkreq_enable_L1_2p0,
			ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p0),
			2);

	/* Fast clock modal settings (5 GHz HT20/HT40 columns only) */
	INIT_INI_ARRAY(&ah->iniModesAdditional,
			ar9300Modes_fast_clock_2p0,
			ARRAY_SIZE(ar9300Modes_fast_clock_2p0),
			3);
}
108
109static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
110{
111 switch (ar9003_hw_get_tx_gain_idx(ah)) {
112 case 0:
113 default:
114 INIT_INI_ARRAY(&ah->iniModesTxGain,
115 ar9300Modes_lowest_ob_db_tx_gain_table_2p0,
116 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p0),
117 5);
118 break;
119 case 1:
120 INIT_INI_ARRAY(&ah->iniModesTxGain,
121 ar9300Modes_high_ob_db_tx_gain_table_2p0,
122 ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p0),
123 5);
124 break;
125 case 2:
126 INIT_INI_ARRAY(&ah->iniModesTxGain,
127 ar9300Modes_low_ob_db_tx_gain_table_2p0,
128 ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p0),
129 5);
130 break;
131 }
132}
133
134static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
135{
136 switch (ar9003_hw_get_rx_gain_idx(ah)) {
137 case 0:
138 default:
139 INIT_INI_ARRAY(&ah->iniModesRxGain, ar9300Common_rx_gain_table_2p0,
140 ARRAY_SIZE(ar9300Common_rx_gain_table_2p0),
141 2);
142 break;
143 case 1:
144 INIT_INI_ARRAY(&ah->iniModesRxGain,
145 ar9300Common_wo_xlna_rx_gain_table_2p0,
146 ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p0),
147 2);
148 break;
149 }
150}
151
/*
 * Set the TX and RX gain table pointers according to the gain indices
 * read from the EEPROM. The two selections are independent of each
 * other, so ordering does not matter.
 */
static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
{
	ar9003_rx_gain_table_apply(ah);
	ar9003_tx_gain_table_apply(ah);
}
158
159/*
160 * Helper for ASPM support.
161 *
162 * Disable PLL when in L0s as well as receiver clock when in L1.
163 * This power saving option must be enabled through the SerDes.
164 *
165 * Programming the SerDes must go through the same 288 bit serial shift
166 * register as the other analog registers. Hence the 9 writes.
167 */
168static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
169 int restore,
170 int power_off)
171{
172 if (ah->is_pciexpress != true)
173 return;
174
175 /* Do not touch SerDes registers */
176 if (ah->config.pcie_powersave_enable == 2)
177 return;
178
179 /* Nothing to do on restore for 11N */
180 if (!restore) {
181 /* set bit 19 to allow forcing of pcie core into L1 state */
182 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
183
184 /* Several PCIe massages to ensure proper behaviour */
185 if (ah->config.pcie_waen)
186 REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
187 }
188}
189
190/* Sets up the AR9003 hardware familiy callbacks */
191void ar9003_hw_attach_ops(struct ath_hw *ah)
192{
193 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
194 struct ath_hw_ops *ops = ath9k_hw_ops(ah);
195
196 priv_ops->init_mode_regs = ar9003_hw_init_mode_regs;
197 priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
198 priv_ops->macversion_supported = ar9003_hw_macversion_supported;
199
200 ops->config_pci_powersave = ar9003_hw_configpcipowersave;
201
202 ar9003_hw_attach_phy_ops(ah);
203 ar9003_hw_attach_calib_ops(ah);
204 ar9003_hw_attach_mac_ops(ah);
205}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
new file mode 100644
index 000000000000..db019dd220b7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_initvals.h
@@ -0,0 +1,1784 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef INITVALS_9003_H
18#define INITVALS_9003_H
19
20/* AR9003 2.0 */
21
/*
 * Per-mode radio register overrides, applied after ar9300_2p0_radio_core
 * (registered as the radio ATH_INI_POST stage in ar9003_hw_init_mode_regs).
 */
static const u32 ar9300_2p0_radio_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20 */
	{0x0001609c, 0x0dd08f29, 0x0dd08f29, 0x0b283f31, 0x0b283f31},
	{0x000160ac, 0xa4653c00, 0xa4653c00, 0x24652800, 0x24652800},
	{0x000160b0, 0x03284f3e, 0x03284f3e, 0x05d08f20, 0x05d08f20},
	{0x0001610c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
	{0x0001650c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
	{0x0001690c, 0x08000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00016940, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
34
/*
 * Default (lowest ob/db) TX gain table; installed by
 * ar9003_hw_init_mode_regs() and selected again by
 * ar9003_tx_gain_table_apply() for EEPROM gain index 0 / unknown.
 */
static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p0[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20 */
	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
	{0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
	{0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
	{0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
	{0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
	{0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
	{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
	{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
	{0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
	{0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
	{0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
	{0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
	{0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
	{0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
	{0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
	{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
	{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
	{0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
	{0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
	{0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
	{0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
	{0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
	{0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
	{0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
	{0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
	{0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
	{0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
	{0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
	{0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
	{0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
	{0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
	{0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
	{0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
	{0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
	{0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
	{0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
	{0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
	{0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
	{0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
	{0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016448, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
	{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016848, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
112
/*
 * Fast-clock modal settings; 3-column table ({addr, 5G_HT20, 5G_HT40}),
 * registered as ah->iniModesAdditional in ar9003_hw_init_mode_regs().
 */
static const u32 ar9300Modes_fast_clock_2p0[][3] = {
	/* Addr      5G_HT20     5G_HT40 */
	{0x00001030, 0x00000268, 0x000004d0},
	{0x00001070, 0x0000018c, 0x00000318},
	{0x000010b0, 0x00000fd0, 0x00001fa0},
	{0x00008014, 0x044c044c, 0x08980898},
	{0x0000801c, 0x148ec02b, 0x148ec057},
	{0x00008318, 0x000044c0, 0x00008980},
	{0x00009e00, 0x03721821, 0x03721821},
	{0x0000a230, 0x0000000b, 0x00000016},
	{0x0000a254, 0x00000898, 0x00001130},
};
125
/*
 * Mode-independent radio core settings (radio ATH_INI_CORE stage).
 * Register offsets repeat per chain at bases 0x16000/0x16400/0x16800.
 */
static const u32 ar9300_2p0_radio_core[][2] = {
	/* Addr      allmodes */
	{0x00016000, 0x36db6db6},
	{0x00016004, 0x6db6db40},
	{0x00016008, 0x73f00000},
	{0x0001600c, 0x00000000},
	{0x00016040, 0x7f80fff8},
	{0x0001604c, 0x76d005b5},
	{0x00016050, 0x556cf031},
	{0x00016054, 0x13449440},
	{0x00016058, 0x0c51c92c},
	{0x0001605c, 0x3db7fffc},
	{0x00016060, 0xfffffffc},
	{0x00016064, 0x000f0278},
	{0x0001606c, 0x6db60000},
	{0x00016080, 0x00000000},
	{0x00016084, 0x0e48048c},
	{0x00016088, 0x54214514},
	{0x0001608c, 0x119f481e},
	{0x00016090, 0x24926490},
	{0x00016098, 0xd2888888},
	{0x000160a0, 0x0a108ffe},
	{0x000160a4, 0x812fc370},
	{0x000160a8, 0x423c8000},
	{0x000160b4, 0x92480080},
	{0x000160c0, 0x00adb6d0},
	{0x000160c4, 0x6db6db60},
	{0x000160c8, 0x6db6db6c},
	{0x000160cc, 0x01e6c000},
	{0x00016100, 0x3fffbe01},
	{0x00016104, 0xfff80000},
	{0x00016108, 0x00080010},
	{0x00016144, 0x02084080},
	{0x00016148, 0x00000000},
	{0x00016280, 0x058a0001},
	{0x00016284, 0x3d840208},
	{0x00016288, 0x05a20408},
	{0x0001628c, 0x00038c07},
	{0x00016290, 0x40000004},
	{0x00016294, 0x458aa14f},
	{0x00016380, 0x00000000},
	{0x00016384, 0x00000000},
	{0x00016388, 0x00800700},
	{0x0001638c, 0x00800700},
	{0x00016390, 0x00800700},
	{0x00016394, 0x00000000},
	{0x00016398, 0x00000000},
	{0x0001639c, 0x00000000},
	{0x000163a0, 0x00000001},
	{0x000163a4, 0x00000001},
	{0x000163a8, 0x00000000},
	{0x000163ac, 0x00000000},
	{0x000163b0, 0x00000000},
	{0x000163b4, 0x00000000},
	{0x000163b8, 0x00000000},
	{0x000163bc, 0x00000000},
	{0x000163c0, 0x000000a0},
	{0x000163c4, 0x000c0000},
	{0x000163c8, 0x14021402},
	{0x000163cc, 0x00001402},
	{0x000163d0, 0x00000000},
	{0x000163d4, 0x00000000},
	{0x00016400, 0x36db6db6},
	{0x00016404, 0x6db6db40},
	{0x00016408, 0x73f00000},
	{0x0001640c, 0x00000000},
	{0x00016440, 0x7f80fff8},
	{0x0001644c, 0x76d005b5},
	{0x00016450, 0x556cf031},
	{0x00016454, 0x13449440},
	{0x00016458, 0x0c51c92c},
	{0x0001645c, 0x3db7fffc},
	{0x00016460, 0xfffffffc},
	{0x00016464, 0x000f0278},
	{0x0001646c, 0x6db60000},
	{0x00016500, 0x3fffbe01},
	{0x00016504, 0xfff80000},
	{0x00016508, 0x00080010},
	{0x00016544, 0x02084080},
	{0x00016548, 0x00000000},
	{0x00016780, 0x00000000},
	{0x00016784, 0x00000000},
	{0x00016788, 0x00800700},
	{0x0001678c, 0x00800700},
	{0x00016790, 0x00800700},
	{0x00016794, 0x00000000},
	{0x00016798, 0x00000000},
	{0x0001679c, 0x00000000},
	{0x000167a0, 0x00000001},
	{0x000167a4, 0x00000001},
	{0x000167a8, 0x00000000},
	{0x000167ac, 0x00000000},
	{0x000167b0, 0x00000000},
	{0x000167b4, 0x00000000},
	{0x000167b8, 0x00000000},
	{0x000167bc, 0x00000000},
	{0x000167c0, 0x000000a0},
	{0x000167c4, 0x000c0000},
	{0x000167c8, 0x14021402},
	{0x000167cc, 0x00001402},
	{0x000167d0, 0x00000000},
	{0x000167d4, 0x00000000},
	{0x00016800, 0x36db6db6},
	{0x00016804, 0x6db6db40},
	{0x00016808, 0x73f00000},
	{0x0001680c, 0x00000000},
	{0x00016840, 0x7f80fff8},
	{0x0001684c, 0x76d005b5},
	{0x00016850, 0x556cf031},
	{0x00016854, 0x13449440},
	{0x00016858, 0x0c51c92c},
	{0x0001685c, 0x3db7fffc},
	{0x00016860, 0xfffffffc},
	{0x00016864, 0x000f0278},
	{0x0001686c, 0x6db60000},
	{0x00016900, 0x3fffbe01},
	{0x00016904, 0xfff80000},
	{0x00016908, 0x00080010},
	{0x00016944, 0x02084080},
	{0x00016948, 0x00000000},
	{0x00016b80, 0x00000000},
	{0x00016b84, 0x00000000},
	{0x00016b88, 0x00800700},
	{0x00016b8c, 0x00800700},
	{0x00016b90, 0x00800700},
	{0x00016b94, 0x00000000},
	{0x00016b98, 0x00000000},
	{0x00016b9c, 0x00000000},
	{0x00016ba0, 0x00000001},
	{0x00016ba4, 0x00000001},
	{0x00016ba8, 0x00000000},
	{0x00016bac, 0x00000000},
	{0x00016bb0, 0x00000000},
	{0x00016bb4, 0x00000000},
	{0x00016bb8, 0x00000000},
	{0x00016bbc, 0x00000000},
	{0x00016bc0, 0x000000a0},
	{0x00016bc4, 0x000c0000},
	{0x00016bc8, 0x14021402},
	{0x00016bcc, 0x00001402},
	{0x00016bd0, 0x00000000},
	{0x00016bd4, 0x00000000},
};
269
/*
 * Merlin-style RX gain table. NOTE(review): not referenced by the
 * visible AR9003 init code in this file set — presumably retained for
 * Merlin (AR9280)-derived configurations; confirm against callers.
 * The 0x0000b000 half mirrors the 0x0000a000 half entry-for-entry.
 */
static const u32 ar9300Common_rx_gain_table_merlin_2p0[][2] = {
	/* Addr      allmodes */
	{0x0000a000, 0x02000101},
	{0x0000a004, 0x02000102},
	{0x0000a008, 0x02000103},
	{0x0000a00c, 0x02000104},
	{0x0000a010, 0x02000200},
	{0x0000a014, 0x02000201},
	{0x0000a018, 0x02000202},
	{0x0000a01c, 0x02000203},
	{0x0000a020, 0x02000204},
	{0x0000a024, 0x02000205},
	{0x0000a028, 0x02000208},
	{0x0000a02c, 0x02000302},
	{0x0000a030, 0x02000303},
	{0x0000a034, 0x02000304},
	{0x0000a038, 0x02000400},
	{0x0000a03c, 0x02010300},
	{0x0000a040, 0x02010301},
	{0x0000a044, 0x02010302},
	{0x0000a048, 0x02000500},
	{0x0000a04c, 0x02010400},
	{0x0000a050, 0x02020300},
	{0x0000a054, 0x02020301},
	{0x0000a058, 0x02020302},
	{0x0000a05c, 0x02020303},
	{0x0000a060, 0x02020400},
	{0x0000a064, 0x02030300},
	{0x0000a068, 0x02030301},
	{0x0000a06c, 0x02030302},
	{0x0000a070, 0x02030303},
	{0x0000a074, 0x02030400},
	{0x0000a078, 0x02040300},
	{0x0000a07c, 0x02040301},
	{0x0000a080, 0x02040302},
	{0x0000a084, 0x02040303},
	{0x0000a088, 0x02030500},
	{0x0000a08c, 0x02040400},
	{0x0000a090, 0x02050203},
	{0x0000a094, 0x02050204},
	{0x0000a098, 0x02050205},
	{0x0000a09c, 0x02040500},
	{0x0000a0a0, 0x02050301},
	{0x0000a0a4, 0x02050302},
	{0x0000a0a8, 0x02050303},
	{0x0000a0ac, 0x02050400},
	{0x0000a0b0, 0x02050401},
	{0x0000a0b4, 0x02050402},
	{0x0000a0b8, 0x02050403},
	{0x0000a0bc, 0x02050500},
	{0x0000a0c0, 0x02050501},
	{0x0000a0c4, 0x02050502},
	{0x0000a0c8, 0x02050503},
	{0x0000a0cc, 0x02050504},
	{0x0000a0d0, 0x02050600},
	{0x0000a0d4, 0x02050601},
	{0x0000a0d8, 0x02050602},
	{0x0000a0dc, 0x02050603},
	{0x0000a0e0, 0x02050604},
	{0x0000a0e4, 0x02050700},
	{0x0000a0e8, 0x02050701},
	{0x0000a0ec, 0x02050702},
	{0x0000a0f0, 0x02050703},
	{0x0000a0f4, 0x02050704},
	{0x0000a0f8, 0x02050705},
	{0x0000a0fc, 0x02050708},
	{0x0000a100, 0x02050709},
	{0x0000a104, 0x0205070a},
	{0x0000a108, 0x0205070b},
	{0x0000a10c, 0x0205070c},
	{0x0000a110, 0x0205070d},
	{0x0000a114, 0x02050710},
	{0x0000a118, 0x02050711},
	{0x0000a11c, 0x02050712},
	{0x0000a120, 0x02050713},
	{0x0000a124, 0x02050714},
	{0x0000a128, 0x02050715},
	{0x0000a12c, 0x02050730},
	{0x0000a130, 0x02050731},
	{0x0000a134, 0x02050732},
	{0x0000a138, 0x02050733},
	{0x0000a13c, 0x02050734},
	{0x0000a140, 0x02050735},
	{0x0000a144, 0x02050750},
	{0x0000a148, 0x02050751},
	{0x0000a14c, 0x02050752},
	{0x0000a150, 0x02050753},
	{0x0000a154, 0x02050754},
	{0x0000a158, 0x02050755},
	{0x0000a15c, 0x02050770},
	{0x0000a160, 0x02050771},
	{0x0000a164, 0x02050772},
	{0x0000a168, 0x02050773},
	{0x0000a16c, 0x02050774},
	{0x0000a170, 0x02050775},
	{0x0000a174, 0x00000776},
	{0x0000a178, 0x00000776},
	{0x0000a17c, 0x00000776},
	{0x0000a180, 0x00000776},
	{0x0000a184, 0x00000776},
	{0x0000a188, 0x00000776},
	{0x0000a18c, 0x00000776},
	{0x0000a190, 0x00000776},
	{0x0000a194, 0x00000776},
	{0x0000a198, 0x00000776},
	{0x0000a19c, 0x00000776},
	{0x0000a1a0, 0x00000776},
	{0x0000a1a4, 0x00000776},
	{0x0000a1a8, 0x00000776},
	{0x0000a1ac, 0x00000776},
	{0x0000a1b0, 0x00000776},
	{0x0000a1b4, 0x00000776},
	{0x0000a1b8, 0x00000776},
	{0x0000a1bc, 0x00000776},
	{0x0000a1c0, 0x00000776},
	{0x0000a1c4, 0x00000776},
	{0x0000a1c8, 0x00000776},
	{0x0000a1cc, 0x00000776},
	{0x0000a1d0, 0x00000776},
	{0x0000a1d4, 0x00000776},
	{0x0000a1d8, 0x00000776},
	{0x0000a1dc, 0x00000776},
	{0x0000a1e0, 0x00000776},
	{0x0000a1e4, 0x00000776},
	{0x0000a1e8, 0x00000776},
	{0x0000a1ec, 0x00000776},
	{0x0000a1f0, 0x00000776},
	{0x0000a1f4, 0x00000776},
	{0x0000a1f8, 0x00000776},
	{0x0000a1fc, 0x00000776},
	{0x0000b000, 0x02000101},
	{0x0000b004, 0x02000102},
	{0x0000b008, 0x02000103},
	{0x0000b00c, 0x02000104},
	{0x0000b010, 0x02000200},
	{0x0000b014, 0x02000201},
	{0x0000b018, 0x02000202},
	{0x0000b01c, 0x02000203},
	{0x0000b020, 0x02000204},
	{0x0000b024, 0x02000205},
	{0x0000b028, 0x02000208},
	{0x0000b02c, 0x02000302},
	{0x0000b030, 0x02000303},
	{0x0000b034, 0x02000304},
	{0x0000b038, 0x02000400},
	{0x0000b03c, 0x02010300},
	{0x0000b040, 0x02010301},
	{0x0000b044, 0x02010302},
	{0x0000b048, 0x02000500},
	{0x0000b04c, 0x02010400},
	{0x0000b050, 0x02020300},
	{0x0000b054, 0x02020301},
	{0x0000b058, 0x02020302},
	{0x0000b05c, 0x02020303},
	{0x0000b060, 0x02020400},
	{0x0000b064, 0x02030300},
	{0x0000b068, 0x02030301},
	{0x0000b06c, 0x02030302},
	{0x0000b070, 0x02030303},
	{0x0000b074, 0x02030400},
	{0x0000b078, 0x02040300},
	{0x0000b07c, 0x02040301},
	{0x0000b080, 0x02040302},
	{0x0000b084, 0x02040303},
	{0x0000b088, 0x02030500},
	{0x0000b08c, 0x02040400},
	{0x0000b090, 0x02050203},
	{0x0000b094, 0x02050204},
	{0x0000b098, 0x02050205},
	{0x0000b09c, 0x02040500},
	{0x0000b0a0, 0x02050301},
	{0x0000b0a4, 0x02050302},
	{0x0000b0a8, 0x02050303},
	{0x0000b0ac, 0x02050400},
	{0x0000b0b0, 0x02050401},
	{0x0000b0b4, 0x02050402},
	{0x0000b0b8, 0x02050403},
	{0x0000b0bc, 0x02050500},
	{0x0000b0c0, 0x02050501},
	{0x0000b0c4, 0x02050502},
	{0x0000b0c8, 0x02050503},
	{0x0000b0cc, 0x02050504},
	{0x0000b0d0, 0x02050600},
	{0x0000b0d4, 0x02050601},
	{0x0000b0d8, 0x02050602},
	{0x0000b0dc, 0x02050603},
	{0x0000b0e0, 0x02050604},
	{0x0000b0e4, 0x02050700},
	{0x0000b0e8, 0x02050701},
	{0x0000b0ec, 0x02050702},
	{0x0000b0f0, 0x02050703},
	{0x0000b0f4, 0x02050704},
	{0x0000b0f8, 0x02050705},
	{0x0000b0fc, 0x02050708},
	{0x0000b100, 0x02050709},
	{0x0000b104, 0x0205070a},
	{0x0000b108, 0x0205070b},
	{0x0000b10c, 0x0205070c},
	{0x0000b110, 0x0205070d},
	{0x0000b114, 0x02050710},
	{0x0000b118, 0x02050711},
	{0x0000b11c, 0x02050712},
	{0x0000b120, 0x02050713},
	{0x0000b124, 0x02050714},
	{0x0000b128, 0x02050715},
	{0x0000b12c, 0x02050730},
	{0x0000b130, 0x02050731},
	{0x0000b134, 0x02050732},
	{0x0000b138, 0x02050733},
	{0x0000b13c, 0x02050734},
	{0x0000b140, 0x02050735},
	{0x0000b144, 0x02050750},
	{0x0000b148, 0x02050751},
	{0x0000b14c, 0x02050752},
	{0x0000b150, 0x02050753},
	{0x0000b154, 0x02050754},
	{0x0000b158, 0x02050755},
	{0x0000b15c, 0x02050770},
	{0x0000b160, 0x02050771},
	{0x0000b164, 0x02050772},
	{0x0000b168, 0x02050773},
	{0x0000b16c, 0x02050774},
	{0x0000b170, 0x02050775},
	{0x0000b174, 0x00000776},
	{0x0000b178, 0x00000776},
	{0x0000b17c, 0x00000776},
	{0x0000b180, 0x00000776},
	{0x0000b184, 0x00000776},
	{0x0000b188, 0x00000776},
	{0x0000b18c, 0x00000776},
	{0x0000b190, 0x00000776},
	{0x0000b194, 0x00000776},
	{0x0000b198, 0x00000776},
	{0x0000b19c, 0x00000776},
	{0x0000b1a0, 0x00000776},
	{0x0000b1a4, 0x00000776},
	{0x0000b1a8, 0x00000776},
	{0x0000b1ac, 0x00000776},
	{0x0000b1b0, 0x00000776},
	{0x0000b1b4, 0x00000776},
	{0x0000b1b8, 0x00000776},
	{0x0000b1bc, 0x00000776},
	{0x0000b1c0, 0x00000776},
	{0x0000b1c4, 0x00000776},
	{0x0000b1c8, 0x00000776},
	{0x0000b1cc, 0x00000776},
	{0x0000b1d0, 0x00000776},
	{0x0000b1d4, 0x00000776},
	{0x0000b1d8, 0x00000776},
	{0x0000b1dc, 0x00000776},
	{0x0000b1e0, 0x00000776},
	{0x0000b1e4, 0x00000776},
	{0x0000b1e8, 0x00000776},
	{0x0000b1ec, 0x00000776},
	{0x0000b1f0, 0x00000776},
	{0x0000b1f4, 0x00000776},
	{0x0000b1f8, 0x00000776},
	{0x0000b1fc, 0x00000776},
};
529
/*
 * Per-mode MAC register overrides (MAC ATH_INI_POST stage in
 * ar9003_hw_init_mode_regs).
 */
static const u32 ar9300_2p0_mac_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20 */
	{0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
	{0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
	{0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
	{0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
	{0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
	{0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
	{0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
	{0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
};
541
/*
 * Per-mode SoC register override (SoC ATH_INI_POST stage); a single
 * register with identical values in all four mode columns.
 */
static const u32 ar9300_2p0_soc_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20 */
	{0x00007010, 0x00000023, 0x00000023, 0x00000023, 0x00000023},
};
546
/*
 * Merlin (AR92xx-style, 0x7800-range) radio core settings.
 * NOTE(review): not referenced by the visible AR9003 init code here —
 * presumably used by a configuration path outside this file; confirm
 * against callers.
 */
static const u32 ar9200_merlin_2p0_radio_core[][2] = {
	/* Addr      allmodes */
	{0x00007800, 0x00040000},
	{0x00007804, 0xdb005012},
	{0x00007808, 0x04924914},
	{0x0000780c, 0x21084210},
	{0x00007810, 0x6d801300},
	{0x00007814, 0x0019beff},
	{0x00007818, 0x07e41000},
	{0x0000781c, 0x00392000},
	{0x00007820, 0x92592480},
	{0x00007824, 0x00040000},
	{0x00007828, 0xdb005012},
	{0x0000782c, 0x04924914},
	{0x00007830, 0x21084210},
	{0x00007834, 0x6d801300},
	{0x00007838, 0x0019beff},
	{0x0000783c, 0x07e40000},
	{0x00007840, 0x00392000},
	{0x00007844, 0x92592480},
	{0x00007848, 0x00100000},
	{0x0000784c, 0x773f0567},
	{0x00007850, 0x54214514},
	{0x00007854, 0x12035828},
	{0x00007858, 0x92592692},
	{0x0000785c, 0x00000000},
	{0x00007860, 0x56400000},
	{0x00007864, 0x0a8e370e},
	{0x00007868, 0xc0102850},
	{0x0000786c, 0x812d4000},
	{0x00007870, 0x807ec400},
	{0x00007874, 0x001b6db0},
	{0x00007878, 0x00376b63},
	{0x0000787c, 0x06db6db6},
	{0x00007880, 0x006d8000},
	{0x00007884, 0xffeffffe},
	{0x00007888, 0xffeffffe},
	{0x0000788c, 0x00010000},
	{0x00007890, 0x02060aeb},
	{0x00007894, 0x5a108000},
};
588
/*
 * Per-mode baseband register overrides (BB ATH_INI_POST stage in
 * ar9003_hw_init_mode_regs).
 */
static const u32 ar9300_2p0_baseband_postamble[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20 */
	{0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
	{0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
	{0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
	{0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
	{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
	{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
	{0x00009c00, 0x00000044, 0x000000c4, 0x000000c4, 0x00000044},
	{0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
	{0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
	{0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
	{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
	{0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
	{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
	{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
	{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
	{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
	{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
	{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
	{0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
	{0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
	{0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
	{0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
	{0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
	{0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
	{0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
	{0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
	{0x0000a260, 0x0a021501, 0x0a021501, 0x3a021501, 0x3a021501},
	{0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
	{0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
	{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
	{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
	{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
	{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
	{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
	{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
	{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
	{0x0000ae04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
	{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
	{0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
	{0x0000b284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
	{0x0000b830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
	{0x0000be04, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
	{0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000be1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
	{0x0000be20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
	{0x0000c284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
};
641
/*
 * AR9300 v2.0 baseband core initialisation values.
 *
 * Each row is a {register address, value} pair; the single value column
 * applies to all operating modes ("allmodes").  These are opaque,
 * vendor-supplied hardware init values — do not edit them by hand.
 * NOTE(review): presumably written to the chip by the ath9k init-value
 * loader during hardware reset — confirm against the ar9003 hw code.
 */
static const u32 ar9300_2p0_baseband_core[][2] = {
	/* Addr      allmodes */
	{0x00009800, 0xafe68e30},
	{0x00009804, 0xfd14e000},
	{0x00009808, 0x9c0a9f6b},
	{0x0000980c, 0x04900000},
	{0x00009814, 0x9280c00a},
	{0x00009818, 0x00000000},
	{0x0000981c, 0x00020028},
	{0x00009834, 0x5f3ca3de},
	{0x00009838, 0x0108ecff},
	{0x0000983c, 0x14750600},
	{0x00009880, 0x201fff00},
	{0x00009884, 0x00001042},
	{0x000098a4, 0x00200400},
	{0x000098b0, 0x52440bbe},
	{0x000098d0, 0x004b6a8e},
	{0x000098d4, 0x00000820},
	{0x000098dc, 0x00000000},
	{0x000098f0, 0x00000000},
	{0x000098f4, 0x00000000},
	{0x00009c04, 0xff55ff55},
	{0x00009c08, 0x0320ff55},
	{0x00009c0c, 0x00000000},
	{0x00009c10, 0x00000000},
	{0x00009c14, 0x00046384},
	{0x00009c18, 0x05b6b440},
	{0x00009c1c, 0x00b6b440},
	{0x00009d00, 0xc080a333},
	{0x00009d04, 0x40206c10},
	{0x00009d08, 0x009c4060},
	{0x00009d0c, 0x9883800a},
	{0x00009d10, 0x01834061},
	{0x00009d14, 0x00c0040b},
	{0x00009d18, 0x00000000},
	{0x00009e08, 0x0038230c},
	{0x00009e24, 0x990bb515},
	{0x00009e28, 0x0c6f0000},
	{0x00009e30, 0x06336f77},
	{0x00009e34, 0x6af6532f},
	{0x00009e38, 0x0cc80c00},
	{0x00009e3c, 0xcf946222},
	{0x00009e40, 0x0d261820},
	{0x00009e4c, 0x00001004},
	{0x00009e50, 0x00ff03f1},
	{0x00009e54, 0x00000000},
	{0x00009fc0, 0x803e4788},
	{0x00009fc4, 0x0001efb5},
	{0x00009fcc, 0x40000014},
	{0x00009fd0, 0x01193b93},
	{0x0000a20c, 0x00000000},
	{0x0000a220, 0x00000000},
	{0x0000a224, 0x00000000},
	{0x0000a228, 0x10002310},
	{0x0000a22c, 0x01036a1e},
	{0x0000a234, 0x10000fff},
	{0x0000a23c, 0x00000000},
	{0x0000a244, 0x0c000000},
	{0x0000a2a0, 0x00000001},
	{0x0000a2c0, 0x00000001},
	{0x0000a2c8, 0x00000000},
	{0x0000a2cc, 0x18c43433},
	{0x0000a2d4, 0x00000000},
	{0x0000a2dc, 0x00000000},
	{0x0000a2e0, 0x00000000},
	{0x0000a2e4, 0x00000000},
	{0x0000a2e8, 0x00000000},
	{0x0000a2ec, 0x00000000},
	{0x0000a2f0, 0x00000000},
	{0x0000a2f4, 0x00000000},
	{0x0000a2f8, 0x00000000},
	{0x0000a344, 0x00000000},
	{0x0000a34c, 0x00000000},
	{0x0000a350, 0x0000a000},
	{0x0000a364, 0x00000000},
	{0x0000a370, 0x00000000},
	{0x0000a390, 0x00000001},
	{0x0000a394, 0x00000444},
	{0x0000a398, 0x001f0e0f},
	{0x0000a39c, 0x0075393f},
	{0x0000a3a0, 0xb79f6427},
	{0x0000a3a4, 0x00000000},
	{0x0000a3a8, 0xaaaaaaaa},
	{0x0000a3ac, 0x3c466478},
	{0x0000a3c0, 0x20202020},
	{0x0000a3c4, 0x22222220},
	{0x0000a3c8, 0x20200020},
	{0x0000a3cc, 0x20202020},
	{0x0000a3d0, 0x20202020},
	{0x0000a3d4, 0x20202020},
	{0x0000a3d8, 0x20202020},
	{0x0000a3dc, 0x20202020},
	{0x0000a3e0, 0x20202020},
	{0x0000a3e4, 0x20202020},
	{0x0000a3e8, 0x20202020},
	{0x0000a3ec, 0x20202020},
	{0x0000a3f0, 0x00000000},
	{0x0000a3f4, 0x00000246},
	{0x0000a3f8, 0x0cdbd380},
	{0x0000a3fc, 0x000f0f01},
	{0x0000a400, 0x8fa91f01},
	{0x0000a404, 0x00000000},
	{0x0000a408, 0x0e79e5c6},
	{0x0000a40c, 0x00820820},
	{0x0000a414, 0x1ce739ce},
	{0x0000a418, 0x2d001dce},
	{0x0000a41c, 0x1ce739ce},
	{0x0000a420, 0x000001ce},
	{0x0000a424, 0x1ce739ce},
	{0x0000a428, 0x000001ce},
	{0x0000a42c, 0x1ce739ce},
	{0x0000a430, 0x1ce739ce},
	{0x0000a434, 0x00000000},
	{0x0000a438, 0x00001801},
	{0x0000a43c, 0x00000000},
	{0x0000a440, 0x00000000},
	{0x0000a444, 0x00000000},
	{0x0000a448, 0x04000080},
	{0x0000a44c, 0x00000001},
	{0x0000a450, 0x00010000},
	{0x0000a458, 0x00000000},
	{0x0000a600, 0x00000000},
	{0x0000a604, 0x00000000},
	{0x0000a608, 0x00000000},
	{0x0000a60c, 0x00000000},
	{0x0000a610, 0x00000000},
	{0x0000a614, 0x00000000},
	{0x0000a618, 0x00000000},
	{0x0000a61c, 0x00000000},
	{0x0000a620, 0x00000000},
	{0x0000a624, 0x00000000},
	{0x0000a628, 0x00000000},
	{0x0000a62c, 0x00000000},
	{0x0000a630, 0x00000000},
	{0x0000a634, 0x00000000},
	{0x0000a638, 0x00000000},
	{0x0000a63c, 0x00000000},
	{0x0000a640, 0x00000000},
	{0x0000a644, 0x3fad9d74},
	{0x0000a648, 0x0048060a},
	{0x0000a64c, 0x00000637},
	{0x0000a670, 0x03020100},
	{0x0000a674, 0x09080504},
	{0x0000a678, 0x0d0c0b0a},
	{0x0000a67c, 0x13121110},
	{0x0000a680, 0x31301514},
	{0x0000a684, 0x35343332},
	{0x0000a688, 0x00000036},
	{0x0000a690, 0x00000838},
	{0x0000a7c0, 0x00000000},
	{0x0000a7c4, 0xfffffffc},
	{0x0000a7c8, 0x00000000},
	{0x0000a7cc, 0x00000000},
	{0x0000a7d0, 0x00000000},
	{0x0000a7d4, 0x00000004},
	{0x0000a7dc, 0x00000001},
	{0x0000a8d0, 0x004b6a8e},
	{0x0000a8d4, 0x00000820},
	{0x0000a8dc, 0x00000000},
	{0x0000a8f0, 0x00000000},
	{0x0000a8f4, 0x00000000},
	{0x0000b2d0, 0x00000080},
	{0x0000b2d4, 0x00000000},
	{0x0000b2dc, 0x00000000},
	{0x0000b2e0, 0x00000000},
	{0x0000b2e4, 0x00000000},
	{0x0000b2e8, 0x00000000},
	{0x0000b2ec, 0x00000000},
	{0x0000b2f0, 0x00000000},
	{0x0000b2f4, 0x00000000},
	{0x0000b2f8, 0x00000000},
	{0x0000b408, 0x0e79e5c0},
	{0x0000b40c, 0x00820820},
	{0x0000b420, 0x00000000},
	{0x0000b8d0, 0x004b6a8e},
	{0x0000b8d4, 0x00000820},
	{0x0000b8dc, 0x00000000},
	{0x0000b8f0, 0x00000000},
	{0x0000b8f4, 0x00000000},
	{0x0000c2d0, 0x00000080},
	{0x0000c2d4, 0x00000000},
	{0x0000c2dc, 0x00000000},
	{0x0000c2e0, 0x00000000},
	{0x0000c2e4, 0x00000000},
	{0x0000c2e8, 0x00000000},
	{0x0000c2ec, 0x00000000},
	{0x0000c2f0, 0x00000000},
	{0x0000c2f4, 0x00000000},
	{0x0000c2f8, 0x00000000},
	{0x0000c408, 0x0e79e5c0},
	{0x0000c40c, 0x00820820},
	{0x0000c420, 0x00000000},
};
835
/*
 * AR9300 v2.0 "high power" TX gain table.
 *
 * Each row is {register address, value-per-mode}; the four value columns
 * select the value used for 5G_HT20 / 5G_HT40 / 2G_HT40 / 2G_HT20
 * operation (see the column header below).  Opaque, vendor-calibrated
 * TX gain data — do not edit values by hand.  One of several alternative
 * TX gain tables in this file (see the high_ob_db / low_ob_db variants);
 * NOTE(review): table choice is presumably EEPROM-driven — confirm.
 */
static const u32 ar9300Modes_high_power_tx_gain_table_2p0[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20 */
	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
	{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
	{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
	{0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
	{0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
	{0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
	{0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
	{0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
	{0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
	{0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
	{0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
	{0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
	{0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
	{0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
	{0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
	{0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
	{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
	{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
	{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
	{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
	{0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
	{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
	{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
	{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
	{0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
	{0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
	{0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
	{0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
	{0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
	{0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
	{0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
	{0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
	{0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
	{0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
	{0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
	{0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
	{0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
	{0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
	{0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
	{0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
	{0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
	{0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
	{0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
	{0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
	{0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
	{0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
	{0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
	{0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
	{0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
	{0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
	{0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x00016044, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
	{0x00016048, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
	{0x00016068, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
	{0x00016444, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
	{0x00016448, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
	{0x00016468, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
	{0x00016844, 0x056db2e6, 0x056db2e6, 0x056db2e6, 0x056db2e6},
	{0x00016848, 0xae480001, 0xae480001, 0xae480001, 0xae480001},
	{0x00016868, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c, 0x6eb6db6c},
};
913
/*
 * AR9300 v2.0 "high OB/DB" TX gain table.
 *
 * Same layout as the other TX gain tables in this file: each row is
 * {register address, value-per-mode} with columns for 5G_HT20 /
 * 5G_HT40 / 2G_HT40 / 2G_HT20.  The 0xa4xx/0xa5xx rows match the
 * high_power variant; only the trailing 0x16xxx rows differ.
 * Opaque, vendor-calibrated data — do not edit values by hand.
 */
static const u32 ar9300Modes_high_ob_db_tx_gain_table_2p0[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20 */
	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
	{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
	{0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
	{0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
	{0x0000a50c, 0x0f022223, 0x0f022223, 0x0b000200, 0x0b000200},
	{0x0000a510, 0x14022620, 0x14022620, 0x0f000202, 0x0f000202},
	{0x0000a514, 0x18022622, 0x18022622, 0x11000400, 0x11000400},
	{0x0000a518, 0x1b022822, 0x1b022822, 0x15000402, 0x15000402},
	{0x0000a51c, 0x20022842, 0x20022842, 0x19000404, 0x19000404},
	{0x0000a520, 0x22022c41, 0x22022c41, 0x1b000603, 0x1b000603},
	{0x0000a524, 0x28023042, 0x28023042, 0x1f000a02, 0x1f000a02},
	{0x0000a528, 0x2c023044, 0x2c023044, 0x23000a04, 0x23000a04},
	{0x0000a52c, 0x2f023644, 0x2f023644, 0x26000a20, 0x26000a20},
	{0x0000a530, 0x34025643, 0x34025643, 0x2a000e20, 0x2a000e20},
	{0x0000a534, 0x38025a44, 0x38025a44, 0x2e000e22, 0x2e000e22},
	{0x0000a538, 0x3b025e45, 0x3b025e45, 0x31000e24, 0x31000e24},
	{0x0000a53c, 0x41025e4a, 0x41025e4a, 0x34001640, 0x34001640},
	{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
	{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
	{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
	{0x0000a54c, 0x59025eb5, 0x59025eb5, 0x42001a83, 0x42001a83},
	{0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
	{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
	{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
	{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
	{0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
	{0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec},
	{0x0000a580, 0x00802220, 0x00802220, 0x00800000, 0x00800000},
	{0x0000a584, 0x06802223, 0x06802223, 0x04800002, 0x04800002},
	{0x0000a588, 0x0a822220, 0x0a822220, 0x08800004, 0x08800004},
	{0x0000a58c, 0x0f822223, 0x0f822223, 0x0b800200, 0x0b800200},
	{0x0000a590, 0x14822620, 0x14822620, 0x0f800202, 0x0f800202},
	{0x0000a594, 0x18822622, 0x18822622, 0x11800400, 0x11800400},
	{0x0000a598, 0x1b822822, 0x1b822822, 0x15800402, 0x15800402},
	{0x0000a59c, 0x20822842, 0x20822842, 0x19800404, 0x19800404},
	{0x0000a5a0, 0x22822c41, 0x22822c41, 0x1b800603, 0x1b800603},
	{0x0000a5a4, 0x28823042, 0x28823042, 0x1f800a02, 0x1f800a02},
	{0x0000a5a8, 0x2c823044, 0x2c823044, 0x23800a04, 0x23800a04},
	{0x0000a5ac, 0x2f823644, 0x2f823644, 0x26800a20, 0x26800a20},
	{0x0000a5b0, 0x34825643, 0x34825643, 0x2a800e20, 0x2a800e20},
	{0x0000a5b4, 0x38825a44, 0x38825a44, 0x2e800e22, 0x2e800e22},
	{0x0000a5b8, 0x3b825e45, 0x3b825e45, 0x31800e24, 0x31800e24},
	{0x0000a5bc, 0x41825e4a, 0x41825e4a, 0x34801640, 0x34801640},
	{0x0000a5c0, 0x48825e6c, 0x48825e6c, 0x38801660, 0x38801660},
	{0x0000a5c4, 0x4e825e8e, 0x4e825e8e, 0x3b801861, 0x3b801861},
	{0x0000a5c8, 0x53825eb2, 0x53825eb2, 0x3e801a81, 0x3e801a81},
	{0x0000a5cc, 0x59825eb5, 0x59825eb5, 0x42801a83, 0x42801a83},
	{0x0000a5d0, 0x5f825ef6, 0x5f825ef6, 0x44801c84, 0x44801c84},
	{0x0000a5d4, 0x62825f56, 0x62825f56, 0x48801ce3, 0x48801ce3},
	{0x0000a5d8, 0x66827f56, 0x66827f56, 0x4c801ce5, 0x4c801ce5},
	{0x0000a5dc, 0x6a829f56, 0x6a829f56, 0x50801ce9, 0x50801ce9},
	{0x0000a5e0, 0x70849f56, 0x70849f56, 0x54801ceb, 0x54801ceb},
	{0x0000a5e4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x0000a5e8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x0000a5ec, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x0000a5f0, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x0000a5f4, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x0000a5f8, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x0000a5fc, 0x7584ff56, 0x7584ff56, 0x56801eec, 0x56801eec},
	{0x00016044, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
	{0x00016048, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016444, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
	{0x00016448, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
	{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016844, 0x056db2e4, 0x056db2e4, 0x056db2e4, 0x056db2e4},
	{0x00016848, 0x8e480001, 0x8e480001, 0x8e480001, 0x8e480001},
	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
991
/*
 * AR9300 v2.0 common RX gain table.
 *
 * Each row is a {register address, value} pair applied in all modes.
 * Opaque, vendor-calibrated RX gain data — do not edit values by hand.
 * An alternative table for boards without an external LNA follows later
 * in this file (ar9300Common_wo_xlna_rx_gain_table_2p0).
 */
static const u32 ar9300Common_rx_gain_table_2p0[][2] = {
	/* Addr      allmodes */
	{0x0000a000, 0x00010000},
	{0x0000a004, 0x00030002},
	{0x0000a008, 0x00050004},
	{0x0000a00c, 0x00810080},
	{0x0000a010, 0x00830082},
	{0x0000a014, 0x01810180},
	{0x0000a018, 0x01830182},
	{0x0000a01c, 0x01850184},
	{0x0000a020, 0x01890188},
	{0x0000a024, 0x018b018a},
	{0x0000a028, 0x018d018c},
	{0x0000a02c, 0x01910190},
	{0x0000a030, 0x01930192},
	{0x0000a034, 0x01950194},
	{0x0000a038, 0x038a0196},
	{0x0000a03c, 0x038c038b},
	{0x0000a040, 0x0390038d},
	{0x0000a044, 0x03920391},
	{0x0000a048, 0x03940393},
	{0x0000a04c, 0x03960395},
	{0x0000a050, 0x00000000},
	{0x0000a054, 0x00000000},
	{0x0000a058, 0x00000000},
	{0x0000a05c, 0x00000000},
	{0x0000a060, 0x00000000},
	{0x0000a064, 0x00000000},
	{0x0000a068, 0x00000000},
	{0x0000a06c, 0x00000000},
	{0x0000a070, 0x00000000},
	{0x0000a074, 0x00000000},
	{0x0000a078, 0x00000000},
	{0x0000a07c, 0x00000000},
	{0x0000a080, 0x22222229},
	{0x0000a084, 0x1d1d1d1d},
	{0x0000a088, 0x1d1d1d1d},
	{0x0000a08c, 0x1d1d1d1d},
	{0x0000a090, 0x171d1d1d},
	{0x0000a094, 0x11111717},
	{0x0000a098, 0x00030311},
	{0x0000a09c, 0x00000000},
	{0x0000a0a0, 0x00000000},
	{0x0000a0a4, 0x00000000},
	{0x0000a0a8, 0x00000000},
	{0x0000a0ac, 0x00000000},
	{0x0000a0b0, 0x00000000},
	{0x0000a0b4, 0x00000000},
	{0x0000a0b8, 0x00000000},
	{0x0000a0bc, 0x00000000},
	{0x0000a0c0, 0x001f0000},
	{0x0000a0c4, 0x01000101},
	{0x0000a0c8, 0x011e011f},
	{0x0000a0cc, 0x011c011d},
	{0x0000a0d0, 0x02030204},
	{0x0000a0d4, 0x02010202},
	{0x0000a0d8, 0x021f0200},
	{0x0000a0dc, 0x0302021e},
	{0x0000a0e0, 0x03000301},
	{0x0000a0e4, 0x031e031f},
	{0x0000a0e8, 0x0402031d},
	{0x0000a0ec, 0x04000401},
	{0x0000a0f0, 0x041e041f},
	{0x0000a0f4, 0x0502041d},
	{0x0000a0f8, 0x05000501},
	{0x0000a0fc, 0x051e051f},
	{0x0000a100, 0x06010602},
	{0x0000a104, 0x061f0600},
	{0x0000a108, 0x061d061e},
	{0x0000a10c, 0x07020703},
	{0x0000a110, 0x07000701},
	{0x0000a114, 0x00000000},
	{0x0000a118, 0x00000000},
	{0x0000a11c, 0x00000000},
	{0x0000a120, 0x00000000},
	{0x0000a124, 0x00000000},
	{0x0000a128, 0x00000000},
	{0x0000a12c, 0x00000000},
	{0x0000a130, 0x00000000},
	{0x0000a134, 0x00000000},
	{0x0000a138, 0x00000000},
	{0x0000a13c, 0x00000000},
	{0x0000a140, 0x001f0000},
	{0x0000a144, 0x01000101},
	{0x0000a148, 0x011e011f},
	{0x0000a14c, 0x011c011d},
	{0x0000a150, 0x02030204},
	{0x0000a154, 0x02010202},
	{0x0000a158, 0x021f0200},
	{0x0000a15c, 0x0302021e},
	{0x0000a160, 0x03000301},
	{0x0000a164, 0x031e031f},
	{0x0000a168, 0x0402031d},
	{0x0000a16c, 0x04000401},
	{0x0000a170, 0x041e041f},
	{0x0000a174, 0x0502041d},
	{0x0000a178, 0x05000501},
	{0x0000a17c, 0x051e051f},
	{0x0000a180, 0x06010602},
	{0x0000a184, 0x061f0600},
	{0x0000a188, 0x061d061e},
	{0x0000a18c, 0x07020703},
	{0x0000a190, 0x07000701},
	{0x0000a194, 0x00000000},
	{0x0000a198, 0x00000000},
	{0x0000a19c, 0x00000000},
	{0x0000a1a0, 0x00000000},
	{0x0000a1a4, 0x00000000},
	{0x0000a1a8, 0x00000000},
	{0x0000a1ac, 0x00000000},
	{0x0000a1b0, 0x00000000},
	{0x0000a1b4, 0x00000000},
	{0x0000a1b8, 0x00000000},
	{0x0000a1bc, 0x00000000},
	{0x0000a1c0, 0x00000000},
	{0x0000a1c4, 0x00000000},
	{0x0000a1c8, 0x00000000},
	{0x0000a1cc, 0x00000000},
	{0x0000a1d0, 0x00000000},
	{0x0000a1d4, 0x00000000},
	{0x0000a1d8, 0x00000000},
	{0x0000a1dc, 0x00000000},
	{0x0000a1e0, 0x00000000},
	{0x0000a1e4, 0x00000000},
	{0x0000a1e8, 0x00000000},
	{0x0000a1ec, 0x00000000},
	{0x0000a1f0, 0x00000396},
	{0x0000a1f4, 0x00000396},
	{0x0000a1f8, 0x00000396},
	{0x0000a1fc, 0x00000196},
	{0x0000b000, 0x00010000},
	{0x0000b004, 0x00030002},
	{0x0000b008, 0x00050004},
	{0x0000b00c, 0x00810080},
	{0x0000b010, 0x00830082},
	{0x0000b014, 0x01810180},
	{0x0000b018, 0x01830182},
	{0x0000b01c, 0x01850184},
	{0x0000b020, 0x02810280},
	{0x0000b024, 0x02830282},
	{0x0000b028, 0x02850284},
	{0x0000b02c, 0x02890288},
	{0x0000b030, 0x028b028a},
	{0x0000b034, 0x0388028c},
	{0x0000b038, 0x038a0389},
	{0x0000b03c, 0x038c038b},
	{0x0000b040, 0x0390038d},
	{0x0000b044, 0x03920391},
	{0x0000b048, 0x03940393},
	{0x0000b04c, 0x03960395},
	{0x0000b050, 0x00000000},
	{0x0000b054, 0x00000000},
	{0x0000b058, 0x00000000},
	{0x0000b05c, 0x00000000},
	{0x0000b060, 0x00000000},
	{0x0000b064, 0x00000000},
	{0x0000b068, 0x00000000},
	{0x0000b06c, 0x00000000},
	{0x0000b070, 0x00000000},
	{0x0000b074, 0x00000000},
	{0x0000b078, 0x00000000},
	{0x0000b07c, 0x00000000},
	{0x0000b080, 0x32323232},
	{0x0000b084, 0x2f2f3232},
	{0x0000b088, 0x23282a2d},
	{0x0000b08c, 0x1c1e2123},
	{0x0000b090, 0x14171919},
	{0x0000b094, 0x0e0e1214},
	{0x0000b098, 0x03050707},
	{0x0000b09c, 0x00030303},
	{0x0000b0a0, 0x00000000},
	{0x0000b0a4, 0x00000000},
	{0x0000b0a8, 0x00000000},
	{0x0000b0ac, 0x00000000},
	{0x0000b0b0, 0x00000000},
	{0x0000b0b4, 0x00000000},
	{0x0000b0b8, 0x00000000},
	{0x0000b0bc, 0x00000000},
	{0x0000b0c0, 0x003f0020},
	{0x0000b0c4, 0x00400041},
	{0x0000b0c8, 0x0140005f},
	{0x0000b0cc, 0x0160015f},
	{0x0000b0d0, 0x017e017f},
	{0x0000b0d4, 0x02410242},
	{0x0000b0d8, 0x025f0240},
	{0x0000b0dc, 0x027f0260},
	{0x0000b0e0, 0x0341027e},
	{0x0000b0e4, 0x035f0340},
	{0x0000b0e8, 0x037f0360},
	{0x0000b0ec, 0x04400441},
	{0x0000b0f0, 0x0460045f},
	{0x0000b0f4, 0x0541047f},
	{0x0000b0f8, 0x055f0540},
	{0x0000b0fc, 0x057f0560},
	{0x0000b100, 0x06400641},
	{0x0000b104, 0x0660065f},
	{0x0000b108, 0x067e067f},
	{0x0000b10c, 0x07410742},
	{0x0000b110, 0x075f0740},
	{0x0000b114, 0x077f0760},
	{0x0000b118, 0x07800781},
	{0x0000b11c, 0x07a0079f},
	{0x0000b120, 0x07c107bf},
	{0x0000b124, 0x000007c0},
	{0x0000b128, 0x00000000},
	{0x0000b12c, 0x00000000},
	{0x0000b130, 0x00000000},
	{0x0000b134, 0x00000000},
	{0x0000b138, 0x00000000},
	{0x0000b13c, 0x00000000},
	{0x0000b140, 0x003f0020},
	{0x0000b144, 0x00400041},
	{0x0000b148, 0x0140005f},
	{0x0000b14c, 0x0160015f},
	{0x0000b150, 0x017e017f},
	{0x0000b154, 0x02410242},
	{0x0000b158, 0x025f0240},
	{0x0000b15c, 0x027f0260},
	{0x0000b160, 0x0341027e},
	{0x0000b164, 0x035f0340},
	{0x0000b168, 0x037f0360},
	{0x0000b16c, 0x04400441},
	{0x0000b170, 0x0460045f},
	{0x0000b174, 0x0541047f},
	{0x0000b178, 0x055f0540},
	{0x0000b17c, 0x057f0560},
	{0x0000b180, 0x06400641},
	{0x0000b184, 0x0660065f},
	{0x0000b188, 0x067e067f},
	{0x0000b18c, 0x07410742},
	{0x0000b190, 0x075f0740},
	{0x0000b194, 0x077f0760},
	{0x0000b198, 0x07800781},
	{0x0000b19c, 0x07a0079f},
	{0x0000b1a0, 0x07c107bf},
	{0x0000b1a4, 0x000007c0},
	{0x0000b1a8, 0x00000000},
	{0x0000b1ac, 0x00000000},
	{0x0000b1b0, 0x00000000},
	{0x0000b1b4, 0x00000000},
	{0x0000b1b8, 0x00000000},
	{0x0000b1bc, 0x00000000},
	{0x0000b1c0, 0x00000000},
	{0x0000b1c4, 0x00000000},
	{0x0000b1c8, 0x00000000},
	{0x0000b1cc, 0x00000000},
	{0x0000b1d0, 0x00000000},
	{0x0000b1d4, 0x00000000},
	{0x0000b1d8, 0x00000000},
	{0x0000b1dc, 0x00000000},
	{0x0000b1e0, 0x00000000},
	{0x0000b1e4, 0x00000000},
	{0x0000b1e8, 0x00000000},
	{0x0000b1ec, 0x00000000},
	{0x0000b1f0, 0x00000396},
	{0x0000b1f4, 0x00000396},
	{0x0000b1f8, 0x00000396},
	{0x0000b1fc, 0x00000196},
};
1251
/*
 * AR9300 v2.0 "low OB/DB" TX gain table.
 *
 * Same layout as the other TX gain tables in this file: each row is
 * {register address, value-per-mode} with columns for 5G_HT20 /
 * 5G_HT40 / 2G_HT40 / 2G_HT20.  Opaque, vendor-calibrated data —
 * do not edit values by hand.
 */
static const u32 ar9300Modes_low_ob_db_tx_gain_table_2p0[][5] = {
	/* Addr      5G_HT20     5G_HT40     2G_HT40     2G_HT20 */
	{0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
	{0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
	{0x0000a504, 0x06000003, 0x06000003, 0x04000002, 0x04000002},
	{0x0000a508, 0x0a000020, 0x0a000020, 0x08000004, 0x08000004},
	{0x0000a50c, 0x10000023, 0x10000023, 0x0b000200, 0x0b000200},
	{0x0000a510, 0x16000220, 0x16000220, 0x0f000202, 0x0f000202},
	{0x0000a514, 0x1c000223, 0x1c000223, 0x12000400, 0x12000400},
	{0x0000a518, 0x21020220, 0x21020220, 0x16000402, 0x16000402},
	{0x0000a51c, 0x27020223, 0x27020223, 0x19000404, 0x19000404},
	{0x0000a520, 0x2b022220, 0x2b022220, 0x1c000603, 0x1c000603},
	{0x0000a524, 0x2f022222, 0x2f022222, 0x21000a02, 0x21000a02},
	{0x0000a528, 0x34022225, 0x34022225, 0x25000a04, 0x25000a04},
	{0x0000a52c, 0x3a02222a, 0x3a02222a, 0x28000a20, 0x28000a20},
	{0x0000a530, 0x3e02222c, 0x3e02222c, 0x2c000e20, 0x2c000e20},
	{0x0000a534, 0x4202242a, 0x4202242a, 0x30000e22, 0x30000e22},
	{0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
	{0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
	{0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
	{0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861},
	{0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81},
	{0x0000a54c, 0x5c04286b, 0x5c04286b, 0x47001a83, 0x47001a83},
	{0x0000a550, 0x61042a6c, 0x61042a6c, 0x4a001c84, 0x4a001c84},
	{0x0000a554, 0x66062a6c, 0x66062a6c, 0x4e001ce3, 0x4e001ce3},
	{0x0000a558, 0x6b062e6c, 0x6b062e6c, 0x52001ce5, 0x52001ce5},
	{0x0000a55c, 0x7006308c, 0x7006308c, 0x56001ce9, 0x56001ce9},
	{0x0000a560, 0x730a308a, 0x730a308a, 0x5a001ceb, 0x5a001ceb},
	{0x0000a564, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a568, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a56c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a570, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a574, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a578, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a57c, 0x770a308c, 0x770a308c, 0x5d001eec, 0x5d001eec},
	{0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
	{0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
	{0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
	{0x0000a58c, 0x10800023, 0x10800023, 0x0b800200, 0x0b800200},
	{0x0000a590, 0x16800220, 0x16800220, 0x0f800202, 0x0f800202},
	{0x0000a594, 0x1c800223, 0x1c800223, 0x12800400, 0x12800400},
	{0x0000a598, 0x21820220, 0x21820220, 0x16800402, 0x16800402},
	{0x0000a59c, 0x27820223, 0x27820223, 0x19800404, 0x19800404},
	{0x0000a5a0, 0x2b822220, 0x2b822220, 0x1c800603, 0x1c800603},
	{0x0000a5a4, 0x2f822222, 0x2f822222, 0x21800a02, 0x21800a02},
	{0x0000a5a8, 0x34822225, 0x34822225, 0x25800a04, 0x25800a04},
	{0x0000a5ac, 0x3a82222a, 0x3a82222a, 0x28800a20, 0x28800a20},
	{0x0000a5b0, 0x3e82222c, 0x3e82222c, 0x2c800e20, 0x2c800e20},
	{0x0000a5b4, 0x4282242a, 0x4282242a, 0x30800e22, 0x30800e22},
	{0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
	{0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
	{0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
	{0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861},
	{0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81},
	{0x0000a5cc, 0x5c84286b, 0x5c84286b, 0x47801a83, 0x47801a83},
	{0x0000a5d0, 0x61842a6c, 0x61842a6c, 0x4a801c84, 0x4a801c84},
	{0x0000a5d4, 0x66862a6c, 0x66862a6c, 0x4e801ce3, 0x4e801ce3},
	{0x0000a5d8, 0x6b862e6c, 0x6b862e6c, 0x52801ce5, 0x52801ce5},
	{0x0000a5dc, 0x7086308c, 0x7086308c, 0x56801ce9, 0x56801ce9},
	{0x0000a5e0, 0x738a308a, 0x738a308a, 0x5a801ceb, 0x5a801ceb},
	{0x0000a5e4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5e8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5ec, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f0, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f4, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5f8, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x0000a5fc, 0x778a308c, 0x778a308c, 0x5d801eec, 0x5d801eec},
	{0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016048, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
	{0x00016068, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016444, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016448, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
	{0x00016468, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
	{0x00016844, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
	{0x00016848, 0x64000001, 0x64000001, 0x64000001, 0x64000001},
	{0x00016868, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c, 0x6db6db6c},
};
1329
/*
 * AR9300 v2.0 MAC core register initialisation values.
 *
 * Each row is a {register address, value} pair applied in all modes.
 * Opaque, vendor-supplied hardware init data — do not edit values
 * by hand.
 */
static const u32 ar9300_2p0_mac_core[][2] = {
	/* Addr      allmodes */
	{0x00000008, 0x00000000},
	{0x00000030, 0x00020085},
	{0x00000034, 0x00000005},
	{0x00000040, 0x00000000},
	{0x00000044, 0x00000000},
	{0x00000048, 0x00000008},
	{0x0000004c, 0x00000010},
	{0x00000050, 0x00000000},
	{0x00001040, 0x002ffc0f},
	{0x00001044, 0x002ffc0f},
	{0x00001048, 0x002ffc0f},
	{0x0000104c, 0x002ffc0f},
	{0x00001050, 0x002ffc0f},
	{0x00001054, 0x002ffc0f},
	{0x00001058, 0x002ffc0f},
	{0x0000105c, 0x002ffc0f},
	{0x00001060, 0x002ffc0f},
	{0x00001064, 0x002ffc0f},
	{0x000010f0, 0x00000100},
	{0x00001270, 0x00000000},
	{0x000012b0, 0x00000000},
	{0x000012f0, 0x00000000},
	{0x0000143c, 0x00000000},
	{0x0000147c, 0x00000000},
	{0x00008000, 0x00000000},
	{0x00008004, 0x00000000},
	{0x00008008, 0x00000000},
	{0x0000800c, 0x00000000},
	{0x00008018, 0x00000000},
	{0x00008020, 0x00000000},
	{0x00008038, 0x00000000},
	{0x0000803c, 0x00000000},
	{0x00008040, 0x00000000},
	{0x00008044, 0x00000000},
	{0x00008048, 0x00000000},
	{0x0000804c, 0xffffffff},
	{0x00008054, 0x00000000},
	{0x00008058, 0x00000000},
	{0x0000805c, 0x000fc78f},
	{0x00008060, 0x0000000f},
	{0x00008064, 0x00000000},
	{0x00008070, 0x00000310},
	{0x00008074, 0x00000020},
	{0x00008078, 0x00000000},
	{0x0000809c, 0x0000000f},
	{0x000080a0, 0x00000000},
	{0x000080a4, 0x02ff0000},
	{0x000080a8, 0x0e070605},
	{0x000080ac, 0x0000000d},
	{0x000080b0, 0x00000000},
	{0x000080b4, 0x00000000},
	{0x000080b8, 0x00000000},
	{0x000080bc, 0x00000000},
	{0x000080c0, 0x2a800000},
	{0x000080c4, 0x06900168},
	{0x000080c8, 0x13881c20},
	{0x000080cc, 0x01f40000},
	{0x000080d0, 0x00252500},
	{0x000080d4, 0x00a00000},
	{0x000080d8, 0x00400000},
	{0x000080dc, 0x00000000},
	{0x000080e0, 0xffffffff},
	{0x000080e4, 0x0000ffff},
	{0x000080e8, 0x3f3f3f3f},
	{0x000080ec, 0x00000000},
	{0x000080f0, 0x00000000},
	{0x000080f4, 0x00000000},
	{0x000080fc, 0x00020000},
	{0x00008100, 0x00000000},
	{0x00008108, 0x00000052},
	{0x0000810c, 0x00000000},
	{0x00008110, 0x00000000},
	{0x00008114, 0x000007ff},
	{0x00008118, 0x000000aa},
	{0x0000811c, 0x00003210},
	{0x00008124, 0x00000000},
	{0x00008128, 0x00000000},
	{0x0000812c, 0x00000000},
	{0x00008130, 0x00000000},
	{0x00008134, 0x00000000},
	{0x00008138, 0x00000000},
	{0x0000813c, 0x0000ffff},
	{0x00008144, 0xffffffff},
	{0x00008168, 0x00000000},
	{0x0000816c, 0x00000000},
	{0x00008170, 0x18486200},
	{0x00008174, 0x33332210},
	{0x00008178, 0x00000000},
	{0x0000817c, 0x00020000},
	{0x000081c0, 0x00000000},
	{0x000081c4, 0x33332210},
	{0x000081c8, 0x00000000},
	{0x000081cc, 0x00000000},
	{0x000081d4, 0x00000000},
	{0x000081ec, 0x00000000},
	{0x000081f0, 0x00000000},
	{0x000081f4, 0x00000000},
	{0x000081f8, 0x00000000},
	{0x000081fc, 0x00000000},
	{0x00008240, 0x00100000},
	{0x00008244, 0x0010f424},
	{0x00008248, 0x00000800},
	{0x0000824c, 0x0001e848},
	{0x00008250, 0x00000000},
	{0x00008254, 0x00000000},
	{0x00008258, 0x00000000},
	{0x0000825c, 0x40000000},
	{0x00008260, 0x00080922},
	{0x00008264, 0x98a00010},
	{0x00008268, 0xffffffff},
	{0x0000826c, 0x0000ffff},
	{0x00008270, 0x00000000},
	{0x00008274, 0x40000000},
	{0x00008278, 0x003e4180},
	{0x0000827c, 0x00000004},
	{0x00008284, 0x0000002c},
	{0x00008288, 0x0000002c},
	{0x0000828c, 0x000000ff},
	{0x00008294, 0x00000000},
	{0x00008298, 0x00000000},
	{0x0000829c, 0x00000000},
	{0x00008300, 0x00000140},
	{0x00008314, 0x00000000},
	{0x0000831c, 0x0000010d},
	{0x00008328, 0x00000000},
	{0x0000832c, 0x00000007},
	{0x00008330, 0x00000302},
	{0x00008334, 0x00000700},
	{0x00008338, 0x00ff0000},
	{0x0000833c, 0x02400000},
	{0x00008340, 0x000107ff},
	{0x00008344, 0xaa48105b},
	{0x00008348, 0x008f0000},
	{0x0000835c, 0x00000000},
	{0x00008360, 0xffffffff},
	{0x00008364, 0xffffffff},
	{0x00008368, 0x00000000},
	{0x00008370, 0x00000000},
	{0x00008374, 0x000000ff},
	{0x00008378, 0x00000000},
	{0x0000837c, 0x00000000},
	{0x00008380, 0xffffffff},
	{0x00008384, 0xffffffff},
	{0x00008390, 0xffffffff},
	{0x00008394, 0xffffffff},
	{0x00008398, 0x00000000},
	{0x0000839c, 0x00000000},
	{0x000083a0, 0x00000000},
	{0x000083a4, 0x0000fa14},
	{0x000083a8, 0x000f0c00},
	{0x000083ac, 0x33332210},
	{0x000083b0, 0x33332210},
	{0x000083b4, 0x33332210},
	{0x000083b8, 0x33332210},
	{0x000083bc, 0x00000000},
	{0x000083c0, 0x00000000},
	{0x000083c4, 0x00000000},
	{0x000083c8, 0x00000000},
	{0x000083cc, 0x00000200},
	{0x000083d0, 0x000301ff},
};
1493
/*
 * Rx gain initvals for AR9300 2.0 when no external LNA (wo_xlna) is fitted.
 * Each entry is { register address, value }; a single value column means
 * the same value is written for all operating modes.
 * Do not hand-edit the values: they are vendor-generated calibration data.
 */
static const u32 ar9300Common_wo_xlna_rx_gain_table_2p0[][2] = {
	/* Addr      allmodes */
	{0x0000a000, 0x00010000},
	{0x0000a004, 0x00030002},
	{0x0000a008, 0x00050004},
	{0x0000a00c, 0x00810080},
	{0x0000a010, 0x00830082},
	{0x0000a014, 0x01810180},
	{0x0000a018, 0x01830182},
	{0x0000a01c, 0x01850184},
	{0x0000a020, 0x01890188},
	{0x0000a024, 0x018b018a},
	{0x0000a028, 0x018d018c},
	{0x0000a02c, 0x03820190},
	{0x0000a030, 0x03840383},
	{0x0000a034, 0x03880385},
	{0x0000a038, 0x038a0389},
	{0x0000a03c, 0x038c038b},
	{0x0000a040, 0x0390038d},
	{0x0000a044, 0x03920391},
	{0x0000a048, 0x03940393},
	{0x0000a04c, 0x03960395},
	{0x0000a050, 0x00000000},
	{0x0000a054, 0x00000000},
	{0x0000a058, 0x00000000},
	{0x0000a05c, 0x00000000},
	{0x0000a060, 0x00000000},
	{0x0000a064, 0x00000000},
	{0x0000a068, 0x00000000},
	{0x0000a06c, 0x00000000},
	{0x0000a070, 0x00000000},
	{0x0000a074, 0x00000000},
	{0x0000a078, 0x00000000},
	{0x0000a07c, 0x00000000},
	{0x0000a080, 0x29292929},
	{0x0000a084, 0x29292929},
	{0x0000a088, 0x29292929},
	{0x0000a08c, 0x29292929},
	{0x0000a090, 0x22292929},
	{0x0000a094, 0x1d1d2222},
	{0x0000a098, 0x0c111117},
	{0x0000a09c, 0x00030303},
	{0x0000a0a0, 0x00000000},
	{0x0000a0a4, 0x00000000},
	{0x0000a0a8, 0x00000000},
	{0x0000a0ac, 0x00000000},
	{0x0000a0b0, 0x00000000},
	{0x0000a0b4, 0x00000000},
	{0x0000a0b8, 0x00000000},
	{0x0000a0bc, 0x00000000},
	{0x0000a0c0, 0x001f0000},
	{0x0000a0c4, 0x01000101},
	{0x0000a0c8, 0x011e011f},
	{0x0000a0cc, 0x011c011d},
	{0x0000a0d0, 0x02030204},
	{0x0000a0d4, 0x02010202},
	{0x0000a0d8, 0x021f0200},
	{0x0000a0dc, 0x0302021e},
	{0x0000a0e0, 0x03000301},
	{0x0000a0e4, 0x031e031f},
	{0x0000a0e8, 0x0402031d},
	{0x0000a0ec, 0x04000401},
	{0x0000a0f0, 0x041e041f},
	{0x0000a0f4, 0x0502041d},
	{0x0000a0f8, 0x05000501},
	{0x0000a0fc, 0x051e051f},
	{0x0000a100, 0x06010602},
	{0x0000a104, 0x061f0600},
	{0x0000a108, 0x061d061e},
	{0x0000a10c, 0x07020703},
	{0x0000a110, 0x07000701},
	{0x0000a114, 0x00000000},
	{0x0000a118, 0x00000000},
	{0x0000a11c, 0x00000000},
	{0x0000a120, 0x00000000},
	{0x0000a124, 0x00000000},
	{0x0000a128, 0x00000000},
	{0x0000a12c, 0x00000000},
	{0x0000a130, 0x00000000},
	{0x0000a134, 0x00000000},
	{0x0000a138, 0x00000000},
	{0x0000a13c, 0x00000000},
	{0x0000a140, 0x001f0000},
	{0x0000a144, 0x01000101},
	{0x0000a148, 0x011e011f},
	{0x0000a14c, 0x011c011d},
	{0x0000a150, 0x02030204},
	{0x0000a154, 0x02010202},
	{0x0000a158, 0x021f0200},
	{0x0000a15c, 0x0302021e},
	{0x0000a160, 0x03000301},
	{0x0000a164, 0x031e031f},
	{0x0000a168, 0x0402031d},
	{0x0000a16c, 0x04000401},
	{0x0000a170, 0x041e041f},
	{0x0000a174, 0x0502041d},
	{0x0000a178, 0x05000501},
	{0x0000a17c, 0x051e051f},
	{0x0000a180, 0x06010602},
	{0x0000a184, 0x061f0600},
	{0x0000a188, 0x061d061e},
	{0x0000a18c, 0x07020703},
	{0x0000a190, 0x07000701},
	{0x0000a194, 0x00000000},
	{0x0000a198, 0x00000000},
	{0x0000a19c, 0x00000000},
	{0x0000a1a0, 0x00000000},
	{0x0000a1a4, 0x00000000},
	{0x0000a1a8, 0x00000000},
	{0x0000a1ac, 0x00000000},
	{0x0000a1b0, 0x00000000},
	{0x0000a1b4, 0x00000000},
	{0x0000a1b8, 0x00000000},
	{0x0000a1bc, 0x00000000},
	{0x0000a1c0, 0x00000000},
	{0x0000a1c4, 0x00000000},
	{0x0000a1c8, 0x00000000},
	{0x0000a1cc, 0x00000000},
	{0x0000a1d0, 0x00000000},
	{0x0000a1d4, 0x00000000},
	{0x0000a1d8, 0x00000000},
	{0x0000a1dc, 0x00000000},
	{0x0000a1e0, 0x00000000},
	{0x0000a1e4, 0x00000000},
	{0x0000a1e8, 0x00000000},
	{0x0000a1ec, 0x00000000},
	{0x0000a1f0, 0x00000396},
	{0x0000a1f4, 0x00000396},
	{0x0000a1f8, 0x00000396},
	{0x0000a1fc, 0x00000196},
	/* 0xb000-range registers mirror the 0xa000 layout — presumably the
	 * second chain's gain tables; confirm against the AR9300 register map. */
	{0x0000b000, 0x00010000},
	{0x0000b004, 0x00030002},
	{0x0000b008, 0x00050004},
	{0x0000b00c, 0x00810080},
	{0x0000b010, 0x00830082},
	{0x0000b014, 0x01810180},
	{0x0000b018, 0x01830182},
	{0x0000b01c, 0x01850184},
	{0x0000b020, 0x02810280},
	{0x0000b024, 0x02830282},
	{0x0000b028, 0x02850284},
	{0x0000b02c, 0x02890288},
	{0x0000b030, 0x028b028a},
	{0x0000b034, 0x0388028c},
	{0x0000b038, 0x038a0389},
	{0x0000b03c, 0x038c038b},
	{0x0000b040, 0x0390038d},
	{0x0000b044, 0x03920391},
	{0x0000b048, 0x03940393},
	{0x0000b04c, 0x03960395},
	{0x0000b050, 0x00000000},
	{0x0000b054, 0x00000000},
	{0x0000b058, 0x00000000},
	{0x0000b05c, 0x00000000},
	{0x0000b060, 0x00000000},
	{0x0000b064, 0x00000000},
	{0x0000b068, 0x00000000},
	{0x0000b06c, 0x00000000},
	{0x0000b070, 0x00000000},
	{0x0000b074, 0x00000000},
	{0x0000b078, 0x00000000},
	{0x0000b07c, 0x00000000},
	{0x0000b080, 0x32323232},
	{0x0000b084, 0x2f2f3232},
	{0x0000b088, 0x23282a2d},
	{0x0000b08c, 0x1c1e2123},
	{0x0000b090, 0x14171919},
	{0x0000b094, 0x0e0e1214},
	{0x0000b098, 0x03050707},
	{0x0000b09c, 0x00030303},
	{0x0000b0a0, 0x00000000},
	{0x0000b0a4, 0x00000000},
	{0x0000b0a8, 0x00000000},
	{0x0000b0ac, 0x00000000},
	{0x0000b0b0, 0x00000000},
	{0x0000b0b4, 0x00000000},
	{0x0000b0b8, 0x00000000},
	{0x0000b0bc, 0x00000000},
	{0x0000b0c0, 0x003f0020},
	{0x0000b0c4, 0x00400041},
	{0x0000b0c8, 0x0140005f},
	{0x0000b0cc, 0x0160015f},
	{0x0000b0d0, 0x017e017f},
	{0x0000b0d4, 0x02410242},
	{0x0000b0d8, 0x025f0240},
	{0x0000b0dc, 0x027f0260},
	{0x0000b0e0, 0x0341027e},
	{0x0000b0e4, 0x035f0340},
	{0x0000b0e8, 0x037f0360},
	{0x0000b0ec, 0x04400441},
	{0x0000b0f0, 0x0460045f},
	{0x0000b0f4, 0x0541047f},
	{0x0000b0f8, 0x055f0540},
	{0x0000b0fc, 0x057f0560},
	{0x0000b100, 0x06400641},
	{0x0000b104, 0x0660065f},
	{0x0000b108, 0x067e067f},
	{0x0000b10c, 0x07410742},
	{0x0000b110, 0x075f0740},
	{0x0000b114, 0x077f0760},
	{0x0000b118, 0x07800781},
	{0x0000b11c, 0x07a0079f},
	{0x0000b120, 0x07c107bf},
	{0x0000b124, 0x000007c0},
	{0x0000b128, 0x00000000},
	{0x0000b12c, 0x00000000},
	{0x0000b130, 0x00000000},
	{0x0000b134, 0x00000000},
	{0x0000b138, 0x00000000},
	{0x0000b13c, 0x00000000},
	{0x0000b140, 0x003f0020},
	{0x0000b144, 0x00400041},
	{0x0000b148, 0x0140005f},
	{0x0000b14c, 0x0160015f},
	{0x0000b150, 0x017e017f},
	{0x0000b154, 0x02410242},
	{0x0000b158, 0x025f0240},
	{0x0000b15c, 0x027f0260},
	{0x0000b160, 0x0341027e},
	{0x0000b164, 0x035f0340},
	{0x0000b168, 0x037f0360},
	{0x0000b16c, 0x04400441},
	{0x0000b170, 0x0460045f},
	{0x0000b174, 0x0541047f},
	{0x0000b178, 0x055f0540},
	{0x0000b17c, 0x057f0560},
	{0x0000b180, 0x06400641},
	{0x0000b184, 0x0660065f},
	{0x0000b188, 0x067e067f},
	{0x0000b18c, 0x07410742},
	{0x0000b190, 0x075f0740},
	{0x0000b194, 0x077f0760},
	{0x0000b198, 0x07800781},
	{0x0000b19c, 0x07a0079f},
	{0x0000b1a0, 0x07c107bf},
	{0x0000b1a4, 0x000007c0},
	{0x0000b1a8, 0x00000000},
	{0x0000b1ac, 0x00000000},
	{0x0000b1b0, 0x00000000},
	{0x0000b1b4, 0x00000000},
	{0x0000b1b8, 0x00000000},
	{0x0000b1bc, 0x00000000},
	{0x0000b1c0, 0x00000000},
	{0x0000b1c4, 0x00000000},
	{0x0000b1c8, 0x00000000},
	{0x0000b1cc, 0x00000000},
	{0x0000b1d0, 0x00000000},
	{0x0000b1d4, 0x00000000},
	{0x0000b1d8, 0x00000000},
	{0x0000b1dc, 0x00000000},
	{0x0000b1e0, 0x00000000},
	{0x0000b1e4, 0x00000000},
	{0x0000b1e8, 0x00000000},
	{0x0000b1ec, 0x00000000},
	{0x0000b1f0, 0x00000396},
	{0x0000b1f4, 0x00000396},
	{0x0000b1f8, 0x00000396},
	{0x0000b1fc, 0x00000196},
};
1753
/*
 * SoC register preamble for AR9300 2.0: { register address, value } pairs
 * written for all modes before normal operation.  Vendor-generated data;
 * do not hand-edit the values.
 */
static const u32 ar9300_2p0_soc_preamble[][2] = {
	/* Addr      allmodes */
	{0x000040a4, 0x00a0c1c9},
	{0x00007008, 0x00000000},
	{0x00007020, 0x00000000},
	{0x00007034, 0x00000002},
	{0x00007038, 0x000004c2},
};
1762
/*
 * PCIe PHY (serdes) settings, AR9300 2.0 — per the name: PLL kept on and
 * CLKREQ disabled in L1.  Register 0x4040 is written twice on purpose
 * (two-stage serdes programming); order matters.
 */
static const u32 ar9300PciePhy_pll_on_clkreq_disable_L1_2p0[][2] = {
	/* Addr      allmodes */
	{0x00004040, 0x08212e5e},
	{0x00004040, 0x0008003b},
	{0x00004044, 0x00000000},
};
1769
/*
 * PCIe PHY (serdes) settings, AR9300 2.0 — per the name: CLKREQ enabled
 * in L1.  Only the first 0x4040 value differs from the other two PCIe
 * PHY variants in this file.
 */
static const u32 ar9300PciePhy_clkreq_enable_L1_2p0[][2] = {
	/* Addr      allmodes */
	{0x00004040, 0x08253e5e},
	{0x00004040, 0x0008003b},
	{0x00004044, 0x00000000},
};
1776
/*
 * PCIe PHY (serdes) settings, AR9300 2.0 — per the name: CLKREQ disabled
 * in L1 (PLL not forced on; contrast with the pll_on variant above).
 */
static const u32 ar9300PciePhy_clkreq_disable_L1_2p0[][2] = {
	/* Addr      allmodes */
	{0x00004040, 0x08213e5e},
	{0x00004040, 0x0008003b},
	{0x00004044, 0x00000000},
};
1783
1784#endif /* INITVALS_9003_H */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
new file mode 100644
index 000000000000..37ba37481a47
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -0,0 +1,614 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#include "hw.h"
17#include "ar9003_mac.h"
18
/* Enable receive by writing 0 to the MAC command register (AR_CR). */
static void ar9003_hw_rx_enable(struct ath_hw *hw)
{
	REG_WRITE(hw, AR_CR, 0);
}
23
24static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
25{
26 int checksum;
27
28 checksum = ads->info + ads->link
29 + ads->data0 + ads->ctl3
30 + ads->data1 + ads->ctl5
31 + ads->data2 + ads->ctl7
32 + ads->data3 + ads->ctl9;
33
34 return ((checksum & 0xffff) + (checksum >> 16)) & AR_TxPtrChkSum;
35}
36
/*
 * Set the next-descriptor link pointer and refresh the pointer checksum
 * in ctl10, since "link" is one of the words the checksum covers.
 */
static void ar9003_hw_set_desc_link(void *ds, u32 ds_link)
{
	struct ar9003_txc *ads = ds;

	ads->link = ds_link;
	/* Recompute AR_TxPtrChkSum; other ctl10 bits (descriptor id) kept. */
	ads->ctl10 &= ~AR_TxPtrChkSum;
	ads->ctl10 |= ar9003_calc_ptr_chksum(ads);
}
45
/* Return (via *ds_link) a pointer to the descriptor's link-pointer word. */
static void ar9003_hw_get_desc_link(void *ds, u32 **ds_link)
{
	struct ar9003_txc *ads = ds;

	*ds_link = &ads->link;
}
52
/*
 * Read and decode the interrupt status registers into the abstract
 * ATH9K_INT_* bitmask (*masked).  Returns false when nothing is pending
 * (or the device appears gone, reading 0xffffffff), true otherwise.
 *
 * When ATH9K_HW_CAP_RAC_SUPPORTED is set, status is read through the
 * AR_ISR_RAC / AR_ISR_S5_S shadow registers and no explicit write-back
 * is performed; otherwise each consumed secondary status register is
 * written back to acknowledge the handled bits.
 */
static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
{
	u32 isr = 0;
	u32 mask2 = 0;
	struct ath9k_hw_capabilities *pCap = &ah->caps;
	u32 sync_cause = 0;
	struct ath_common *common = ath9k_hw_common(ah);

	/* Only read AR_ISR while the MAC IRQ is asserted and RTC is on. */
	if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
		if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
				== AR_RTC_STATUS_ON)
			isr = REG_READ(ah, AR_ISR);
	}

	sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;

	*masked = 0;

	if (!isr && !sync_cause)
		return false;

	if (isr) {
		if (isr & AR_ISR_BCNMISC) {
			u32 isr2;
			isr2 = REG_READ(ah, AR_ISR_S2);

			/* Remap AR_ISR_S2_* bit positions onto ATH9K_INT_*. */
			mask2 |= ((isr2 & AR_ISR_S2_TIM) >>
					MAP_ISR_S2_TIM);
			mask2 |= ((isr2 & AR_ISR_S2_DTIM) >>
					MAP_ISR_S2_DTIM);
			mask2 |= ((isr2 & AR_ISR_S2_DTIMSYNC) >>
					MAP_ISR_S2_DTIMSYNC);
			mask2 |= ((isr2 & AR_ISR_S2_CABEND) >>
					MAP_ISR_S2_CABEND);
			mask2 |= ((isr2 & AR_ISR_S2_GTT) <<
					MAP_ISR_S2_GTT);
			mask2 |= ((isr2 & AR_ISR_S2_CST) <<
					MAP_ISR_S2_CST);
			mask2 |= ((isr2 & AR_ISR_S2_TSFOOR) >>
					MAP_ISR_S2_TSFOOR);

			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
				/* Ack secondary status; drop BCNMISC from isr. */
				REG_WRITE(ah, AR_ISR_S2, isr2);
				isr &= ~AR_ISR_BCNMISC;
			}
		}

		if ((pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED))
			isr = REG_READ(ah, AR_ISR_RAC);

		/* All-ones read: device likely removed from the bus. */
		if (isr == 0xffffffff) {
			*masked = 0;
			return false;
		}

		*masked = isr & ATH9K_INT_COMMON;

		if (ah->config.rx_intr_mitigation)
			if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
				*masked |= ATH9K_INT_RXLP;

		if (ah->config.tx_intr_mitigation)
			if (isr & (AR_ISR_TXMINTR | AR_ISR_TXINTM))
				*masked |= ATH9K_INT_TX;

		/* Low- and high-priority RX queues map to separate bits. */
		if (isr & (AR_ISR_LP_RXOK | AR_ISR_RXERR))
			*masked |= ATH9K_INT_RXLP;

		if (isr & AR_ISR_HP_RXOK)
			*masked |= ATH9K_INT_RXHP;

		if (isr & (AR_ISR_TXOK | AR_ISR_TXERR | AR_ISR_TXEOL)) {
			*masked |= ATH9K_INT_TX;

			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
				u32 s0, s1;
				/* Read-then-write-back acks per-queue TX status. */
				s0 = REG_READ(ah, AR_ISR_S0);
				REG_WRITE(ah, AR_ISR_S0, s0);
				s1 = REG_READ(ah, AR_ISR_S1);
				REG_WRITE(ah, AR_ISR_S1, s1);

				isr &= ~(AR_ISR_TXOK | AR_ISR_TXERR |
						AR_ISR_TXEOL);
			}
		}

		if (isr & AR_ISR_GENTMR) {
			u32 s5;

			if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
				s5 = REG_READ(ah, AR_ISR_S5_S);
			else
				s5 = REG_READ(ah, AR_ISR_S5);

			ah->intr_gen_timer_trigger =
				MS(s5, AR_ISR_S5_GENTIMER_TRIG);

			ah->intr_gen_timer_thresh =
				MS(s5, AR_ISR_S5_GENTIMER_THRESH);

			if (ah->intr_gen_timer_trigger)
				*masked |= ATH9K_INT_GENTIMER;

			if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
				REG_WRITE(ah, AR_ISR_S5, s5);
				isr &= ~AR_ISR_GENTMR;
			}

		}

		*masked |= mask2;

		if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
			REG_WRITE(ah, AR_ISR, isr);

			/* Flush the write (posted-write barrier read). */
			(void) REG_READ(ah, AR_ISR);
		}
	}

	if (sync_cause) {
		if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
			/* Bus completion timeout: pulse host-interface reset. */
			REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
			REG_WRITE(ah, AR_RC, 0);
			*masked |= ATH9K_INT_FATAL;
		}

		if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
			ath_print(common, ATH_DBG_INTERRUPT,
				  "AR_INTR_SYNC_LOCAL_TIMEOUT\n");

		REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
		(void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);

	}
	return true;
}
189
/*
 * Fill the DMA-visible parts of a TX control descriptor for one segment
 * of a frame: identification word, buffer pointer/length, pointer
 * checksum, and the segment-chaining bits (AR_TxMore).
 *
 * NOTE(review): the first-segment path ORs into ctl12 rather than
 * assigning it — it relies on ar9003_hw_set11n_txdesc() having already
 * initialized ctl11/ctl12 for this descriptor; confirm caller ordering.
 */
static void ar9003_hw_fill_txdesc(struct ath_hw *ah, void *ds, u32 seglen,
				  bool is_firstseg, bool is_lastseg,
				  const void *ds0, dma_addr_t buf_addr,
				  unsigned int qcu)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;
	unsigned int descid = 0;

	/* Vendor id + desc-type + ctrl flags + QCU number + fixed bits. */
	ads->info = (ATHEROS_VENDOR_ID << AR_DescId_S) |
		(1 << AR_TxRxDesc_S) |
		(1 << AR_CtrlStat_S) |
		(qcu << AR_TxQcuNum_S) | 0x17;

	/* Only the first of the four buffer pointers is used here. */
	ads->data0 = buf_addr;
	ads->data1 = 0;
	ads->data2 = 0;
	ads->data3 = 0;

	ads->ctl3 = (seglen << AR_BufLen_S);
	ads->ctl3 &= AR_BufLen;

	/* Fill in pointer checksum and descriptor id */
	ads->ctl10 = ar9003_calc_ptr_chksum(ads);
	ads->ctl10 |= (descid << AR_TxDescId_S);

	if (is_firstseg) {
		/* More segments follow unless this is also the last one. */
		ads->ctl12 |= (is_lastseg ? 0 : AR_TxMore);
	} else if (is_lastseg) {
		/* Final segment: copy rate/tries words from the first desc. */
		ads->ctl11 = 0;
		ads->ctl12 = 0;
		ads->ctl13 = AR9003TXC_CONST(ds0)->ctl13;
		ads->ctl14 = AR9003TXC_CONST(ds0)->ctl14;
	} else {
		/* Intermediate descriptor in a multi-descriptor frame. */
		ads->ctl11 = 0;
		ads->ctl12 = AR_TxMore;
		ads->ctl13 = 0;
		ads->ctl14 = 0;
	}
}
230
/*
 * Consume the next entry from the TX status ring and translate it into
 * an ath_tx_status.  Returns -EINPROGRESS when the hardware has not yet
 * completed that entry, -EIO on a corrupt status descriptor, 0 on
 * success.  The consumed ring entry is zeroed in all completed cases so
 * it can be reused.
 */
static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
				 struct ath_tx_status *ts)
{
	struct ar9003_txs *ads;

	ads = &ah->ts_ring[ah->ts_tail];

	if ((ads->status8 & AR_TxDone) == 0)
		return -EINPROGRESS;

	/* Advance the software tail of the circular status ring. */
	ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;

	/* Sanity-check descriptor identification before trusting it. */
	if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
	    (MS(ads->ds_info, AR_TxRxDesc) != 1)) {
		ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
			  "Tx Descriptor error %x\n", ads->ds_info);
		memset(ads, 0, sizeof(*ads));
		return -EIO;
	}

	ts->qid = MS(ads->ds_info, AR_TxQcuNum);
	ts->desc_id = MS(ads->status1, AR_TxDescId);
	ts->ts_seqnum = MS(ads->status8, AR_SeqNum);
	ts->ts_tstamp = ads->status4;
	ts->ts_status = 0;
	ts->ts_flags = 0;

	/* Error conditions; FIFO/data underruns also bump the trigger level. */
	if (ads->status3 & AR_ExcessiveRetries)
		ts->ts_status |= ATH9K_TXERR_XRETRY;
	if (ads->status3 & AR_Filtered)
		ts->ts_status |= ATH9K_TXERR_FILT;
	if (ads->status3 & AR_FIFOUnderrun) {
		ts->ts_status |= ATH9K_TXERR_FIFO;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->status8 & AR_TxOpExceeded)
		ts->ts_status |= ATH9K_TXERR_XTXOP;
	if (ads->status3 & AR_TxTimerExpired)
		ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;

	if (ads->status3 & AR_DescCfgErr)
		ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
	if (ads->status3 & AR_TxDataUnderrun) {
		ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->status3 & AR_TxDelimUnderrun) {
		ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
		ath9k_hw_updatetxtriglevel(ah, true);
	}
	if (ads->status2 & AR_TxBaStatus) {
		/* Block-ack received: capture the 64-bit BA bitmap. */
		ts->ts_flags |= ATH9K_TX_BA;
		ts->ba_low = ads->status5;
		ts->ba_high = ads->status6;
	}

	ts->ts_rateindex = MS(ads->status8, AR_FinalTxIdx);

	/* Per-chain RSSI for control (ctl) and extension (ext) channels. */
	ts->ts_rssi = MS(ads->status7, AR_TxRSSICombined);
	ts->ts_rssi_ctl0 = MS(ads->status2, AR_TxRSSIAnt00);
	ts->ts_rssi_ctl1 = MS(ads->status2, AR_TxRSSIAnt01);
	ts->ts_rssi_ctl2 = MS(ads->status2, AR_TxRSSIAnt02);
	ts->ts_rssi_ext0 = MS(ads->status7, AR_TxRSSIAnt10);
	ts->ts_rssi_ext1 = MS(ads->status7, AR_TxRSSIAnt11);
	ts->ts_rssi_ext2 = MS(ads->status7, AR_TxRSSIAnt12);
	ts->ts_shortretry = MS(ads->status3, AR_RTSFailCnt);
	ts->ts_longretry = MS(ads->status3, AR_DataFailCnt);
	ts->ts_virtcol = MS(ads->status3, AR_VirtRetryCnt);
	ts->ts_antenna = 0;

	ts->tid = MS(ads->status8, AR_TxTid);

	memset(ads, 0, sizeof(*ads));

	return 0;
}
307
/*
 * Program the per-frame control words of a TX descriptor: frame length,
 * TX power, key index/type, frame type and the assorted flag bits.
 * Called for the first descriptor of a frame before fill_txdesc().
 */
static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
		u32 pktlen, enum ath9k_pkt_type type, u32 txpower,
		u32 keyIx, enum ath9k_key_type keyType, u32 flags)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;

	/* Clamp to the regulatory/config limit, then to the 6-bit field. */
	if (txpower > ah->txpower_limit)
		txpower = ah->txpower_limit;

	txpower += ah->txpower_indexoffset;
	if (txpower > 63)
		txpower = 63;

	ads->ctl11 = (pktlen & AR_FrameLen)
		| (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
		| SM(txpower, AR_XmitPower)
		| (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0)
		| (flags & ATH9K_TXDESC_LOWRXCHAIN ? AR_LowRxChain : 0);

	ads->ctl12 =
		(keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
		| SM(type, AR_FrameType)
		| (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
		| (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
		| (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);

	ads->ctl17 = SM(keyType, AR_EncrType) |
		     (flags & ATH9K_TXDESC_LDPC ? AR_LDPC : 0);
	ads->ctl18 = 0;
	ads->ctl19 = AR_Not_Sounding;

	ads->ctl20 = 0;
	ads->ctl21 = 0;
	ads->ctl22 = 0;
}
345
/*
 * Program the 4-entry multi-rate-retry scenario (tries, rates, packet
 * durations, rate flags) and RTS/CTS protection bits into the first
 * descriptor, then mirror the tries/rate words into the last descriptor
 * of the frame.  rtsctsDuration and nseries are currently unused here.
 */
static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
					  void *lastds,
					  u32 durUpdateEn, u32 rtsctsRate,
					  u32 rtsctsDuration,
					  struct ath9k_11n_rate_series series[],
					  u32 nseries, u32 flags)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;
	struct ar9003_txc *last_ads = (struct ar9003_txc *) lastds;
	u_int32_t ctl11;

	if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
		ctl11 = ads->ctl11;

		/* RTS and CTS-to-self are mutually exclusive; RTS wins. */
		if (flags & ATH9K_TXDESC_RTSENA) {
			ctl11 &= ~AR_CTSEnable;
			ctl11 |= AR_RTSEnable;
		} else {
			ctl11 &= ~AR_RTSEnable;
			ctl11 |= AR_CTSEnable;
		}

		ads->ctl11 = ctl11;
	} else {
		ads->ctl11 = (ads->ctl11 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ctl13 = set11nTries(series, 0)
		| set11nTries(series, 1)
		| set11nTries(series, 2)
		| set11nTries(series, 3)
		| (durUpdateEn ? AR_DurUpdateEna : 0)
		| SM(0, AR_BurstDur);

	ads->ctl14 = set11nRate(series, 0)
		| set11nRate(series, 1)
		| set11nRate(series, 2)
		| set11nRate(series, 3);

	ads->ctl15 = set11nPktDurRTSCTS(series, 0)
		| set11nPktDurRTSCTS(series, 1);

	ads->ctl16 = set11nPktDurRTSCTS(series, 2)
		| set11nPktDurRTSCTS(series, 3);

	ads->ctl18 = set11nRateFlags(series, 0)
		| set11nRateFlags(series, 1)
		| set11nRateFlags(series, 2)
		| set11nRateFlags(series, 3)
		| SM(rtsctsRate, AR_RTSCTSRate);
	ads->ctl19 = AR_Not_Sounding;

	/* The last descriptor must carry the same tries/rate programming. */
	last_ads->ctl13 = ads->ctl13;
	last_ads->ctl14 = ads->ctl14;
}
401
/*
 * Mark a descriptor as the first subframe of an A-MPDU aggregate and
 * program the total aggregate length into ctl17.
 */
static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
					u32 aggrLen)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;

	ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);

	ads->ctl17 &= ~AR_AggrLen;
	ads->ctl17 |= SM(aggrLen, AR_AggrLen);
}
412
/*
 * Mark a descriptor as a middle subframe of an A-MPDU aggregate and set
 * the number of pad delimiters preceding it.
 */
static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
					 u32 numDelims)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;
	unsigned int ctl17;

	ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);

	/*
	 * Use a stack variable to manipulate ctl17 so the (uncached)
	 * descriptor memory sees a single read-modify-write.
	 */
	ctl17 = ads->ctl17;
	ctl17 &= ~AR_PadDelim;
	ctl17 |= SM(numDelims, AR_PadDelim);
	ads->ctl17 = ctl17;
}
430
431static void ar9003_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
432{
433 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
434
435 ads->ctl12 |= AR_IsAggr;
436 ads->ctl12 &= ~AR_MoreAggr;
437 ads->ctl17 &= ~AR_PadDelim;
438}
439
440static void ar9003_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
441{
442 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
443
444 ads->ctl12 &= (~AR_IsAggr & ~AR_MoreAggr);
445}
446
/* Program the burst duration field (AR_BurstDur) of ctl13. */
static void ar9003_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
					   u32 burstDuration)
{
	struct ar9003_txc *ads = (struct ar9003_txc *) ds;

	ads->ctl13 &= ~AR_BurstDur;
	ads->ctl13 |= SM(burstDuration, AR_BurstDur);

}
456
457static void ar9003_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
458 u32 vmf)
459{
460 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
461
462 if (vmf)
463 ads->ctl11 |= AR_VirtMoreFrag;
464 else
465 ads->ctl11 &= ~AR_VirtMoreFrag;
466}
467
/*
 * Install the AR9003 implementations of the hardware-family MAC ops
 * into the ath_hw_ops table.
 */
void ar9003_hw_attach_mac_ops(struct ath_hw *hw)
{
	struct ath_hw_ops *ops = ath9k_hw_ops(hw);

	ops->rx_enable = ar9003_hw_rx_enable;
	ops->set_desc_link = ar9003_hw_set_desc_link;
	ops->get_desc_link = ar9003_hw_get_desc_link;
	ops->get_isr = ar9003_hw_get_isr;
	ops->fill_txdesc = ar9003_hw_fill_txdesc;
	ops->proc_txdesc = ar9003_hw_proc_txdesc;
	ops->set11n_txdesc = ar9003_hw_set11n_txdesc;
	ops->set11n_ratescenario = ar9003_hw_set11n_ratescenario;
	ops->set11n_aggr_first = ar9003_hw_set11n_aggr_first;
	ops->set11n_aggr_middle = ar9003_hw_set11n_aggr_middle;
	ops->set11n_aggr_last = ar9003_hw_set11n_aggr_last;
	ops->clr11n_aggr = ar9003_hw_clr11n_aggr;
	ops->set11n_burstduration = ar9003_hw_set11n_burstduration;
	ops->set11n_virtualmorefrag = ar9003_hw_set11n_virtualmorefrag;
}
487
/* Tell the hardware the RX buffer size (masked to AR_DATABUF_SIZE_MASK). */
void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size)
{
	REG_WRITE(ah, AR_DATABUF_SIZE, buf_size & AR_DATABUF_SIZE_MASK);
}
EXPORT_SYMBOL(ath9k_hw_set_rx_bufsize);
493
/*
 * Hand an RX buffer (DMA address rxdp) to the hardware, on either the
 * high-priority or low-priority EDMA RX queue.
 */
void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
			    enum ath9k_rx_qtype qtype)
{
	if (qtype == ATH9K_RX_QUEUE_HP)
		REG_WRITE(ah, AR_HP_RXDP, rxdp);
	else
		REG_WRITE(ah, AR_LP_RXDP, rxdp);
}
EXPORT_SYMBOL(ath9k_hw_addrxbuf_edma);
503
504int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
505 void *buf_addr)
506{
507 struct ar9003_rxs *rxsp = (struct ar9003_rxs *) buf_addr;
508 unsigned int phyerr;
509
510 /* TODO: byte swap on big endian for ar9300_10 */
511
512 if ((rxsp->status11 & AR_RxDone) == 0)
513 return -EINPROGRESS;
514
515 if (MS(rxsp->ds_info, AR_DescId) != 0x168c)
516 return -EINVAL;
517
518 if ((rxsp->ds_info & (AR_TxRxDesc | AR_CtrlStat)) != 0)
519 return -EINPROGRESS;
520
521 if (!rxs)
522 return 0;
523
524 rxs->rs_status = 0;
525 rxs->rs_flags = 0;
526
527 rxs->rs_datalen = rxsp->status2 & AR_DataLen;
528 rxs->rs_tstamp = rxsp->status3;
529
530 /* XXX: Keycache */
531 rxs->rs_rssi = MS(rxsp->status5, AR_RxRSSICombined);
532 rxs->rs_rssi_ctl0 = MS(rxsp->status1, AR_RxRSSIAnt00);
533 rxs->rs_rssi_ctl1 = MS(rxsp->status1, AR_RxRSSIAnt01);
534 rxs->rs_rssi_ctl2 = MS(rxsp->status1, AR_RxRSSIAnt02);
535 rxs->rs_rssi_ext0 = MS(rxsp->status5, AR_RxRSSIAnt10);
536 rxs->rs_rssi_ext1 = MS(rxsp->status5, AR_RxRSSIAnt11);
537 rxs->rs_rssi_ext2 = MS(rxsp->status5, AR_RxRSSIAnt12);
538
539 if (rxsp->status11 & AR_RxKeyIdxValid)
540 rxs->rs_keyix = MS(rxsp->status11, AR_KeyIdx);
541 else
542 rxs->rs_keyix = ATH9K_RXKEYIX_INVALID;
543
544 rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
545 rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;
546
547 rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
548 rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
549 rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
550 rxs->rs_flags = (rxsp->status4 & AR_GI) ? ATH9K_RX_GI : 0;
551 rxs->rs_flags |= (rxsp->status4 & AR_2040) ? ATH9K_RX_2040 : 0;
552
553 rxs->evm0 = rxsp->status6;
554 rxs->evm1 = rxsp->status7;
555 rxs->evm2 = rxsp->status8;
556 rxs->evm3 = rxsp->status9;
557 rxs->evm4 = (rxsp->status10 & 0xffff);
558
559 if (rxsp->status11 & AR_PreDelimCRCErr)
560 rxs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
561
562 if (rxsp->status11 & AR_PostDelimCRCErr)
563 rxs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
564
565 if (rxsp->status11 & AR_DecryptBusyErr)
566 rxs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
567
568 if ((rxsp->status11 & AR_RxFrameOK) == 0) {
569 if (rxsp->status11 & AR_CRCErr) {
570 rxs->rs_status |= ATH9K_RXERR_CRC;
571 } else if (rxsp->status11 & AR_PHYErr) {
572 rxs->rs_status |= ATH9K_RXERR_PHY;
573 phyerr = MS(rxsp->status11, AR_PHYErrCode);
574 rxs->rs_phyerr = phyerr;
575 } else if (rxsp->status11 & AR_DecryptCRCErr) {
576 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
577 } else if (rxsp->status11 & AR_MichaelErr) {
578 rxs->rs_status |= ATH9K_RXERR_MIC;
579 }
580 }
581
582 return 0;
583}
584EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
585
/*
 * Zero the TX status ring, reset the software tail index, and program
 * the ring's physical start/end addresses into the hardware.
 */
void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
{
	ah->ts_tail = 0;

	memset((void *) ah->ts_ring, 0,
		ah->ts_size * sizeof(struct ar9003_txs));

	ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
		  "TS Start 0x%x End 0x%x Virt %p, Size %d\n",
		   ah->ts_paddr_start, ah->ts_paddr_end,
		   ah->ts_ring, ah->ts_size);

	REG_WRITE(ah, AR_Q_STATUS_RING_START, ah->ts_paddr_start);
	REG_WRITE(ah, AR_Q_STATUS_RING_END, ah->ts_paddr_end);
}
601
/*
 * Record the caller-allocated TX status ring (virtual address, DMA
 * address, entry count) in ath_hw, then reset/program it via
 * ath9k_hw_reset_txstatus_ring().  ts_start must be DMA-coherent
 * memory of at least size * sizeof(struct ar9003_txs) bytes.
 */
void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
			       u32 ts_paddr_start,
			       u8 size)
{

	ah->ts_paddr_start = ts_paddr_start;
	ah->ts_paddr_end = ts_paddr_start + (size * sizeof(struct ar9003_txs));
	ah->ts_size = size;
	ah->ts_ring = (struct ar9003_txs *) ts_start;

	ath9k_hw_reset_txstatus_ring(ah);
}
EXPORT_SYMBOL(ath9k_hw_setup_statusring);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
new file mode 100644
index 000000000000..f17558b14539
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -0,0 +1,120 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
#ifndef AR9003_MAC_H
#define AR9003_MAC_H

/* ds_info word: descriptor identification fields (mask / shift pairs). */
#define AR_DescId	0xffff0000
#define AR_DescId_S	16
#define AR_CtrlStat	0x00004000
#define AR_CtrlStat_S	14
#define AR_TxRxDesc	0x00008000
#define AR_TxRxDesc_S	15
#define AR_TxQcuNum	0x00000f00
#define AR_TxQcuNum_S	8

/* ctl3/5/7/9: buffer length field. */
#define AR_BufLen	0x0fff0000
#define AR_BufLen_S	16

/* ctl10: descriptor id (high half) and pointer checksum (low half). */
#define AR_TxDescId	0xffff0000
#define AR_TxDescId_S	16
#define AR_TxPtrChkSum	0x0000ffff

/* TX status word 8: traffic identifier of the completed frame. */
#define AR_TxTid	0xf0000000
#define AR_TxTid_S	28

#define AR_LowRxChain	0x00004000

#define AR_Not_Sounding	0x20000000

/*
 * Shift counts used by ar9003_hw_get_isr() to remap AR_ISR_S2_* bits
 * onto the corresponding ATH9K_INT_* positions.
 */
#define MAP_ISR_S2_CST	6
#define MAP_ISR_S2_GTT	6
#define MAP_ISR_S2_TIM	3
#define MAP_ISR_S2_CABEND	0
#define MAP_ISR_S2_DTIMSYNC	7
#define MAP_ISR_S2_DTIM	7
#define MAP_ISR_S2_TSFOOR	4

/* Read-only view of a TX control descriptor. */
#define AR9003TXC_CONST(_ds) ((const struct ar9003_txc *) _ds)
52
/*
 * EDMA RX status block, written by hardware at the start of each RX
 * buffer: identification word plus eleven 32-bit status words.  Field
 * meanings are decoded in ath9k_hw_process_rxdesc_edma().
 */
struct ar9003_rxs {
	u32 ds_info;
	u32 status1;
	u32 status2;
	u32 status3;
	u32 status4;
	u32 status5;
	u32 status6;
	u32 status7;
	u32 status8;
	u32 status9;
	u32 status10;
	u32 status11;
} __packed;
67
/*
 * Transmit Control Descriptor: the DMA layout programmed by
 * ar9003_hw_fill_txdesc() and the set11n_* helpers.  Padded to 128
 * bytes (32 dwords) so each descriptor occupies a full cache line.
 */
struct ar9003_txc {
	u32 info;   /* descriptor information */
	u32 link;   /* link pointer */
	u32 data0;  /* data pointer to 1st buffer */
	u32 ctl3;   /* DMA control 3 */
	u32 data1;  /* data pointer to 2nd buffer */
	u32 ctl5;   /* DMA control 5 */
	u32 data2;  /* data pointer to 3rd buffer */
	u32 ctl7;   /* DMA control 7 */
	u32 data3;  /* data pointer to 4th buffer */
	u32 ctl9;   /* DMA control 9 */
	u32 ctl10;  /* DMA control 10 */
	u32 ctl11;  /* DMA control 11 */
	u32 ctl12;  /* DMA control 12 */
	u32 ctl13;  /* DMA control 13 */
	u32 ctl14;  /* DMA control 14 */
	u32 ctl15;  /* DMA control 15 */
	u32 ctl16;  /* DMA control 16 */
	u32 ctl17;  /* DMA control 17 */
	u32 ctl18;  /* DMA control 18 */
	u32 ctl19;  /* DMA control 19 */
	u32 ctl20;  /* DMA control 20 */
	u32 ctl21;  /* DMA control 21 */
	u32 ctl22;  /* DMA control 22 */
	u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */
} __packed;
95
/*
 * TX status ring entry, written by hardware on frame completion and
 * decoded by ar9003_hw_proc_txdesc().
 */
struct ar9003_txs {
	u32 ds_info;
	u32 status1;
	u32 status2;
	u32 status3;
	u32 status4;
	u32 status5;
	u32 status6;
	u32 status7;
	u32 status8;
} __packed;
107
/* Public AR9003 MAC interface; implementations live in ar9003_mac.c. */
void ar9003_hw_attach_mac_ops(struct ath_hw *hw);
void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size);
void ath9k_hw_addrxbuf_edma(struct ath_hw *ah, u32 rxdp,
			    enum ath9k_rx_qtype qtype);

int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah,
				 struct ath_rx_status *rxs,
				 void *buf_addr);
void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah);
void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
			       u32 ts_paddr_start,
			       u8 size);
120#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
new file mode 100644
index 000000000000..80431a2f6dc1
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -0,0 +1,1134 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "hw.h"
18#include "ar9003_phy.h"
19
/**
 * ar9003_hw_set_channel - set channel on single-chip device
 * @ah: atheros hardware structure
 * @chan: the channel to program the synthesizer to
 *
 * This is the function to change channel on single-chip devices, that is
 * all devices after ar9280.
 *
 * This function takes the channel value in MHz and sets
 * hardware channel value. Assumes writes have been enabled to analog bus.
 *
 * Actual Expression,
 *
 * For 2GHz channel,
 * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
 * (freq_ref = 40MHz)
 *
 * For 5GHz channel,
 * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
 * (freq_ref = 40MHz/(24>>amodeRefSel))
 *
 * For 5GHz channels which are 5MHz spaced,
 * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
 * (freq_ref = 40MHz)
 *
 * Returns 0 on success.
 */
static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
{
	u16 bMode, fracMode = 0, aModeRefSel = 0;
	u32 freq, channelSel = 0, reg32 = 0;
	struct chan_centers centers;
	int loadSynthChannel;

	ath9k_hw_get_channel_centers(ah, chan, &centers);
	freq = centers.synth_center;

	if (freq < 4800) {     /* 2 GHz, fractional mode */
		channelSel = CHANSEL_2G(freq);
		/* Set to 2G mode */
		bMode = 1;
	} else {
		channelSel = CHANSEL_5G(freq);
		/* Doubler is ON, so, divide channelSel by 2. */
		channelSel >>= 1;
		/* Set to 5G mode */
		bMode = 0;
	}

	/* Enable fractional mode for all channels */
	fracMode = 1;
	aModeRefSel = 0;
	loadSynthChannel = 0;

	reg32 = (bMode << 29);
	REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);

	/* Enable Long shift Select for Synthesizer */
	REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_SYNTH4,
		      AR_PHY_SYNTH4_LONG_SHIFT_SELECT, 1);

	/* Program Synth. setting */
	reg32 = (channelSel << 2) | (fracMode << 30) |
		(aModeRefSel << 28) | (loadSynthChannel << 31);
	REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);

	/*
	 * Toggle Load Synth channel bit: write once with the bit clear
	 * (above), then again with it set, to latch the new channel.
	 */
	loadSynthChannel = 1;
	reg32 = (channelSel << 2) | (fracMode << 30) |
		(aModeRefSel << 28) | (loadSynthChannel << 31);
	REG_WRITE(ah, AR_PHY_65NM_CH0_SYNTH7, reg32);

	ah->curchan = chan;
	ah->curchan_rad_index = -1;

	return 0;
}
95
/**
 * ar9003_hw_spur_mitigate_mrc_cck - spur mitigation for MRC CCK
 * @ah: atheros hardware structure
 * @chan: the channel currently being configured
 *
 * For single-chip solutions. Converts to baseband spur frequency given the
 * input channel frequency and compute register settings below.
 *
 * Checks each known 2 GHz spur frequency against the control channel; the
 * first spur within +/- 10 MHz is programmed into the CCK spur mitigation
 * registers. If none is in range, CCK spur mitigation is disabled.
 */
static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
					    struct ath9k_channel *chan)
{
	u32 spur_freq[4] = { 2420, 2440, 2464, 2480 };
	int cur_bb_spur, negative = 0, cck_spur_freq;
	int i;

	/*
	 * Need to verify range +/- 10 MHz in control channel, otherwise spur
	 * is out-of-band and can be ignored.
	 */

	for (i = 0; i < 4; i++) {
		negative = 0;
		cur_bb_spur = spur_freq[i] - chan->channel;

		if (cur_bb_spur < 0) {
			negative = 1;
			cur_bb_spur = -cur_bb_spur;
		}
		if (cur_bb_spur < 10) {
			/* Scale the offset into the 20-bit register field */
			cck_spur_freq = (int)((cur_bb_spur << 19) / 11);

			if (negative == 1)
				cck_spur_freq = -cck_spur_freq;

			cck_spur_freq = cck_spur_freq & 0xfffff;

			REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
				      AR_PHY_AGC_CONTROL_YCOK_MAX, 0x7);
			REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
				      AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR, 0x7f);
			REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
				      AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE,
				      0x2);
			REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
				      AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT,
				      0x1);
			REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
				      AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ,
				      cck_spur_freq);

			return;
		}
	}

	/* No in-band spur found: restore defaults and disable mitigation */
	REG_RMW_FIELD(ah, AR_PHY_AGC_CONTROL,
		      AR_PHY_AGC_CONTROL_YCOK_MAX, 0x5);
	REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
		      AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT, 0x0);
	REG_RMW_FIELD(ah, AR_PHY_CCK_SPUR_MIT,
		      AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ, 0x0);
}
159
/*
 * Clean all spur register fields: resets every OFDM spur filter, RSSI
 * and pilot/channel mask field to its disabled/zero state before a new
 * spur configuration is (possibly) programmed.
 */
static void ar9003_hw_spur_ofdm_clear(struct ath_hw *ah)
{
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_SPUR_FREQ_SD, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_SPUR_DELTA_PHASE, 0);
	REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
		      AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 0);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 0);

	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0);
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0);
	REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
		      AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, 0);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
		      AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, 0);
	REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
		      AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, 0);
	REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
		      AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0);
	REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
		      AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
		      AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0);
}
203
/*
 * Program the OFDM spur mitigation registers with the precomputed spur
 * parameters. @freq_offset is the spur's distance from the synth center
 * in MHz; the remaining parameters are register-encoded values computed
 * by ar9003_hw_spur_ofdm_work().
 */
static void ar9003_hw_spur_ofdm(struct ath_hw *ah,
				int freq_offset,
				int spur_freq_sd,
				int spur_delta_phase,
				int spur_subchannel_sd)
{
	int mask_index = 0;

	/* OFDM Spur mitigation */
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_SPUR_FILTER, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_SPUR_FREQ_SD, spur_freq_sd);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_SPUR_DELTA_PHASE, spur_delta_phase);
	REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
		      AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD, spur_subchannel_sd);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_TIMING11,
		      AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_SPUR_RSSI, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_SPUR_RSSI_THRESH, 34);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI, 1);

	/* NF/RSSI based mitigation only when dynamic (CCK+OFDM) mode is on */
	if (REG_READ_FIELD(ah, AR_PHY_MODE,
			   AR_PHY_MODE_DYNAMIC) == 0x1)
		REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
			      AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT, 1);

	/*
	 * NOTE(review): freq_offset can be negative here and left-shifting
	 * a negative value is formally undefined in C; this relies on the
	 * compiler producing arithmetic-shift behavior — confirm intended.
	 */
	mask_index = (freq_offset << 4) / 5;
	if (mask_index < 0)
		mask_index = mask_index - 1;

	mask_index = mask_index & 0x7f;

	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_ENABLE_MASK_PPM, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_PILOT_MASK, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_TIMING4,
		      AR_PHY_TIMING4_ENABLE_CHAN_MASK, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
		      AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A, mask_index);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
		      AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A, mask_index);
	REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
		      AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A, mask_index);
	REG_RMW_FIELD(ah, AR_PHY_PILOT_SPUR_MASK,
		      AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A, 0xc);
	REG_RMW_FIELD(ah, AR_PHY_CHAN_SPUR_MASK,
		      AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A, 0xc);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_MASK_A,
		      AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A, 0xa0);
	REG_RMW_FIELD(ah, AR_PHY_SPUR_REG,
		      AR_PHY_SPUR_REG_MASK_RATE_CNTL, 0xff);
}
264
/*
 * Compute the register-encoded spur parameters (frequency, delta phase
 * and HT40 subchannel selection) for @freq_offset MHz and hand them to
 * ar9003_hw_spur_ofdm() for programming.
 */
static void ar9003_hw_spur_ofdm_work(struct ath_hw *ah,
				     struct ath9k_channel *chan,
				     int freq_offset)
{
	int spur_freq_sd = 0;
	int spur_subchannel_sd = 0;
	int spur_delta_phase = 0;

	if (IS_CHAN_HT40(chan)) {
		if (freq_offset < 0) {
			/* Spur below center: subchannel depends on primary */
			if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
					   AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
				spur_subchannel_sd = 1;
			else
				spur_subchannel_sd = 0;

			spur_freq_sd = ((freq_offset + 10) << 9) / 11;

		} else {
			if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
					   AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
				spur_subchannel_sd = 0;
			else
				spur_subchannel_sd = 1;

			spur_freq_sd = ((freq_offset - 10) << 9) / 11;

		}

		spur_delta_phase = (freq_offset << 17) / 5;

	} else {
		spur_subchannel_sd = 0;
		spur_freq_sd = (freq_offset << 9) /11;
		spur_delta_phase = (freq_offset << 18) / 5;
	}

	/* Truncate to the register field widths (10 and 20 bits) */
	spur_freq_sd = spur_freq_sd & 0x3ff;
	spur_delta_phase = spur_delta_phase & 0xfffff;

	ar9003_hw_spur_ofdm(ah,
			    freq_offset,
			    spur_freq_sd,
			    spur_delta_phase,
			    spur_subchannel_sd);
}
311
312/* Spur mitigation for OFDM */
313static void ar9003_hw_spur_mitigate_ofdm(struct ath_hw *ah,
314 struct ath9k_channel *chan)
315{
316 int synth_freq;
317 int range = 10;
318 int freq_offset = 0;
319 int mode;
320 u8* spurChansPtr;
321 unsigned int i;
322 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
323
324 if (IS_CHAN_5GHZ(chan)) {
325 spurChansPtr = &(eep->modalHeader5G.spurChans[0]);
326 mode = 0;
327 }
328 else {
329 spurChansPtr = &(eep->modalHeader2G.spurChans[0]);
330 mode = 1;
331 }
332
333 if (spurChansPtr[0] == 0)
334 return; /* No spur in the mode */
335
336 if (IS_CHAN_HT40(chan)) {
337 range = 19;
338 if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
339 AR_PHY_GC_DYN2040_PRI_CH) == 0x0)
340 synth_freq = chan->channel - 10;
341 else
342 synth_freq = chan->channel + 10;
343 } else {
344 range = 10;
345 synth_freq = chan->channel;
346 }
347
348 ar9003_hw_spur_ofdm_clear(ah);
349
350 for (i = 0; spurChansPtr[i] && i < 5; i++) {
351 freq_offset = FBIN2FREQ(spurChansPtr[i], mode) - synth_freq;
352 if (abs(freq_offset) < range) {
353 ar9003_hw_spur_ofdm_work(ah, chan, freq_offset);
354 break;
355 }
356 }
357}
358
/* Run both spur mitigation paths (MRC CCK first, then OFDM) for @chan. */
static void ar9003_hw_spur_mitigate(struct ath_hw *ah,
				    struct ath9k_channel *chan)
{
	ar9003_hw_spur_mitigate_mrc_cck(ah, chan);
	ar9003_hw_spur_mitigate_ofdm(ah, chan);
}
365
366static u32 ar9003_hw_compute_pll_control(struct ath_hw *ah,
367 struct ath9k_channel *chan)
368{
369 u32 pll;
370
371 pll = SM(0x5, AR_RTC_9300_PLL_REFDIV);
372
373 if (chan && IS_CHAN_HALF_RATE(chan))
374 pll |= SM(0x1, AR_RTC_9300_PLL_CLKSEL);
375 else if (chan && IS_CHAN_QUARTER_RATE(chan))
376 pll |= SM(0x2, AR_RTC_9300_PLL_CLKSEL);
377
378 pll |= SM(0x2c, AR_RTC_9300_PLL_DIV);
379
380 return pll;
381}
382
/*
 * Program the baseband and MAC 20/40 MHz operating mode registers for
 * @chan, preserving the DAC-FIFO bit and the INI-programmed bits in
 * AR_PHY_GEN_CTRL.
 */
static void ar9003_hw_set_channel_regs(struct ath_hw *ah,
				       struct ath9k_channel *chan)
{
	u32 phymode;
	u32 enableDacFifo = 0;

	/* Preserve whatever DAC-FIFO setting the INI/firmware left behind */
	enableDacFifo =
		(REG_READ(ah, AR_PHY_GEN_CTRL) & AR_PHY_GC_ENABLE_DAC_FIFO);

	/* Enable 11n HT, 20 MHz */
	phymode = AR_PHY_GC_HT_EN | AR_PHY_GC_SINGLE_HT_LTF1 | AR_PHY_GC_WALSH |
		  AR_PHY_GC_SHORT_GI_40 | enableDacFifo;

	/* Configure baseband for dynamic 20/40 operation */
	if (IS_CHAN_HT40(chan)) {
		phymode |= AR_PHY_GC_DYN2040_EN;
		/* Configure control (primary) channel at +-10MHz */
		if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
		    (chan->chanmode == CHANNEL_G_HT40PLUS))
			phymode |= AR_PHY_GC_DYN2040_PRI_CH;

	}

	/* make sure we preserve INI settings */
	phymode |= REG_READ(ah, AR_PHY_GEN_CTRL);
	/* turn off Green Field detection for STA for now */
	phymode &= ~AR_PHY_GC_GF_DETECT_EN;

	REG_WRITE(ah, AR_PHY_GEN_CTRL, phymode);

	/* Configure MAC for 20/40 operation */
	ath9k_hw_set11nmac2040(ah);

	/* global transmit timeout (25 TUs default)*/
	REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
	/* carrier sense timeout */
	REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
}
421
/*
 * Activate the baseband (and synthesizer) and busy-wait long enough for
 * the synth to settle before calibration may start.
 */
static void ar9003_hw_init_bb(struct ath_hw *ah,
			      struct ath9k_channel *chan)
{
	u32 synthDelay;

	/*
	 * Wait for the frequency synth to settle (synth goes on
	 * via AR_PHY_ACTIVE_EN). Read the phy active delay register.
	 * Value is in 100ns increments.
	 */
	synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
	if (IS_CHAN_B(chan))
		synthDelay = (4 * synthDelay) / 22;
	else
		synthDelay /= 10;

	/* Activate the PHY (includes baseband activate + synthesizer on) */
	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);

	/*
	 * There is an issue if the AP starts the calibration before
	 * the base band timeout completes. This could result in the
	 * rx_clear false triggering. As a workaround we add delay an
	 * extra BASE_ACTIVATE_DELAY usecs to ensure this condition
	 * does not happen.
	 */
	udelay(synthDelay + BASE_ACTIVATE_DELAY);
}
450
/*
 * Program the RX and TX chain masks. For the 0x5 masks (chains 0+2)
 * the analog chain-swap bit is set in addition to the mask itself.
 */
void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
{
	switch (rx) {
	case 0x5:
		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
			    AR_PHY_SWAP_ALT_CHAIN);
		/* fall through - 0x5 also programs the chainmask regs */
	case 0x3:
	case 0x1:
	case 0x2:
	case 0x7:
		REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx);
		REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx);
		break;
	default:
		break;
	}

	REG_WRITE(ah, AR_SELFGEN_MASK, tx);
	if (tx == 0x5) {
		REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
			    AR_PHY_SWAP_ALT_CHAIN);
	}
}
474
/*
 * Override INI values with chip specific configuration.
 */
static void ar9003_hw_override_ini(struct ath_hw *ah)
{
	u32 val;

	/*
	 * Set the RX_ABORT and RX_DIS and clear it only after
	 * RXE is set for MAC. This prevents frames with
	 * corrupted descriptor status.
	 */
	REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));

	/*
	 * For AR9280 and above, there is a new feature that allows
	 * Multicast search based on both MAC Address and Key ID. By default,
	 * this feature is enabled. But since the driver is not using this
	 * feature, we switch it off; otherwise multicast search based on
	 * MAC addr only will fail.
	 */
	val = REG_READ(ah, AR_PCU_MISC_MODE2) & (~AR_ADHOC_MCAST_KEYID_ENABLE);
	REG_WRITE(ah, AR_PCU_MISC_MODE2,
		  val | AR_AGG_WEP_ENABLE_FIX | AR_AGG_WEP_ENABLE);
}
500
/*
 * Write one INI array to hardware, using @column to select the modal
 * value set. Non-modal (2-column) arrays are forced to column 1, and
 * absent arrays are silently skipped.
 */
static void ar9003_hw_prog_ini(struct ath_hw *ah,
			       struct ar5416IniArray *iniArr,
			       int column)
{
	unsigned int i, regWrites = 0;

	/* New INI format: Array may be undefined (pre, core, post arrays) */
	if (!iniArr->ia_array)
		return;

	/*
	 * New INI format: Pre, core, and post arrays for a given subsystem
	 * may be modal (> 2 columns) or non-modal (2 columns). Determine if
	 * the array is non-modal and force the column to 1.
	 */
	if (column >= iniArr->ia_columns)
		column = 1;

	for (i = 0; i < iniArr->ia_rows; i++) {
		u32 reg = INI_RA(iniArr, i, 0);
		u32 val = INI_RA(iniArr, i, column);

		REG_WRITE(ah, reg, val);

		/*
		 * Determine if this is a shift register value, and insert the
		 * configured delay if so.
		 */
		if (reg >= 0x16000 && reg < 0x17000
		    && ah->config.analog_shiftreg)
			udelay(100);

		DO_DELAY(regWrites);
	}
}
536
/*
 * Program all INI arrays (SOC, MAC, BB, radio, RX/TX gain and the
 * fast-clock extras) for @chan, then apply chip overrides, channel
 * registers, chain masks and TX power. The write order here matters.
 *
 * Returns 0 on success or -EINVAL for an unknown channel mode.
 */
static int ar9003_hw_process_ini(struct ath_hw *ah,
				 struct ath9k_channel *chan)
{
	struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
	unsigned int regWrites = 0, i;
	struct ieee80211_channel *channel = chan->chan;
	u32 modesIndex, freqIndex;

	/* Map the channel mode onto the INI modal/frequency columns */
	switch (chan->chanmode) {
	case CHANNEL_A:
	case CHANNEL_A_HT20:
		modesIndex = 1;
		freqIndex = 1;
		break;
	case CHANNEL_A_HT40PLUS:
	case CHANNEL_A_HT40MINUS:
		modesIndex = 2;
		freqIndex = 1;
		break;
	case CHANNEL_G:
	case CHANNEL_G_HT20:
	case CHANNEL_B:
		modesIndex = 4;
		freqIndex = 2;
		break;
	case CHANNEL_G_HT40PLUS:
	case CHANNEL_G_HT40MINUS:
		modesIndex = 3;
		freqIndex = 2;
		break;

	default:
		return -EINVAL;
	}

	for (i = 0; i < ATH_INI_NUM_SPLIT; i++) {
		ar9003_hw_prog_ini(ah, &ah->iniSOC[i], modesIndex);
		ar9003_hw_prog_ini(ah, &ah->iniMac[i], modesIndex);
		ar9003_hw_prog_ini(ah, &ah->iniBB[i], modesIndex);
		ar9003_hw_prog_ini(ah, &ah->iniRadio[i], modesIndex);
	}

	REG_WRITE_ARRAY(&ah->iniModesRxGain, 1, regWrites);
	REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);

	/*
	 * For 5GHz channels requiring Fast Clock, apply
	 * different modal values.
	 */
	if (IS_CHAN_A_FAST_CLOCK(ah, chan))
		REG_WRITE_ARRAY(&ah->iniModesAdditional,
				modesIndex, regWrites);

	ar9003_hw_override_ini(ah);
	ar9003_hw_set_channel_regs(ah, chan);
	ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);

	/* Set TX power */
	ah->eep_ops->set_txpower(ah, chan,
				 ath9k_regd_get_ctl(regulatory, chan),
				 channel->max_antenna_gain * 2,
				 channel->max_power * 2,
				 min((u32) MAX_RATE_POWER,
				     (u32) regulatory->power_limit));

	return 0;
}
604
605static void ar9003_hw_set_rfmode(struct ath_hw *ah,
606 struct ath9k_channel *chan)
607{
608 u32 rfMode = 0;
609
610 if (chan == NULL)
611 return;
612
613 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
614 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
615
616 if (IS_CHAN_A_FAST_CLOCK(ah, chan))
617 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
618
619 REG_WRITE(ah, AR_PHY_MODE, rfMode);
620}
621
/* Deactivate the PHY (baseband disable). */
static void ar9003_hw_mark_phy_inactive(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
}
626
/*
 * Program the delta-slope coefficients (mantissa/exponent pairs) for
 * OFDM timing, both for normal and short guard interval, derived from
 * the channel's synth center frequency.
 */
static void ar9003_hw_set_delta_slope(struct ath_hw *ah,
				      struct ath9k_channel *chan)
{
	u32 coef_scaled, ds_coef_exp, ds_coef_man;
	u32 clockMhzScaled = 0x64000000;
	struct chan_centers centers;

	/*
	 * half and quarter rate can divide the scaled clock by 2 or 4
	 * scale for selected channel bandwidth
	 */
	if (IS_CHAN_HALF_RATE(chan))
		clockMhzScaled = clockMhzScaled >> 1;
	else if (IS_CHAN_QUARTER_RATE(chan))
		clockMhzScaled = clockMhzScaled >> 2;

	/*
	 * ALGO -> coef = 1e8/fcarrier*fclock/40;
	 * scaled coef to provide precision for this floating calculation
	 */
	ath9k_hw_get_channel_centers(ah, chan, &centers);
	coef_scaled = clockMhzScaled / centers.synth_center;

	ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
				      &ds_coef_exp);

	REG_RMW_FIELD(ah, AR_PHY_TIMING3,
		      AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
	REG_RMW_FIELD(ah, AR_PHY_TIMING3,
		      AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);

	/*
	 * For Short GI,
	 * scaled coeff is 9/10 that of normal coeff
	 */
	coef_scaled = (9 * coef_scaled) / 10;

	ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
				      &ds_coef_exp);

	/* for short gi */
	REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
		      AR_PHY_SGI_DSC_MAN, ds_coef_man);
	REG_RMW_FIELD(ah, AR_PHY_SGI_DELTA,
		      AR_PHY_SGI_DSC_EXP, ds_coef_exp);
}
673
/*
 * Request the RF bus and wait (up to AH_WAIT_TIMEOUT) for the grant.
 * Returns true if the bus was granted.
 */
static bool ar9003_hw_rfbus_req(struct ath_hw *ah)
{
	REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
	return ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
			     AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT);
}
680
/*
 * Wait for the frequency synth to settle (synth goes on via PHY_ACTIVE_EN).
 * Read the phy active delay register. Value is in 100ns increments.
 * Then release the RF bus requested by ar9003_hw_rfbus_req().
 */
static void ar9003_hw_rfbus_done(struct ath_hw *ah)
{
	u32 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY;
	/* CCK-only (11b) channels use a different settle-time scaling */
	if (IS_CHAN_B(ah->curchan))
		synthDelay = (4 * synthDelay) / 22;
	else
		synthDelay /= 10;

	udelay(synthDelay + BASE_ACTIVATE_DELAY);

	REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
}
697
/*
 * Set the interrupt and GPIO values so the ISR can disable RF
 * on a switch signal. Assumes GPIO port and interrupt polarity
 * are set prior to call.
 */
static void ar9003_hw_enable_rfkill(struct ath_hw *ah)
{
	/* Connect rfsilent_bb_l to baseband */
	REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
		    AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
	/* Set input mux for rfsilent_bb_l to GPIO #0 */
	REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
		    AR_GPIO_INPUT_MUX2_RFSILENT);

	/*
	 * Configure the desired GPIO port for input and
	 * enable baseband rf silence.
	 */
	ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
	REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
}
719
720static void ar9003_hw_set_diversity(struct ath_hw *ah, bool value)
721{
722 u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
723 if (value)
724 v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
725 else
726 v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
727 REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
728}
729
/*
 * Apply one Adaptive Noise Immunity (ANI) command to the baseband.
 *
 * @cmd is masked against ah->ani_function, so commands the chip/driver
 * has disabled fall through to the default case. @param's meaning is
 * per-command (a level index or an on/off flag). Updates the matching
 * aniState field and ANI statistics counters on success.
 *
 * Returns false for an out-of-range level or an unhandled command,
 * true otherwise.
 */
static bool ar9003_hw_ani_control(struct ath_hw *ah,
				  enum ath9k_ani_cmd cmd, int param)
{
	struct ar5416AniState *aniState = ah->curani;
	struct ath_common *common = ath9k_hw_common(ah);

	switch (cmd & ah->ani_function) {
	case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
		u32 level = param;

		if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
			ath_print(common, ATH_DBG_ANI,
				  "level out of range (%u > %u)\n",
				  level,
				  (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
			return false;
		}

		/* Program the four AGC parameters for this immunity level */
		REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
			      AR_PHY_DESIRED_SZ_TOT_DES,
			      ah->totalSizeDesired[level]);
		REG_RMW_FIELD(ah, AR_PHY_AGC,
			      AR_PHY_AGC_COARSE_LOW,
			      ah->coarse_low[level]);
		REG_RMW_FIELD(ah, AR_PHY_AGC,
			      AR_PHY_AGC_COARSE_HIGH,
			      ah->coarse_high[level]);
		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
			      AR_PHY_FIND_SIG_FIRPWR, ah->firpwr[level]);

		if (level > aniState->noiseImmunityLevel)
			ah->stats.ast_ani_niup++;
		else if (level < aniState->noiseImmunityLevel)
			ah->stats.ast_ani_nidown++;
		aniState->noiseImmunityLevel = level;
		break;
	}
	case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
		/* Threshold tables indexed by on(1)/off(0) */
		const int m1ThreshLow[] = { 127, 50 };
		const int m2ThreshLow[] = { 127, 40 };
		const int m1Thresh[] = { 127, 0x4d };
		const int m2Thresh[] = { 127, 0x40 };
		const int m2CountThr[] = { 31, 16 };
		const int m2CountThrLow[] = { 63, 48 };
		u32 on = param ? 1 : 0;

		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
			      AR_PHY_SFCORR_LOW_M1_THRESH_LOW,
			      m1ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
			      AR_PHY_SFCORR_LOW_M2_THRESH_LOW,
			      m2ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
			      AR_PHY_SFCORR_M1_THRESH, m1Thresh[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
			      AR_PHY_SFCORR_M2_THRESH, m2Thresh[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR,
			      AR_PHY_SFCORR_M2COUNT_THR, m2CountThr[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
			      AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW,
			      m2CountThrLow[on]);

		/* Same thresholds for the extension channel */
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M1_THRESH_LOW, m1ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M2_THRESH_LOW, m2ThreshLow[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M1_THRESH, m1Thresh[on]);
		REG_RMW_FIELD(ah, AR_PHY_SFCORR_EXT,
			      AR_PHY_SFCORR_EXT_M2_THRESH, m2Thresh[on]);

		if (on)
			REG_SET_BIT(ah, AR_PHY_SFCORR_LOW,
				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
		else
			REG_CLR_BIT(ah, AR_PHY_SFCORR_LOW,
				    AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);

		if (!on != aniState->ofdmWeakSigDetectOff) {
			if (on)
				ah->stats.ast_ani_ofdmon++;
			else
				ah->stats.ast_ani_ofdmoff++;
			aniState->ofdmWeakSigDetectOff = !on;
		}
		break;
	}
	case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
		const int weakSigThrCck[] = { 8, 6 };
		u32 high = param ? 1 : 0;

		REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
			      AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK,
			      weakSigThrCck[high]);
		if (high != aniState->cckWeakSigThreshold) {
			if (high)
				ah->stats.ast_ani_cckhigh++;
			else
				ah->stats.ast_ani_ccklow++;
			aniState->cckWeakSigThreshold = high;
		}
		break;
	}
	case ATH9K_ANI_FIRSTEP_LEVEL:{
		const int firstep[] = { 0, 4, 8 };
		u32 level = param;

		if (level >= ARRAY_SIZE(firstep)) {
			ath_print(common, ATH_DBG_ANI,
				  "level out of range (%u > %u)\n",
				  level,
				  (unsigned) ARRAY_SIZE(firstep));
			return false;
		}
		REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
			      AR_PHY_FIND_SIG_FIRSTEP,
			      firstep[level]);
		if (level > aniState->firstepLevel)
			ah->stats.ast_ani_stepup++;
		else if (level < aniState->firstepLevel)
			ah->stats.ast_ani_stepdown++;
		aniState->firstepLevel = level;
		break;
	}
	case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
		const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
		u32 level = param;

		if (level >= ARRAY_SIZE(cycpwrThr1)) {
			ath_print(common, ATH_DBG_ANI,
				  "level out of range (%u > %u)\n",
				  level,
				  (unsigned) ARRAY_SIZE(cycpwrThr1));
			return false;
		}
		REG_RMW_FIELD(ah, AR_PHY_TIMING5,
			      AR_PHY_TIMING5_CYCPWR_THR1,
			      cycpwrThr1[level]);
		if (level > aniState->spurImmunityLevel)
			ah->stats.ast_ani_spurup++;
		else if (level < aniState->spurImmunityLevel)
			ah->stats.ast_ani_spurdown++;
		aniState->spurImmunityLevel = level;
		break;
	}
	case ATH9K_ANI_PRESENT:
		break;
	default:
		ath_print(common, ATH_DBG_ANI,
			  "invalid cmd %u\n", cmd);
		return false;
	}

	/* Dump the resulting ANI state for debugging */
	ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
	ath_print(common, ATH_DBG_ANI,
		  "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
		  "ofdmWeakSigDetectOff=%d\n",
		  aniState->noiseImmunityLevel,
		  aniState->spurImmunityLevel,
		  !aniState->ofdmWeakSigDetectOff);
	ath_print(common, ATH_DBG_ANI,
		  "cckWeakSigThreshold=%d, "
		  "firstepLevel=%d, listenTime=%d\n",
		  aniState->cckWeakSigThreshold,
		  aniState->firstepLevel,
		  aniState->listenTime);
	ath_print(common, ATH_DBG_ANI,
		  "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
		  aniState->cycleCount,
		  aniState->ofdmPhyErrCount,
		  aniState->cckPhyErrCount);

	return true;
}
904
905static void ar9003_hw_nf_sanitize_2g(struct ath_hw *ah, s16 *nf)
906{
907 struct ath_common *common = ath9k_hw_common(ah);
908
909 if (*nf > ah->nf_2g_max) {
910 ath_print(common, ATH_DBG_CALIBRATE,
911 "2 GHz NF (%d) > MAX (%d), "
912 "correcting to MAX",
913 *nf, ah->nf_2g_max);
914 *nf = ah->nf_2g_max;
915 } else if (*nf < ah->nf_2g_min) {
916 ath_print(common, ATH_DBG_CALIBRATE,
917 "2 GHz NF (%d) < MIN (%d), "
918 "correcting to MIN",
919 *nf, ah->nf_2g_min);
920 *nf = ah->nf_2g_min;
921 }
922}
923
924static void ar9003_hw_nf_sanitize_5g(struct ath_hw *ah, s16 *nf)
925{
926 struct ath_common *common = ath9k_hw_common(ah);
927
928 if (*nf > ah->nf_5g_max) {
929 ath_print(common, ATH_DBG_CALIBRATE,
930 "5 GHz NF (%d) > MAX (%d), "
931 "correcting to MAX",
932 *nf, ah->nf_5g_max);
933 *nf = ah->nf_5g_max;
934 } else if (*nf < ah->nf_5g_min) {
935 ath_print(common, ATH_DBG_CALIBRATE,
936 "5 GHz NF (%d) < MIN (%d), "
937 "correcting to MIN",
938 *nf, ah->nf_5g_min);
939 *nf = ah->nf_5g_min;
940 }
941}
942
943static void ar9003_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
944{
945 if (IS_CHAN_2GHZ(ah->curchan))
946 ar9003_hw_nf_sanitize_2g(ah, nf);
947 else
948 ar9003_hw_nf_sanitize_5g(ah, nf);
949}
950
951static void ar9003_hw_do_getnf(struct ath_hw *ah,
952 int16_t nfarray[NUM_NF_READINGS])
953{
954 struct ath_common *common = ath9k_hw_common(ah);
955 int16_t nf;
956
957 nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
958 if (nf & 0x100)
959 nf = 0 - ((nf ^ 0x1ff) + 1);
960 ar9003_hw_nf_sanitize(ah, &nf);
961 ath_print(common, ATH_DBG_CALIBRATE,
962 "NF calibrated [ctl] [chain 0] is %d\n", nf);
963 nfarray[0] = nf;
964
965 nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
966 if (nf & 0x100)
967 nf = 0 - ((nf ^ 0x1ff) + 1);
968 ar9003_hw_nf_sanitize(ah, &nf);
969 ath_print(common, ATH_DBG_CALIBRATE,
970 "NF calibrated [ctl] [chain 1] is %d\n", nf);
971 nfarray[1] = nf;
972
973 nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
974 if (nf & 0x100)
975 nf = 0 - ((nf ^ 0x1ff) + 1);
976 ar9003_hw_nf_sanitize(ah, &nf);
977 ath_print(common, ATH_DBG_CALIBRATE,
978 "NF calibrated [ctl] [chain 2] is %d\n", nf);
979 nfarray[2] = nf;
980
981 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
982 if (nf & 0x100)
983 nf = 0 - ((nf ^ 0x1ff) + 1);
984 ar9003_hw_nf_sanitize(ah, &nf);
985 ath_print(common, ATH_DBG_CALIBRATE,
986 "NF calibrated [ext] [chain 0] is %d\n", nf);
987 nfarray[3] = nf;
988
989 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
990 if (nf & 0x100)
991 nf = 0 - ((nf ^ 0x1ff) + 1);
992 ar9003_hw_nf_sanitize(ah, &nf);
993 ath_print(common, ATH_DBG_CALIBRATE,
994 "NF calibrated [ext] [chain 1] is %d\n", nf);
995 nfarray[4] = nf;
996
997 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
998 if (nf & 0x100)
999 nf = 0 - ((nf ^ 0x1ff) + 1);
1000 ar9003_hw_nf_sanitize(ah, &nf);
1001 ath_print(common, ATH_DBG_CALIBRATE,
1002 "NF calibrated [ext] [chain 2] is %d\n", nf);
1003 nfarray[5] = nf;
1004}
1005
/* Install the AR9300 per-band noise floor clamp limits into @ah. */
void ar9003_hw_set_nf_limits(struct ath_hw *ah)
{
	ah->nf_2g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ;
	ah->nf_2g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ;
	ah->nf_5g_max = AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ;
	ah->nf_5g_min = AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ;
}
1013
1014/*
1015 * Find out which of the RX chains are enabled
1016 */
1017static u32 ar9003_hw_get_rx_chainmask(struct ath_hw *ah)
1018{
1019 u32 chain = REG_READ(ah, AR_PHY_RX_CHAINMASK);
1020 /*
1021 * The bits [2:0] indicate the rx chain mask and are to be
1022 * interpreted as follows:
1023 * 00x => Only chain 0 is enabled
1024 * 01x => Chain 1 and 0 enabled
1025 * 1xx => Chain 2,1 and 0 enabled
1026 */
1027 return chain & 0x7;
1028}
1029
/*
 * Load the software-filtered noise floor history into the baseband's
 * internal minCCApwr registers, wait for the hardware to consume it,
 * then restore the CCA registers to -50 so the next hardware NF cal is
 * not capped by the value just loaded.
 */
static void ar9003_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath9k_nfcal_hist *h;
	unsigned i, j;
	int32_t val;
	/* Register order must match the nfarray index order in do_getnf */
	const u32 ar9300_cca_regs[6] = {
		AR_PHY_CCA_0,
		AR_PHY_CCA_1,
		AR_PHY_CCA_2,
		AR_PHY_EXT_CCA,
		AR_PHY_EXT_CCA_1,
		AR_PHY_EXT_CCA_2,
	};
	u8 chainmask, rx_chain_status;
	struct ath_common *common = ath9k_hw_common(ah);

	rx_chain_status = ar9003_hw_get_rx_chainmask(ah);

	/* Load all six readings regardless of active chains */
	chainmask = 0x3F;
	h = ah->nfCalHist;

	/* Write each filtered NF value into the 9-bit minCCApwr field */
	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			val = REG_READ(ah, ar9300_cca_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
			REG_WRITE(ah, ar9300_cca_regs[i], val);
		}
	}

	/*
	 * Load software filtered NF value into baseband internal minCCApwr
	 * variable.
	 */
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_ENABLE_NF);
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);

	/*
	 * Wait for load to complete, should be fast, a few 10s of us.
	 * The max delay was changed from an original 250us to 10000us
	 * since 250us often results in NF load timeout and causes deaf
	 * condition during stress testing 12/12/2009
	 */
	for (j = 0; j < 1000; j++) {
		if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
		     AR_PHY_AGC_CONTROL_NF) == 0)
			break;
		udelay(10);
	}

	/*
	 * We timed out waiting for the noisefloor to load, probably due to an
	 * in-progress rx. Simply return here and allow the load plenty of time
	 * to complete before the next calibration interval. We need to avoid
	 * trying to load -50 (which happens below) while the previous load is
	 * still in progress as this can cause rx deafness. Instead by returning
	 * here, the baseband nf cal will just be capped by our present
	 * noisefloor until the next calibration timer.
	 */
	if (j == 1000) {
		ath_print(common, ATH_DBG_ANY, "Timeout while waiting for nf "
			  "to load: AR_PHY_AGC_CONTROL=0x%x\n",
			  REG_READ(ah, AR_PHY_AGC_CONTROL));
		return;
	}

	/*
	 * Restore maxCCAPower register parameter again so that we're not capped
	 * by the median we just loaded. This will be initial (and max) value
	 * of next noise floor calibration the baseband does.
	 */
	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			val = REG_READ(ah, ar9300_cca_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) (-50) << 1) & 0x1ff);
			REG_WRITE(ah, ar9300_cca_regs[i], val);
		}
	}
}
1113
/*
 * Hook all AR9003 PHY implementations into the hardware private ops
 * table so the common ath9k code dispatches to this family's handlers.
 */
void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);

	priv_ops->rf_set_freq = ar9003_hw_set_channel;
	priv_ops->spur_mitigate_freq = ar9003_hw_spur_mitigate;
	priv_ops->compute_pll_control = ar9003_hw_compute_pll_control;
	priv_ops->set_channel_regs = ar9003_hw_set_channel_regs;
	priv_ops->init_bb = ar9003_hw_init_bb;
	priv_ops->process_ini = ar9003_hw_process_ini;
	priv_ops->set_rfmode = ar9003_hw_set_rfmode;
	priv_ops->mark_phy_inactive = ar9003_hw_mark_phy_inactive;
	priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
	priv_ops->rfbus_req = ar9003_hw_rfbus_req;
	priv_ops->rfbus_done = ar9003_hw_rfbus_done;
	priv_ops->enable_rfkill = ar9003_hw_enable_rfkill;
	priv_ops->set_diversity = ar9003_hw_set_diversity;
	priv_ops->ani_control = ar9003_hw_ani_control;
	priv_ops->do_getnf = ar9003_hw_do_getnf;
	priv_ops->loadnf = ar9003_hw_loadnf;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
new file mode 100644
index 000000000000..f08cc8bda005
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -0,0 +1,847 @@
1/*
2 * Copyright (c) 2002-2010 Atheros Communications, Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef AR9003_PHY_H
18#define AR9003_PHY_H
19
20/*
21 * Channel Register Map
22 */
23#define AR_CHAN_BASE 0x9800
24
25#define AR_PHY_TIMING1 (AR_CHAN_BASE + 0x0)
26#define AR_PHY_TIMING2 (AR_CHAN_BASE + 0x4)
27#define AR_PHY_TIMING3 (AR_CHAN_BASE + 0x8)
28#define AR_PHY_TIMING4 (AR_CHAN_BASE + 0xc)
29#define AR_PHY_TIMING5 (AR_CHAN_BASE + 0x10)
30#define AR_PHY_TIMING6 (AR_CHAN_BASE + 0x14)
31#define AR_PHY_TIMING11 (AR_CHAN_BASE + 0x18)
32#define AR_PHY_SPUR_REG (AR_CHAN_BASE + 0x1c)
33#define AR_PHY_RX_IQCAL_CORR_B0 (AR_CHAN_BASE + 0xdc)
34#define AR_PHY_TX_IQCAL_CONTROL_3 (AR_CHAN_BASE + 0xb0)
35
36#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
37#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
38
39#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
40#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
41
42#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC 0x40000000
43#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_AGC_S 30
44
45#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR 0x80000000
46#define AR_PHY_TIMING11_USE_SPUR_FILTER_IN_SELFCOR_S 31
47
48#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT 0x4000000
49#define AR_PHY_SPUR_REG_ENABLE_NF_RSSI_SPUR_MIT_S 26
50
51#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000 /* bins move with freq offset */
52#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM_S 17
53#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x000000FF
54#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
55#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI 0x00000100
56#define AR_PHY_SPUR_REG_EN_VIT_SPUR_RSSI_S 8
57#define AR_PHY_SPUR_REG_MASK_RATE_CNTL 0x03FC0000
58#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
59
60#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN 0x20000000
61#define AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN_S 29
62
63#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN 0x80000000
64#define AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN_S 31
65
66#define AR_PHY_FIND_SIG_LOW (AR_CHAN_BASE + 0x20)
67
68#define AR_PHY_SFCORR (AR_CHAN_BASE + 0x24)
69#define AR_PHY_SFCORR_LOW (AR_CHAN_BASE + 0x28)
70#define AR_PHY_SFCORR_EXT (AR_CHAN_BASE + 0x2c)
71
72#define AR_PHY_EXT_CCA (AR_CHAN_BASE + 0x30)
73#define AR_PHY_RADAR_0 (AR_CHAN_BASE + 0x34)
74#define AR_PHY_RADAR_1 (AR_CHAN_BASE + 0x38)
75#define AR_PHY_RADAR_EXT (AR_CHAN_BASE + 0x3c)
76#define AR_PHY_MULTICHAIN_CTRL (AR_CHAN_BASE + 0x80)
77#define AR_PHY_PERCHAIN_CSD (AR_CHAN_BASE + 0x84)
78
79#define AR_PHY_TX_PHASE_RAMP_0 (AR_CHAN_BASE + 0xd0)
80#define AR_PHY_ADC_GAIN_DC_CORR_0 (AR_CHAN_BASE + 0xd4)
81#define AR_PHY_IQ_ADC_MEAS_0_B0 (AR_CHAN_BASE + 0xc0)
82#define AR_PHY_IQ_ADC_MEAS_1_B0 (AR_CHAN_BASE + 0xc4)
83#define AR_PHY_IQ_ADC_MEAS_2_B0 (AR_CHAN_BASE + 0xc8)
84#define AR_PHY_IQ_ADC_MEAS_3_B0 (AR_CHAN_BASE + 0xcc)
85
86/* The following registers changed position from AR9300 1.0 to AR9300 2.0 */
87#define AR_PHY_TX_PHASE_RAMP_0_9300_10 (AR_CHAN_BASE + 0xd0 - 0x10)
88#define AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 (AR_CHAN_BASE + 0xd4 - 0x10)
89#define AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 (AR_CHAN_BASE + 0xc0 + 0x8)
90#define AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 (AR_CHAN_BASE + 0xc4 + 0x8)
91#define AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 (AR_CHAN_BASE + 0xc8 + 0x8)
92#define AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 (AR_CHAN_BASE + 0xcc + 0x8)
93
94#define AR_PHY_TX_CRC (AR_CHAN_BASE + 0xa0)
95#define AR_PHY_TST_DAC_CONST (AR_CHAN_BASE + 0xa4)
96#define AR_PHY_SPUR_REPORT_0 (AR_CHAN_BASE + 0xa8)
97#define AR_PHY_CHAN_INFO_TAB_0 (AR_CHAN_BASE + 0x300)
98
99/*
100 * Channel Field Definitions
101 */
102#define AR_PHY_TIMING2_USE_FORCE_PPM 0x00001000
103#define AR_PHY_TIMING2_FORCE_PPM_VAL 0x00000fff
104#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
105#define AR_PHY_TIMING3_DSC_MAN_S 17
106#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
107#define AR_PHY_TIMING3_DSC_EXP_S 13
108#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX 0xF000
109#define AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX_S 12
110#define AR_PHY_TIMING4_DO_CAL 0x10000
111
112#define AR_PHY_TIMING4_ENABLE_PILOT_MASK 0x10000000
113#define AR_PHY_TIMING4_ENABLE_PILOT_MASK_S 28
114#define AR_PHY_TIMING4_ENABLE_CHAN_MASK 0x20000000
115#define AR_PHY_TIMING4_ENABLE_CHAN_MASK_S 29
116
117#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER 0x40000000
118#define AR_PHY_TIMING4_ENABLE_SPUR_FILTER_S 30
119#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI 0x80000000
120#define AR_PHY_TIMING4_ENABLE_SPUR_RSSI_S 31
121
122#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
123#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
124#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
125#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
126#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
127#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
128#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
129#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
130#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
131#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
132#define AR_PHY_SFCORR_M2COUNT_THR_S 0
133#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
134#define AR_PHY_SFCORR_M1_THRESH_S 17
135#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
136#define AR_PHY_SFCORR_M2_THRESH_S 24
137#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
138#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
139#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
140#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
141#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
142#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
143#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
144#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
145#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD 0x10000000
146#define AR_PHY_SFCORR_EXT_SPUR_SUBCHANNEL_SD_S 28
147#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
148#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
149#define AR_PHY_EXT_CCA_THRESH62_S 16
150#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
151#define AR_PHY_EXT_MINCCA_PWR_S 16
152#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
153#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
154#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE 0x00000001
155#define AR_PHY_TIMING5_CYCPWR_THR1_ENABLE_S 0
156#define AR_PHY_TIMING5_CYCPWR_THR1A 0x007F0000
157#define AR_PHY_TIMING5_CYCPWR_THR1A_S 16
158#define AR_PHY_TIMING5_RSSI_THR1A (0x7F << 16)
159#define AR_PHY_TIMING5_RSSI_THR1A_S 16
160#define AR_PHY_TIMING5_RSSI_THR1A_ENA (0x1 << 15)
161#define AR_PHY_RADAR_0_ENA 0x00000001
162#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
163#define AR_PHY_RADAR_0_INBAND 0x0000003e
164#define AR_PHY_RADAR_0_INBAND_S 1
165#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
166#define AR_PHY_RADAR_0_PRSSI_S 6
167#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
168#define AR_PHY_RADAR_0_HEIGHT_S 12
169#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
170#define AR_PHY_RADAR_0_RRSSI_S 18
171#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
172#define AR_PHY_RADAR_0_FIRPWR_S 24
173#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
174#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
175#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
176#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
177#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
178#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
179#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
180#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
181#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
182#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
183#define AR_PHY_RADAR_1_MAXLEN_S 0
184#define AR_PHY_RADAR_EXT_ENA 0x00004000
185#define AR_PHY_RADAR_DC_PWR_THRESH 0x007f8000
186#define AR_PHY_RADAR_DC_PWR_THRESH_S 15
187#define AR_PHY_RADAR_LB_DC_CAP 0x7f800000
188#define AR_PHY_RADAR_LB_DC_CAP_S 23
189#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW (0x3f << 6)
190#define AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW_S 6
191#define AR_PHY_FIND_SIG_LOW_FIRPWR (0x7f << 12)
192#define AR_PHY_FIND_SIG_LOW_FIRPWR_S 12
193#define AR_PHY_FIND_SIG_LOW_FIRPWR_SIGN_BIT 19
194#define AR_PHY_FIND_SIG_LOW_RELSTEP 0x1f
195#define AR_PHY_FIND_SIG_LOW_RELSTEP_S 0
196#define AR_PHY_FIND_SIG_LOW_RELSTEP_SIGN_BIT 5
197#define AR_PHY_CHAN_INFO_TAB_S2_READ 0x00000008
198#define AR_PHY_CHAN_INFO_TAB_S2_READ_S 3
199#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF 0x0000007F
200#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF_S 0
201#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF 0x00003F80
202#define AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF_S 7
203#define AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE 0x00004000
204#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF 0x003f8000
205#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF_S 15
206#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF 0x1fc00000
207#define AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF_S 22
208
209/*
210 * MRC Register Map
211 */
212#define AR_MRC_BASE 0x9c00
213
214#define AR_PHY_TIMING_3A (AR_MRC_BASE + 0x0)
215#define AR_PHY_LDPC_CNTL1 (AR_MRC_BASE + 0x4)
216#define AR_PHY_LDPC_CNTL2 (AR_MRC_BASE + 0x8)
217#define AR_PHY_PILOT_SPUR_MASK (AR_MRC_BASE + 0xc)
218#define AR_PHY_CHAN_SPUR_MASK (AR_MRC_BASE + 0x10)
219#define AR_PHY_SGI_DELTA (AR_MRC_BASE + 0x14)
220#define AR_PHY_ML_CNTL_1 (AR_MRC_BASE + 0x18)
221#define AR_PHY_ML_CNTL_2 (AR_MRC_BASE + 0x1c)
222#define AR_PHY_TST_ADC (AR_MRC_BASE + 0x20)
223
224#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A 0x00000FE0
225#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_IDX_A_S 5
226#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A 0x1F
227#define AR_PHY_PILOT_SPUR_MASK_CF_PILOT_MASK_A_S 0
228
229#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A 0x00000FE0
230#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_IDX_A_S 5
231#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A 0x1F
232#define AR_PHY_CHAN_SPUR_MASK_CF_CHAN_MASK_A_S 0
233
234/*
235 * MRC Feild Definitions
236 */
237#define AR_PHY_SGI_DSC_MAN 0x0007FFF0
238#define AR_PHY_SGI_DSC_MAN_S 4
239#define AR_PHY_SGI_DSC_EXP 0x0000000F
240#define AR_PHY_SGI_DSC_EXP_S 0
241/*
242 * BBB Register Map
243 */
244#define AR_BBB_BASE 0x9d00
245
246/*
247 * AGC Register Map
248 */
249#define AR_AGC_BASE 0x9e00
250
251#define AR_PHY_SETTLING (AR_AGC_BASE + 0x0)
252#define AR_PHY_FORCEMAX_GAINS_0 (AR_AGC_BASE + 0x4)
253#define AR_PHY_GAINS_MINOFF0 (AR_AGC_BASE + 0x8)
254#define AR_PHY_DESIRED_SZ (AR_AGC_BASE + 0xc)
255#define AR_PHY_FIND_SIG (AR_AGC_BASE + 0x10)
256#define AR_PHY_AGC (AR_AGC_BASE + 0x14)
257#define AR_PHY_EXT_ATTEN_CTL_0 (AR_AGC_BASE + 0x18)
258#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c)
259#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20)
260#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
261#define AR_PHY_MC_GAIN_CTRL (AR_AGC_BASE + 0x28)
262#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
263#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
264#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
265#define AR_PHY_RIFS_SRCH (AR_AGC_BASE + 0x38)
266#define AR_PHY_PEAK_DET_CTRL_1 (AR_AGC_BASE + 0x3c)
267#define AR_PHY_PEAK_DET_CTRL_2 (AR_AGC_BASE + 0x40)
268#define AR_PHY_RX_GAIN_BOUNDS_1 (AR_AGC_BASE + 0x44)
269#define AR_PHY_RX_GAIN_BOUNDS_2 (AR_AGC_BASE + 0x48)
270#define AR_PHY_RSSI_0 (AR_AGC_BASE + 0x180)
271#define AR_PHY_SPUR_CCK_REP0 (AR_AGC_BASE + 0x184)
272#define AR_PHY_CCK_DETECT (AR_AGC_BASE + 0x1c0)
273#define AR_PHY_DAG_CTRLCCK (AR_AGC_BASE + 0x1c4)
274#define AR_PHY_IQCORR_CTRL_CCK (AR_AGC_BASE + 0x1c8)
275
276#define AR_PHY_CCK_SPUR_MIT (AR_AGC_BASE + 0x1cc)
277#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR 0x000001fe
278#define AR_PHY_CCK_SPUR_MIT_SPUR_RSSI_THR_S 1
279#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE 0x60000000
280#define AR_PHY_CCK_SPUR_MIT_SPUR_FILTER_TYPE_S 29
281#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT 0x00000001
282#define AR_PHY_CCK_SPUR_MIT_USE_CCK_SPUR_MIT_S 0
283#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ 0x1ffffe00
284#define AR_PHY_CCK_SPUR_MIT_CCK_SPUR_FREQ_S 9
285
286#define AR_PHY_RX_OCGAIN (AR_AGC_BASE + 0x200)
287
288#define AR_PHY_CCA_NOM_VAL_9300_2GHZ -110
289#define AR_PHY_CCA_NOM_VAL_9300_5GHZ -115
290#define AR_PHY_CCA_MIN_GOOD_VAL_9300_2GHZ -125
291#define AR_PHY_CCA_MIN_GOOD_VAL_9300_5GHZ -125
292#define AR_PHY_CCA_MAX_GOOD_VAL_9300_2GHZ -95
293#define AR_PHY_CCA_MAX_GOOD_VAL_9300_5GHZ -100
294
295/*
296 * AGC Field Definitions
297 */
298#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN 0x00FC0000
299#define AR_PHY_EXT_ATTEN_CTL_RXTX_MARGIN_S 18
300#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN 0x00003C00
301#define AR_PHY_EXT_ATTEN_CTL_BSW_MARGIN_S 10
302#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN 0x0000001F
303#define AR_PHY_EXT_ATTEN_CTL_BSW_ATTEN_S 0
304#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN 0x003E0000
305#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_MARGIN_S 17
306#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN 0x0001F000
307#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN_S 12
308#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB 0x00000FC0
309#define AR_PHY_EXT_ATTEN_CTL_XATTEN2_DB_S 6
310#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB 0x0000003F
311#define AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB_S 0
312#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
313#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
314#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
315#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
316#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
317#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
318#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
319#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
320#define AR_PHY_SETTLING_SWITCH 0x00003F80
321#define AR_PHY_SETTLING_SWITCH_S 7
322#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
323#define AR_PHY_DESIRED_SZ_ADC_S 0
324#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
325#define AR_PHY_DESIRED_SZ_PGA_S 8
326#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
327#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
328#define AR_PHY_MINCCA_PWR 0x1FF00000
329#define AR_PHY_MINCCA_PWR_S 20
330#define AR_PHY_CCA_THRESH62 0x0007F000
331#define AR_PHY_CCA_THRESH62_S 12
332#define AR9280_PHY_MINCCA_PWR 0x1FF00000
333#define AR9280_PHY_MINCCA_PWR_S 20
334#define AR9280_PHY_CCA_THRESH62 0x000FF000
335#define AR9280_PHY_CCA_THRESH62_S 12
336#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
337#define AR_PHY_EXT_CCA0_THRESH62_S 0
338#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
339#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
340#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
341#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
342#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
343
344#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
345#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR_S 9
346#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
347#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
348
349#define AR_PHY_RIFS_INIT_DELAY 0x3ff0000
350#define AR_PHY_AGC_COARSE_LOW 0x00007F80
351#define AR_PHY_AGC_COARSE_LOW_S 7
352#define AR_PHY_AGC_COARSE_HIGH 0x003F8000
353#define AR_PHY_AGC_COARSE_HIGH_S 15
354#define AR_PHY_AGC_COARSE_PWR_CONST 0x0000007F
355#define AR_PHY_AGC_COARSE_PWR_CONST_S 0
356#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
357#define AR_PHY_FIND_SIG_FIRSTEP_S 12
358#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
359#define AR_PHY_FIND_SIG_FIRPWR_S 18
360#define AR_PHY_FIND_SIG_FIRPWR_SIGN_BIT 25
361#define AR_PHY_FIND_SIG_RELPWR (0x1f << 6)
362#define AR_PHY_FIND_SIG_RELPWR_S 6
363#define AR_PHY_FIND_SIG_RELPWR_SIGN_BIT 11
364#define AR_PHY_FIND_SIG_RELSTEP 0x1f
365#define AR_PHY_FIND_SIG_RELSTEP_S 0
366#define AR_PHY_FIND_SIG_RELSTEP_SIGN_BIT 5
367#define AR_PHY_RESTART_DIV_GC 0x001C0000
368#define AR_PHY_RESTART_DIV_GC_S 18
369#define AR_PHY_RESTART_ENA 0x01
370#define AR_PHY_DC_RESTART_DIS 0x40000000
371
372#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON 0xFF000000
373#define AR_PHY_TPC_OLPC_GAIN_DELTA_PAL_ON_S 24
374#define AR_PHY_TPC_OLPC_GAIN_DELTA 0x00FF0000
375#define AR_PHY_TPC_OLPC_GAIN_DELTA_S 16
376
377#define AR_PHY_TPC_6_ERROR_EST_MODE 0x03000000
378#define AR_PHY_TPC_6_ERROR_EST_MODE_S 24
379
380/*
381 * SM Register Map
382 */
383#define AR_SM_BASE 0xa200
384
385#define AR_PHY_D2_CHIP_ID (AR_SM_BASE + 0x0)
386#define AR_PHY_GEN_CTRL (AR_SM_BASE + 0x4)
387#define AR_PHY_MODE (AR_SM_BASE + 0x8)
388#define AR_PHY_ACTIVE (AR_SM_BASE + 0xc)
389#define AR_PHY_SPUR_MASK_A (AR_SM_BASE + 0x20)
390#define AR_PHY_SPUR_MASK_B (AR_SM_BASE + 0x24)
391#define AR_PHY_SPECTRAL_SCAN (AR_SM_BASE + 0x28)
392#define AR_PHY_RADAR_BW_FILTER (AR_SM_BASE + 0x2c)
393#define AR_PHY_SEARCH_START_DELAY (AR_SM_BASE + 0x30)
394#define AR_PHY_MAX_RX_LEN (AR_SM_BASE + 0x34)
395#define AR_PHY_FRAME_CTL (AR_SM_BASE + 0x38)
396#define AR_PHY_RFBUS_REQ (AR_SM_BASE + 0x3c)
397#define AR_PHY_RFBUS_GRANT (AR_SM_BASE + 0x40)
398#define AR_PHY_RIFS (AR_SM_BASE + 0x44)
399#define AR_PHY_RX_CLR_DELAY (AR_SM_BASE + 0x50)
400#define AR_PHY_RX_DELAY (AR_SM_BASE + 0x54)
401
402#define AR_PHY_XPA_TIMING_CTL (AR_SM_BASE + 0x64)
403#define AR_PHY_MISC_PA_CTL (AR_SM_BASE + 0x80)
404#define AR_PHY_SWITCH_CHAIN_0 (AR_SM_BASE + 0x84)
405#define AR_PHY_SWITCH_COM (AR_SM_BASE + 0x88)
406#define AR_PHY_SWITCH_COM_2 (AR_SM_BASE + 0x8c)
407#define AR_PHY_RX_CHAINMASK (AR_SM_BASE + 0xa0)
408#define AR_PHY_CAL_CHAINMASK (AR_SM_BASE + 0xc0)
409#define AR_PHY_CALMODE (AR_SM_BASE + 0xc8)
410#define AR_PHY_FCAL_1 (AR_SM_BASE + 0xcc)
411#define AR_PHY_FCAL_2_0 (AR_SM_BASE + 0xd0)
412#define AR_PHY_DFT_TONE_CTL_0 (AR_SM_BASE + 0xd4)
413#define AR_PHY_CL_CAL_CTL (AR_SM_BASE + 0xd8)
414#define AR_PHY_CL_TAB_0 (AR_SM_BASE + 0x100)
415#define AR_PHY_SYNTH_CONTROL (AR_SM_BASE + 0x140)
416#define AR_PHY_ADDAC_CLK_SEL (AR_SM_BASE + 0x144)
417#define AR_PHY_PLL_CTL (AR_SM_BASE + 0x148)
418#define AR_PHY_ANALOG_SWAP (AR_SM_BASE + 0x14c)
419#define AR_PHY_ADDAC_PARA_CTL (AR_SM_BASE + 0x150)
420#define AR_PHY_XPA_CFG (AR_SM_BASE + 0x158)
421
422#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A 0x0001FC00
423#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_IDX_A_S 10
424#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A 0x3FF
425#define AR_PHY_SPUR_MASK_A_CF_PUNC_MASK_A_S 0
426
427#define AR_PHY_TEST (AR_SM_BASE + 0x160)
428
429#define AR_PHY_TEST_BBB_OBS_SEL 0x780000
430#define AR_PHY_TEST_BBB_OBS_SEL_S 19
431
432#define AR_PHY_TEST_RX_OBS_SEL_BIT5_S 23
433#define AR_PHY_TEST_RX_OBS_SEL_BIT5 (1 << AR_PHY_TEST_RX_OBS_SEL_BIT5_S)
434
435#define AR_PHY_TEST_CHAIN_SEL 0xC0000000
436#define AR_PHY_TEST_CHAIN_SEL_S 30
437
438#define AR_PHY_TEST_CTL_STATUS (AR_SM_BASE + 0x164)
439#define AR_PHY_TEST_CTL_TSTDAC_EN 0x1
440#define AR_PHY_TEST_CTL_TSTDAC_EN_S 0
441#define AR_PHY_TEST_CTL_TX_OBS_SEL 0x1C
442#define AR_PHY_TEST_CTL_TX_OBS_SEL_S 2
443#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL 0x60
444#define AR_PHY_TEST_CTL_TX_OBS_MUX_SEL_S 5
445#define AR_PHY_TEST_CTL_TSTADC_EN 0x100
446#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
447#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
448#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
449
450
451#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
452
453#define AR_PHY_CHAN_STATUS (AR_SM_BASE + 0x16c)
454#define AR_PHY_CHAN_INFO_MEMORY (AR_SM_BASE + 0x170)
455#define AR_PHY_CHNINFO_NOISEPWR (AR_SM_BASE + 0x174)
456#define AR_PHY_CHNINFO_GAINDIFF (AR_SM_BASE + 0x178)
457#define AR_PHY_CHNINFO_FINETIM (AR_SM_BASE + 0x17c)
458#define AR_PHY_CHAN_INFO_GAIN_0 (AR_SM_BASE + 0x180)
459#define AR_PHY_SCRAMBLER_SEED (AR_SM_BASE + 0x190)
460#define AR_PHY_CCK_TX_CTRL (AR_SM_BASE + 0x194)
461
462#define AR_PHY_HEAVYCLIP_CTL (AR_SM_BASE + 0x1a4)
463#define AR_PHY_HEAVYCLIP_20 (AR_SM_BASE + 0x1a8)
464#define AR_PHY_HEAVYCLIP_40 (AR_SM_BASE + 0x1ac)
465#define AR_PHY_ILLEGAL_TXRATE (AR_SM_BASE + 0x1b0)
466
467#define AR_PHY_PWRTX_MAX (AR_SM_BASE + 0x1f0)
468#define AR_PHY_POWER_TX_SUB (AR_SM_BASE + 0x1f4)
469
470#define AR_PHY_TPC_4_B0 (AR_SM_BASE + 0x204)
471#define AR_PHY_TPC_5_B0 (AR_SM_BASE + 0x208)
472#define AR_PHY_TPC_6_B0 (AR_SM_BASE + 0x20c)
473#define AR_PHY_TPC_11_B0 (AR_SM_BASE + 0x220)
474#define AR_PHY_TPC_18 (AR_SM_BASE + 0x23c)
475#define AR_PHY_TPC_19 (AR_SM_BASE + 0x240)
476
477#define AR_PHY_TX_FORCED_GAIN (AR_SM_BASE + 0x258)
478
479#define AR_PHY_PDADC_TAB_0 (AR_SM_BASE + 0x280)
480
481#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448)
482#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440)
483#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c)
484#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0 (AR_SM_BASE + 0x450)
485
486#define AR_PHY_PANIC_WD_STATUS (AR_SM_BASE + 0x5c0)
487#define AR_PHY_PANIC_WD_CTL_1 (AR_SM_BASE + 0x5c4)
488#define AR_PHY_PANIC_WD_CTL_2 (AR_SM_BASE + 0x5c8)
489#define AR_PHY_BT_CTL (AR_SM_BASE + 0x5cc)
490#define AR_PHY_ONLY_WARMRESET (AR_SM_BASE + 0x5d0)
491#define AR_PHY_ONLY_CTL (AR_SM_BASE + 0x5d4)
492#define AR_PHY_ECO_CTRL (AR_SM_BASE + 0x5dc)
493#define AR_PHY_BB_THERM_ADC_1 (AR_SM_BASE + 0x248)
494
495#define AR_PHY_65NM_CH0_SYNTH4 0x1608c
496#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT 0x00000002
497#define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S 1
498#define AR_PHY_65NM_CH0_SYNTH7 0x16098
499#define AR_PHY_65NM_CH0_BIAS1 0x160c0
500#define AR_PHY_65NM_CH0_BIAS2 0x160c4
501#define AR_PHY_65NM_CH0_BIAS4 0x160cc
502#define AR_PHY_65NM_CH0_RXTX4 0x1610c
503#define AR_PHY_65NM_CH0_THERM 0x16290
504
505#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
506#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
507#define AR_PHY_65NM_CH0_THERM_START 0x20000000
508#define AR_PHY_65NM_CH0_THERM_START_S 29
509#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT 0x0000ff00
510#define AR_PHY_65NM_CH0_THERM_SAR_ADC_OUT_S 8
511
512#define AR_PHY_65NM_CH0_RXTX1 0x16100
513#define AR_PHY_65NM_CH0_RXTX2 0x16104
514#define AR_PHY_65NM_CH1_RXTX1 0x16500
515#define AR_PHY_65NM_CH1_RXTX2 0x16504
516#define AR_PHY_65NM_CH2_RXTX1 0x16900
517#define AR_PHY_65NM_CH2_RXTX2 0x16904
518
519#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT 0x00380000
520#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT_S 19
521#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT 0x00c00000
522#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT_S 22
523#define AR_PHY_LNAGAIN_LONG_SHIFT 0xe0000000
524#define AR_PHY_LNAGAIN_LONG_SHIFT_S 29
525#define AR_PHY_MXRGAIN_LONG_SHIFT 0x03000000
526#define AR_PHY_MXRGAIN_LONG_SHIFT_S 24
527#define AR_PHY_VGAGAIN_LONG_SHIFT 0x1c000000
528#define AR_PHY_VGAGAIN_LONG_SHIFT_S 26
529#define AR_PHY_SCFIR_GAIN_LONG_SHIFT 0x00000001
530#define AR_PHY_SCFIR_GAIN_LONG_SHIFT_S 0
531#define AR_PHY_MANRXGAIN_LONG_SHIFT 0x00000002
532#define AR_PHY_MANRXGAIN_LONG_SHIFT_S 1
533
534/*
535 * SM Field Definitions
536 */
537#define AR_PHY_CL_CAL_ENABLE 0x00000002
538#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
539#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
540#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
541
542#define AR_PHY_ADDAC_PARACTL_OFF_PWDADC 0x00008000
543
544#define AR_PHY_FCAL20_CAP_STATUS_0 0x01f00000
545#define AR_PHY_FCAL20_CAP_STATUS_0_S 20
546
547#define AR_PHY_RFBUS_REQ_EN 0x00000001 /* request for RF bus */
548#define AR_PHY_RFBUS_GRANT_EN 0x00000001 /* RF bus granted */
549#define AR_PHY_GC_TURBO_MODE 0x00000001 /* set turbo mode bits */
550#define AR_PHY_GC_TURBO_SHORT 0x00000002 /* set short symbols to turbo mode setting */
551#define AR_PHY_GC_DYN2040_EN 0x00000004 /* enable dyn 20/40 mode */
552#define AR_PHY_GC_DYN2040_PRI_ONLY 0x00000008 /* dyn 20/40 - primary only */
553#define AR_PHY_GC_DYN2040_PRI_CH 0x00000010 /* dyn 20/40 - primary ch offset (0=+10MHz, 1=-10MHz)*/
554#define AR_PHY_GC_DYN2040_PRI_CH_S 4
555#define AR_PHY_GC_DYN2040_EXT_CH 0x00000020 /* dyn 20/40 - ext ch spacing (0=20MHz/ 1=25MHz) */
556#define AR_PHY_GC_HT_EN 0x00000040 /* ht enable */
557#define AR_PHY_GC_SHORT_GI_40 0x00000080 /* allow short GI for HT 40 */
558#define AR_PHY_GC_WALSH 0x00000100 /* walsh spatial spreading for 2 chains,2 streams TX */
559#define AR_PHY_GC_SINGLE_HT_LTF1 0x00000200 /* single length (4us) 1st HT long training symbol */
560#define AR_PHY_GC_GF_DETECT_EN 0x00000400 /* enable Green Field detection. Only affects rx, not tx */
561#define AR_PHY_GC_ENABLE_DAC_FIFO 0x00000800 /* fifo between bb and dac */
562#define AR_PHY_RX_DELAY_DELAY 0x00003FFF /* delay from wakeup to rx ena */
563
564#define AR_PHY_CALMODE_IQ 0x00000000
565#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
566#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
567#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
568#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
569#define AR_PHY_MODE_OFDM 0x00000000
570#define AR_PHY_MODE_CCK 0x00000001
571#define AR_PHY_MODE_DYNAMIC 0x00000004
572#define AR_PHY_MODE_DYNAMIC_S 2
573#define AR_PHY_MODE_HALF 0x00000020
574#define AR_PHY_MODE_QUARTER 0x00000040
575#define AR_PHY_MAC_CLK_MODE 0x00000080
576#define AR_PHY_MODE_DYN_CCK_DISABLE 0x00000100
577#define AR_PHY_MODE_SVD_HALF 0x00000200
578#define AR_PHY_ACTIVE_EN 0x00000001
579#define AR_PHY_ACTIVE_DIS 0x00000000
580#define AR_PHY_FORCE_XPA_CFG 0x000000001
581#define AR_PHY_FORCE_XPA_CFG_S 0
582#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF 0xFF000000
583#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF_S 24
584#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF 0x00FF0000
585#define AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF_S 16
586#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON 0x0000FF00
587#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAB_ON_S 8
588#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON 0x000000FF
589#define AR_PHY_XPA_TIMING_CTL_FRAME_XPAA_ON_S 0
590#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
591#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
592#define AR_PHY_TX_END_DATA_START 0x000000FF
593#define AR_PHY_TX_END_DATA_START_S 0
594#define AR_PHY_TX_END_PA_ON 0x0000FF00
595#define AR_PHY_TX_END_PA_ON_S 8
596#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
597#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
598#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
599#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
600#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
601#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
602#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
603#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
604#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
605#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
606#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
607#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
608#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
609#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
610#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
611#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
612#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
613#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
614#define AR_PHY_TPCGR1_FORCED_DAC_GAIN 0x0000003e
615#define AR_PHY_TPCGR1_FORCED_DAC_GAIN_S 1
616#define AR_PHY_TPCGR1_FORCE_DAC_GAIN 0x00000001
617#define AR_PHY_TXGAIN_FORCE 0x00000001
618#define AR_PHY_TXGAIN_FORCED_PADVGNRA 0x00003c00
619#define AR_PHY_TXGAIN_FORCED_PADVGNRA_S 10
620#define AR_PHY_TXGAIN_FORCED_PADVGNRB 0x0003c000
621#define AR_PHY_TXGAIN_FORCED_PADVGNRB_S 14
622#define AR_PHY_TXGAIN_FORCED_PADVGNRD 0x00c00000
623#define AR_PHY_TXGAIN_FORCED_PADVGNRD_S 22
624#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN 0x000003c0
625#define AR_PHY_TXGAIN_FORCED_TXMXRGAIN_S 6
626#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN 0x0000000e
627#define AR_PHY_TXGAIN_FORCED_TXBB1DBGAIN_S 1
628
629#define AR_PHY_POWER_TX_RATE1 0x9934
630#define AR_PHY_POWER_TX_RATE2 0x9938
631#define AR_PHY_POWER_TX_RATE_MAX 0x993c
632#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
633#define PHY_AGC_CLR 0x10000000
634#define RFSILENT_BB 0x00002000
635#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_MASK 0xFFF
636#define AR_PHY_CHAN_INFO_GAIN_DIFF_PPM_SIGNED_BIT 0x800
637#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
638#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
639#define AR_PHY_RX_DELAY_DELAY 0x00003FFF /* NOTE(review): duplicate of the identical definition at line 562 above; benign identical redefinition in C, but consider removing one copy */
640#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
641#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x00000001
642#define AR_PHY_SPECTRAL_SCAN_ENABLE_S 0
643#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002
644#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1
645#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0
646#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
647#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00
648#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
649#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000
650#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
651#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000
652#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24
653#define AR_PHY_CHANNEL_STATUS_RX_CLEAR 0x00000004
654#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT 0x01fc0000
655#define AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT_S 18
656#define AR_PHY_TX_IQCAL_START_DO_CAL 0x00000001
657#define AR_PHY_TX_IQCAL_START_DO_CAL_S 0
658
659#define AR_PHY_TX_IQCAL_STATUS_FAILED 0x00000001
660#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff
661#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0
662
663#define AR_PHY_TPC_18_THERM_CAL_VALUE 0xff
664#define AR_PHY_TPC_18_THERM_CAL_VALUE_S 0
665#define AR_PHY_TPC_19_ALPHA_THERM 0xff
666#define AR_PHY_TPC_19_ALPHA_THERM_S 0
667
668#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
669#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
670
671#define AR_PHY_BB_THERM_ADC_1_INIT_THERM 0x000000ff
672#define AR_PHY_BB_THERM_ADC_1_INIT_THERM_S 0
673
674/*
675 * Channel 1 Register Map
676 */
677#define AR_CHAN1_BASE 0xa800
678
679#define AR_PHY_EXT_CCA_1 (AR_CHAN1_BASE + 0x30)
680#define AR_PHY_TX_PHASE_RAMP_1 (AR_CHAN1_BASE + 0xd0)
681#define AR_PHY_ADC_GAIN_DC_CORR_1 (AR_CHAN1_BASE + 0xd4)
682
683#define AR_PHY_SPUR_REPORT_1 (AR_CHAN1_BASE + 0xa8)
684#define AR_PHY_CHAN_INFO_TAB_1 (AR_CHAN1_BASE + 0x300)
685#define AR_PHY_RX_IQCAL_CORR_B1 (AR_CHAN1_BASE + 0xdc)
686
687/*
688 * Channel 1 Field Definitions
689 */
690#define AR_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
691#define AR_PHY_CH1_EXT_MINCCA_PWR_S 16
692
693/*
694 * AGC 1 Register Map
695 */
696#define AR_AGC1_BASE 0xae00
697
698#define AR_PHY_FORCEMAX_GAINS_1 (AR_AGC1_BASE + 0x4)
699#define AR_PHY_EXT_ATTEN_CTL_1 (AR_AGC1_BASE + 0x18)
700#define AR_PHY_CCA_1 (AR_AGC1_BASE + 0x1c)
701#define AR_PHY_CCA_CTRL_1 (AR_AGC1_BASE + 0x20)
702#define AR_PHY_RSSI_1 (AR_AGC1_BASE + 0x180)
703#define AR_PHY_SPUR_CCK_REP_1 (AR_AGC1_BASE + 0x184)
704#define AR_PHY_RX_OCGAIN_2 (AR_AGC1_BASE + 0x200)
705
706/*
707 * AGC 1 Field Definitions
708 */
709#define AR_PHY_CH1_MINCCA_PWR 0x1FF00000
710#define AR_PHY_CH1_MINCCA_PWR_S 20
711
712/*
713 * SM 1 Register Map
714 */
715#define AR_SM1_BASE 0xb200
716
717#define AR_PHY_SWITCH_CHAIN_1 (AR_SM1_BASE + 0x84)
718#define AR_PHY_FCAL_2_1 (AR_SM1_BASE + 0xd0)
719#define AR_PHY_DFT_TONE_CTL_1 (AR_SM1_BASE + 0xd4)
720#define AR_PHY_CL_TAB_1 (AR_SM1_BASE + 0x100)
721#define AR_PHY_CHAN_INFO_GAIN_1 (AR_SM1_BASE + 0x180)
722#define AR_PHY_TPC_4_B1 (AR_SM1_BASE + 0x204)
723#define AR_PHY_TPC_5_B1 (AR_SM1_BASE + 0x208)
724#define AR_PHY_TPC_6_B1 (AR_SM1_BASE + 0x20c)
725#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
726#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
727#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
728#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B1 (AR_SM1_BASE + 0x450)
729
730/*
731 * Channel 2 Register Map
732 */
733#define AR_CHAN2_BASE 0xb800
734
735#define AR_PHY_EXT_CCA_2 (AR_CHAN2_BASE + 0x30)
736#define AR_PHY_TX_PHASE_RAMP_2 (AR_CHAN2_BASE + 0xd0)
737#define AR_PHY_ADC_GAIN_DC_CORR_2 (AR_CHAN2_BASE + 0xd4)
738
739#define AR_PHY_SPUR_REPORT_2 (AR_CHAN2_BASE + 0xa8)
740#define AR_PHY_CHAN_INFO_TAB_2 (AR_CHAN2_BASE + 0x300)
741#define AR_PHY_RX_IQCAL_CORR_B2 (AR_CHAN2_BASE + 0xdc)
742
743/*
744 * Channel 2 Field Definitions
745 */
746#define AR_PHY_CH2_EXT_MINCCA_PWR 0x01FF0000
747#define AR_PHY_CH2_EXT_MINCCA_PWR_S 16
748/*
749 * AGC 2 Register Map
750 */
751#define AR_AGC2_BASE 0xbe00
752
753#define AR_PHY_FORCEMAX_GAINS_2 (AR_AGC2_BASE + 0x4)
754#define AR_PHY_EXT_ATTEN_CTL_2 (AR_AGC2_BASE + 0x18)
755#define AR_PHY_CCA_2 (AR_AGC2_BASE + 0x1c)
756#define AR_PHY_CCA_CTRL_2 (AR_AGC2_BASE + 0x20)
757#define AR_PHY_RSSI_2 (AR_AGC2_BASE + 0x180)
758
759/*
760 * AGC 2 Field Definitions
761 */
762#define AR_PHY_CH2_MINCCA_PWR 0x1FF00000
763#define AR_PHY_CH2_MINCCA_PWR_S 20
764
765/*
766 * SM 2 Register Map
767 */
768#define AR_SM2_BASE 0xc200
769
770#define AR_PHY_SWITCH_CHAIN_2 (AR_SM2_BASE + 0x84)
771#define AR_PHY_FCAL_2_2 (AR_SM2_BASE + 0xd0)
772#define AR_PHY_DFT_TONE_CTL_2 (AR_SM2_BASE + 0xd4)
773#define AR_PHY_CL_TAB_2 (AR_SM2_BASE + 0x100)
774#define AR_PHY_CHAN_INFO_GAIN_2 (AR_SM2_BASE + 0x180)
775#define AR_PHY_TPC_4_B2 (AR_SM2_BASE + 0x204)
776#define AR_PHY_TPC_5_B2 (AR_SM2_BASE + 0x208)
777#define AR_PHY_TPC_6_B2 (AR_SM2_BASE + 0x20c)
778#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
779#define AR_PHY_PDADC_TAB_2 (AR_SM2_BASE + 0x240)
780#define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c)
781#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B2 (AR_SM2_BASE + 0x450)
782
783#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
784
785/*
786 * AGC 3 Register Map
787 */
788#define AR_AGC3_BASE 0xce00
789
790#define AR_PHY_RSSI_3 (AR_AGC3_BASE + 0x180)
791
792/*
793 * Misc helper defines
794 */
795#define AR_PHY_CHAIN_OFFSET (AR_CHAN1_BASE - AR_CHAN_BASE)
796
797#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (AR_PHY_ADC_GAIN_DC_CORR_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
798#define AR_PHY_NEW_ADC_DC_GAIN_CORR_9300_10(_i) (AR_PHY_ADC_GAIN_DC_CORR_0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
799#define AR_PHY_SWITCH_CHAIN(_i) (AR_PHY_SWITCH_CHAIN_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
800#define AR_PHY_EXT_ATTEN_CTL(_i) (AR_PHY_EXT_ATTEN_CTL_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
801
802#define AR_PHY_RXGAIN(_i) (AR_PHY_FORCEMAX_GAINS_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
803#define AR_PHY_TPCRG5(_i) (AR_PHY_TPC_5_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
804#define AR_PHY_PDADC_TAB(_i) (AR_PHY_PDADC_TAB_0 + (AR_PHY_CHAIN_OFFSET * (_i)))
805
806#define AR_PHY_CAL_MEAS_0(_i) (AR_PHY_IQ_ADC_MEAS_0_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
807#define AR_PHY_CAL_MEAS_1(_i) (AR_PHY_IQ_ADC_MEAS_1_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
808#define AR_PHY_CAL_MEAS_2(_i) (AR_PHY_IQ_ADC_MEAS_2_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
809#define AR_PHY_CAL_MEAS_3(_i) (AR_PHY_IQ_ADC_MEAS_3_B0 + (AR_PHY_CHAIN_OFFSET * (_i)))
810#define AR_PHY_CAL_MEAS_0_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_0_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
811#define AR_PHY_CAL_MEAS_1_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_1_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
812#define AR_PHY_CAL_MEAS_2_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_2_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
813#define AR_PHY_CAL_MEAS_3_9300_10(_i) (AR_PHY_IQ_ADC_MEAS_3_B0_9300_10 + (AR_PHY_CHAIN_OFFSET * (_i)))
814
815#define AR_PHY_BB_PANIC_NON_IDLE_ENABLE 0x00000001
816#define AR_PHY_BB_PANIC_IDLE_ENABLE 0x00000002
817#define AR_PHY_BB_PANIC_IDLE_MASK 0xFFFF0000
818#define AR_PHY_BB_PANIC_NON_IDLE_MASK 0x0000FFFC
819
820#define AR_PHY_BB_PANIC_RST_ENABLE 0x00000002
821#define AR_PHY_BB_PANIC_IRQ_ENABLE 0x00000004
822#define AR_PHY_BB_PANIC_CNTL2_MASK 0xFFFFFFF9
823
824#define AR_PHY_BB_WD_STATUS 0x00000007
825#define AR_PHY_BB_WD_STATUS_S 0
826#define AR_PHY_BB_WD_DET_HANG 0x00000008
827#define AR_PHY_BB_WD_DET_HANG_S 3
828#define AR_PHY_BB_WD_RADAR_SM 0x000000F0
829#define AR_PHY_BB_WD_RADAR_SM_S 4
830#define AR_PHY_BB_WD_RX_OFDM_SM 0x00000F00
831#define AR_PHY_BB_WD_RX_OFDM_SM_S 8
832#define AR_PHY_BB_WD_RX_CCK_SM 0x0000F000
833#define AR_PHY_BB_WD_RX_CCK_SM_S 12
834#define AR_PHY_BB_WD_TX_OFDM_SM 0x000F0000
835#define AR_PHY_BB_WD_TX_OFDM_SM_S 16
836#define AR_PHY_BB_WD_TX_CCK_SM 0x00F00000
837#define AR_PHY_BB_WD_TX_CCK_SM_S 20
838#define AR_PHY_BB_WD_AGC_SM 0x0F000000
839#define AR_PHY_BB_WD_AGC_SM_S 24
840#define AR_PHY_BB_WD_SRCH_SM 0xF0000000
841#define AR_PHY_BB_WD_SRCH_SM_S 28
842
843#define AR_PHY_BB_WD_STATUS_CLR 0x00000008
844
845void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
846
847#endif /* AR9003_PHY_H */
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 83c7ea4c007f..fbb7dec6ddeb 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -114,8 +114,10 @@ enum buffer_type {
114#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY) 114#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
115#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY) 115#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
116 116
117#define ATH_TXSTATUS_RING_SIZE 64
118
117struct ath_descdma { 119struct ath_descdma {
118 struct ath_desc *dd_desc; 120 void *dd_desc;
119 dma_addr_t dd_desc_paddr; 121 dma_addr_t dd_desc_paddr;
120 u32 dd_desc_len; 122 u32 dd_desc_len;
121 struct ath_buf *dd_bufptr; 123 struct ath_buf *dd_bufptr;
@@ -123,7 +125,7 @@ struct ath_descdma {
123 125
124int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, 126int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
125 struct list_head *head, const char *name, 127 struct list_head *head, const char *name,
126 int nbuf, int ndesc); 128 int nbuf, int ndesc, bool is_tx);
127void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, 129void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
128 struct list_head *head); 130 struct list_head *head);
129 131
@@ -178,9 +180,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
178#define BAW_WITHIN(_start, _bawsz, _seqno) \ 180#define BAW_WITHIN(_start, _bawsz, _seqno) \
179 ((((_seqno) - (_start)) & 4095) < (_bawsz)) 181 ((((_seqno) - (_start)) & 4095) < (_bawsz))
180 182
181#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
182#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
183#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
184#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)]) 183#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
185 184
186#define ATH_TX_COMPLETE_POLL_INT 1000 185#define ATH_TX_COMPLETE_POLL_INT 1000
@@ -191,6 +190,7 @@ enum ATH_AGGR_STATUS {
191 ATH_AGGR_LIMITED, 190 ATH_AGGR_LIMITED,
192}; 191};
193 192
193#define ATH_TXFIFO_DEPTH 8
194struct ath_txq { 194struct ath_txq {
195 u32 axq_qnum; 195 u32 axq_qnum;
196 u32 *axq_link; 196 u32 *axq_link;
@@ -200,6 +200,10 @@ struct ath_txq {
200 bool stopped; 200 bool stopped;
201 bool axq_tx_inprogress; 201 bool axq_tx_inprogress;
202 struct list_head axq_acq; 202 struct list_head axq_acq;
203 struct list_head txq_fifo[ATH_TXFIFO_DEPTH];
204 struct list_head txq_fifo_pending;
205 u8 txq_headidx;
206 u8 txq_tailidx;
203}; 207};
204 208
205#define AGGR_CLEANUP BIT(1) 209#define AGGR_CLEANUP BIT(1)
@@ -226,6 +230,12 @@ struct ath_tx {
226 struct ath_descdma txdma; 230 struct ath_descdma txdma;
227}; 231};
228 232
233struct ath_rx_edma {
234 struct sk_buff_head rx_fifo;
235 struct sk_buff_head rx_buffers;
236 u32 rx_fifo_hwsize;
237};
238
229struct ath_rx { 239struct ath_rx {
230 u8 defant; 240 u8 defant;
231 u8 rxotherant; 241 u8 rxotherant;
@@ -235,6 +245,8 @@ struct ath_rx {
235 spinlock_t rxbuflock; 245 spinlock_t rxbuflock;
236 struct list_head rxbuf; 246 struct list_head rxbuf;
237 struct ath_descdma rxdma; 247 struct ath_descdma rxdma;
248 struct ath_buf *rx_bufptr;
249 struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
238}; 250};
239 251
240int ath_startrecv(struct ath_softc *sc); 252int ath_startrecv(struct ath_softc *sc);
@@ -243,7 +255,7 @@ void ath_flushrecv(struct ath_softc *sc);
243u32 ath_calcrxfilter(struct ath_softc *sc); 255u32 ath_calcrxfilter(struct ath_softc *sc);
244int ath_rx_init(struct ath_softc *sc, int nbufs); 256int ath_rx_init(struct ath_softc *sc, int nbufs);
245void ath_rx_cleanup(struct ath_softc *sc); 257void ath_rx_cleanup(struct ath_softc *sc);
246int ath_rx_tasklet(struct ath_softc *sc, int flush); 258int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
247struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); 259struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
248void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); 260void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
249int ath_tx_setup(struct ath_softc *sc, int haltype); 261int ath_tx_setup(struct ath_softc *sc, int haltype);
@@ -261,6 +273,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
261int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, 273int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
262 struct ath_tx_control *txctl); 274 struct ath_tx_control *txctl);
263void ath_tx_tasklet(struct ath_softc *sc); 275void ath_tx_tasklet(struct ath_softc *sc);
276void ath_tx_edma_tasklet(struct ath_softc *sc);
264void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb); 277void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
265bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno); 278bool ath_tx_aggr_check(struct ath_softc *sc, struct ath_node *an, u8 tidno);
266void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 279void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -483,7 +496,6 @@ struct ath_softc {
483 bool ps_enabled; 496 bool ps_enabled;
484 bool ps_idle; 497 bool ps_idle;
485 unsigned long ps_usecount; 498 unsigned long ps_usecount;
486 enum ath9k_int imask;
487 499
488 struct ath_config config; 500 struct ath_config config;
489 struct ath_rx rx; 501 struct ath_rx rx;
@@ -511,6 +523,8 @@ struct ath_softc {
511 struct ath_beacon_config cur_beacon_conf; 523 struct ath_beacon_config cur_beacon_conf;
512 struct delayed_work tx_complete_work; 524 struct delayed_work tx_complete_work;
513 struct ath_btcoex btcoex; 525 struct ath_btcoex btcoex;
526
527 struct ath_descdma txsdma;
514}; 528};
515 529
516struct ath_wiphy { 530struct ath_wiphy {
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b4a31a43a62c..c8a4558f79ba 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -93,8 +93,6 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
93 antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1); 93 antenna = ((sc->beacon.ast_be_xmit / sc->nbcnvifs) & 1 ? 2 : 1);
94 } 94 }
95 95
96 ds->ds_data = bf->bf_buf_addr;
97
98 sband = &sc->sbands[common->hw->conf.channel->band]; 96 sband = &sc->sbands[common->hw->conf.channel->band];
99 rate = sband->bitrates[rateidx].hw_value; 97 rate = sband->bitrates[rateidx].hw_value;
100 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT) 98 if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
@@ -109,7 +107,8 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
109 107
110 /* NB: beacon's BufLen must be a multiple of 4 bytes */ 108 /* NB: beacon's BufLen must be a multiple of 4 bytes */
111 ath9k_hw_filltxdesc(ah, ds, roundup(skb->len, 4), 109 ath9k_hw_filltxdesc(ah, ds, roundup(skb->len, 4),
112 true, true, ds); 110 true, true, ds, bf->bf_buf_addr,
111 sc->beacon.beaconq);
113 112
114 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); 113 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
115 series[0].Tries = 1; 114 series[0].Tries = 1;
@@ -524,6 +523,7 @@ static void ath9k_beacon_init(struct ath_softc *sc,
524static void ath_beacon_config_ap(struct ath_softc *sc, 523static void ath_beacon_config_ap(struct ath_softc *sc,
525 struct ath_beacon_config *conf) 524 struct ath_beacon_config *conf)
526{ 525{
526 struct ath_hw *ah = sc->sc_ah;
527 u32 nexttbtt, intval; 527 u32 nexttbtt, intval;
528 528
529 /* NB: the beacon interval is kept internally in TU's */ 529 /* NB: the beacon interval is kept internally in TU's */
@@ -539,15 +539,15 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
539 * prepare beacon frames. 539 * prepare beacon frames.
540 */ 540 */
541 intval |= ATH9K_BEACON_ENA; 541 intval |= ATH9K_BEACON_ENA;
542 sc->imask |= ATH9K_INT_SWBA; 542 ah->imask |= ATH9K_INT_SWBA;
543 ath_beaconq_config(sc); 543 ath_beaconq_config(sc);
544 544
545 /* Set the computed AP beacon timers */ 545 /* Set the computed AP beacon timers */
546 546
547 ath9k_hw_set_interrupts(sc->sc_ah, 0); 547 ath9k_hw_set_interrupts(ah, 0);
548 ath9k_beacon_init(sc, nexttbtt, intval); 548 ath9k_beacon_init(sc, nexttbtt, intval);
549 sc->beacon.bmisscnt = 0; 549 sc->beacon.bmisscnt = 0;
550 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 550 ath9k_hw_set_interrupts(ah, ah->imask);
551 551
552 /* Clear the reset TSF flag, so that subsequent beacon updation 552 /* Clear the reset TSF flag, so that subsequent beacon updation
553 will not reset the HW TSF. */ 553 will not reset the HW TSF. */
@@ -566,7 +566,8 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
566static void ath_beacon_config_sta(struct ath_softc *sc, 566static void ath_beacon_config_sta(struct ath_softc *sc,
567 struct ath_beacon_config *conf) 567 struct ath_beacon_config *conf)
568{ 568{
569 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 569 struct ath_hw *ah = sc->sc_ah;
570 struct ath_common *common = ath9k_hw_common(ah);
570 struct ath9k_beacon_state bs; 571 struct ath9k_beacon_state bs;
571 int dtimperiod, dtimcount, sleepduration; 572 int dtimperiod, dtimcount, sleepduration;
572 int cfpperiod, cfpcount; 573 int cfpperiod, cfpcount;
@@ -605,7 +606,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
605 * Pull nexttbtt forward to reflect the current 606 * Pull nexttbtt forward to reflect the current
606 * TSF and calculate dtim+cfp state for the result. 607 * TSF and calculate dtim+cfp state for the result.
607 */ 608 */
608 tsf = ath9k_hw_gettsf64(sc->sc_ah); 609 tsf = ath9k_hw_gettsf64(ah);
609 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; 610 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
610 611
611 num_beacons = tsftu / intval + 1; 612 num_beacons = tsftu / intval + 1;
@@ -678,17 +679,18 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
678 679
679 /* Set the computed STA beacon timers */ 680 /* Set the computed STA beacon timers */
680 681
681 ath9k_hw_set_interrupts(sc->sc_ah, 0); 682 ath9k_hw_set_interrupts(ah, 0);
682 ath9k_hw_set_sta_beacon_timers(sc->sc_ah, &bs); 683 ath9k_hw_set_sta_beacon_timers(ah, &bs);
683 sc->imask |= ATH9K_INT_BMISS; 684 ah->imask |= ATH9K_INT_BMISS;
684 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 685 ath9k_hw_set_interrupts(ah, ah->imask);
685} 686}
686 687
687static void ath_beacon_config_adhoc(struct ath_softc *sc, 688static void ath_beacon_config_adhoc(struct ath_softc *sc,
688 struct ath_beacon_config *conf, 689 struct ath_beacon_config *conf,
689 struct ieee80211_vif *vif) 690 struct ieee80211_vif *vif)
690{ 691{
691 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 692 struct ath_hw *ah = sc->sc_ah;
693 struct ath_common *common = ath9k_hw_common(ah);
692 u64 tsf; 694 u64 tsf;
693 u32 tsftu, intval, nexttbtt; 695 u32 tsftu, intval, nexttbtt;
694 696
@@ -703,7 +705,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
703 else if (intval) 705 else if (intval)
704 nexttbtt = roundup(nexttbtt, intval); 706 nexttbtt = roundup(nexttbtt, intval);
705 707
706 tsf = ath9k_hw_gettsf64(sc->sc_ah); 708 tsf = ath9k_hw_gettsf64(ah);
707 tsftu = TSF_TO_TU((u32)(tsf>>32), (u32)tsf) + FUDGE; 709 tsftu = TSF_TO_TU((u32)(tsf>>32), (u32)tsf) + FUDGE;
708 do { 710 do {
709 nexttbtt += intval; 711 nexttbtt += intval;
@@ -719,20 +721,20 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
719 * self-linked tx descriptor and let the hardware deal with things. 721 * self-linked tx descriptor and let the hardware deal with things.
720 */ 722 */
721 intval |= ATH9K_BEACON_ENA; 723 intval |= ATH9K_BEACON_ENA;
722 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) 724 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_VEOL))
723 sc->imask |= ATH9K_INT_SWBA; 725 ah->imask |= ATH9K_INT_SWBA;
724 726
725 ath_beaconq_config(sc); 727 ath_beaconq_config(sc);
726 728
727 /* Set the computed ADHOC beacon timers */ 729 /* Set the computed ADHOC beacon timers */
728 730
729 ath9k_hw_set_interrupts(sc->sc_ah, 0); 731 ath9k_hw_set_interrupts(ah, 0);
730 ath9k_beacon_init(sc, nexttbtt, intval); 732 ath9k_beacon_init(sc, nexttbtt, intval);
731 sc->beacon.bmisscnt = 0; 733 sc->beacon.bmisscnt = 0;
732 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 734 ath9k_hw_set_interrupts(ah, ah->imask);
733 735
734 /* FIXME: Handle properly when vif is NULL */ 736 /* FIXME: Handle properly when vif is NULL */
735 if (vif && sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL) 737 if (vif && ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
736 ath_beacon_start_adhoc(sc, vif); 738 ath_beacon_start_adhoc(sc, vif);
737} 739}
738 740
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 238a5744d8e9..07b8fa6fb62f 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -15,6 +15,9 @@
15 */ 15 */
16 16
17#include "hw.h" 17#include "hw.h"
18#include "hw-ops.h"
19
20/* Common calibration code */
18 21
19/* We can tune this as we go by monitoring really low values */ 22/* We can tune this as we go by monitoring really low values */
20#define ATH9K_NF_TOO_LOW -60 23#define ATH9K_NF_TOO_LOW -60
@@ -83,93 +86,11 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
83 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer); 86 ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
84 } 87 }
85 } 88 }
86 return;
87} 89}
88 90
89static void ath9k_hw_do_getnf(struct ath_hw *ah, 91static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
90 int16_t nfarray[NUM_NF_READINGS]) 92 enum ieee80211_band band,
91{ 93 int16_t *nft)
92 struct ath_common *common = ath9k_hw_common(ah);
93 int16_t nf;
94
95 if (AR_SREV_9280_10_OR_LATER(ah))
96 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
97 else
98 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
99
100 if (nf & 0x100)
101 nf = 0 - ((nf ^ 0x1ff) + 1);
102 ath_print(common, ATH_DBG_CALIBRATE,
103 "NF calibrated [ctl] [chain 0] is %d\n", nf);
104 nfarray[0] = nf;
105
106 if (!AR_SREV_9285(ah)) {
107 if (AR_SREV_9280_10_OR_LATER(ah))
108 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
109 AR9280_PHY_CH1_MINCCA_PWR);
110 else
111 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
112 AR_PHY_CH1_MINCCA_PWR);
113
114 if (nf & 0x100)
115 nf = 0 - ((nf ^ 0x1ff) + 1);
116 ath_print(common, ATH_DBG_CALIBRATE,
117 "NF calibrated [ctl] [chain 1] is %d\n", nf);
118 nfarray[1] = nf;
119
120 if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
121 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA),
122 AR_PHY_CH2_MINCCA_PWR);
123 if (nf & 0x100)
124 nf = 0 - ((nf ^ 0x1ff) + 1);
125 ath_print(common, ATH_DBG_CALIBRATE,
126 "NF calibrated [ctl] [chain 2] is %d\n", nf);
127 nfarray[2] = nf;
128 }
129 }
130
131 if (AR_SREV_9280_10_OR_LATER(ah))
132 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
133 AR9280_PHY_EXT_MINCCA_PWR);
134 else
135 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA),
136 AR_PHY_EXT_MINCCA_PWR);
137
138 if (nf & 0x100)
139 nf = 0 - ((nf ^ 0x1ff) + 1);
140 ath_print(common, ATH_DBG_CALIBRATE,
141 "NF calibrated [ext] [chain 0] is %d\n", nf);
142 nfarray[3] = nf;
143
144 if (!AR_SREV_9285(ah)) {
145 if (AR_SREV_9280_10_OR_LATER(ah))
146 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
147 AR9280_PHY_CH1_EXT_MINCCA_PWR);
148 else
149 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
150 AR_PHY_CH1_EXT_MINCCA_PWR);
151
152 if (nf & 0x100)
153 nf = 0 - ((nf ^ 0x1ff) + 1);
154 ath_print(common, ATH_DBG_CALIBRATE,
155 "NF calibrated [ext] [chain 1] is %d\n", nf);
156 nfarray[4] = nf;
157
158 if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
159 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA),
160 AR_PHY_CH2_EXT_MINCCA_PWR);
161 if (nf & 0x100)
162 nf = 0 - ((nf ^ 0x1ff) + 1);
163 ath_print(common, ATH_DBG_CALIBRATE,
164 "NF calibrated [ext] [chain 2] is %d\n", nf);
165 nfarray[5] = nf;
166 }
167 }
168}
169
170static bool getNoiseFloorThresh(struct ath_hw *ah,
171 enum ieee80211_band band,
172 int16_t *nft)
173{ 94{
174 switch (band) { 95 switch (band) {
175 case IEEE80211_BAND_5GHZ: 96 case IEEE80211_BAND_5GHZ:
@@ -186,44 +107,8 @@ static bool getNoiseFloorThresh(struct ath_hw *ah,
186 return true; 107 return true;
187} 108}
188 109
189static void ath9k_hw_setup_calibration(struct ath_hw *ah, 110void ath9k_hw_reset_calibration(struct ath_hw *ah,
190 struct ath9k_cal_list *currCal) 111 struct ath9k_cal_list *currCal)
191{
192 struct ath_common *common = ath9k_hw_common(ah);
193
194 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
195 AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
196 currCal->calData->calCountMax);
197
198 switch (currCal->calData->calType) {
199 case IQ_MISMATCH_CAL:
200 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
201 ath_print(common, ATH_DBG_CALIBRATE,
202 "starting IQ Mismatch Calibration\n");
203 break;
204 case ADC_GAIN_CAL:
205 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
206 ath_print(common, ATH_DBG_CALIBRATE,
207 "starting ADC Gain Calibration\n");
208 break;
209 case ADC_DC_CAL:
210 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
211 ath_print(common, ATH_DBG_CALIBRATE,
212 "starting ADC DC Calibration\n");
213 break;
214 case ADC_DC_INIT_CAL:
215 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
216 ath_print(common, ATH_DBG_CALIBRATE,
217 "starting Init ADC DC Calibration\n");
218 break;
219 }
220
221 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
222 AR_PHY_TIMING_CTRL4_DO_CAL);
223}
224
225static void ath9k_hw_reset_calibration(struct ath_hw *ah,
226 struct ath9k_cal_list *currCal)
227{ 112{
228 int i; 113 int i;
229 114
@@ -241,324 +126,6 @@ static void ath9k_hw_reset_calibration(struct ath_hw *ah,
241 ah->cal_samples = 0; 126 ah->cal_samples = 0;
242} 127}
243 128
244static bool ath9k_hw_per_calibration(struct ath_hw *ah,
245 struct ath9k_channel *ichan,
246 u8 rxchainmask,
247 struct ath9k_cal_list *currCal)
248{
249 bool iscaldone = false;
250
251 if (currCal->calState == CAL_RUNNING) {
252 if (!(REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) &
253 AR_PHY_TIMING_CTRL4_DO_CAL)) {
254
255 currCal->calData->calCollect(ah);
256 ah->cal_samples++;
257
258 if (ah->cal_samples >= currCal->calData->calNumSamples) {
259 int i, numChains = 0;
260 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
261 if (rxchainmask & (1 << i))
262 numChains++;
263 }
264
265 currCal->calData->calPostProc(ah, numChains);
266 ichan->CalValid |= currCal->calData->calType;
267 currCal->calState = CAL_DONE;
268 iscaldone = true;
269 } else {
270 ath9k_hw_setup_calibration(ah, currCal);
271 }
272 }
273 } else if (!(ichan->CalValid & currCal->calData->calType)) {
274 ath9k_hw_reset_calibration(ah, currCal);
275 }
276
277 return iscaldone;
278}
279
280/* Assumes you are talking about the currently configured channel */
281static bool ath9k_hw_iscal_supported(struct ath_hw *ah,
282 enum ath9k_cal_types calType)
283{
284 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
285
286 switch (calType & ah->supp_cals) {
287 case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
288 return true;
289 case ADC_GAIN_CAL:
290 case ADC_DC_CAL:
291 if (!(conf->channel->band == IEEE80211_BAND_2GHZ &&
292 conf_is_ht20(conf)))
293 return true;
294 break;
295 }
296 return false;
297}
298
299static void ath9k_hw_iqcal_collect(struct ath_hw *ah)
300{
301 int i;
302
303 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
304 ah->totalPowerMeasI[i] +=
305 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
306 ah->totalPowerMeasQ[i] +=
307 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
308 ah->totalIqCorrMeas[i] +=
309 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
310 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
311 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
312 ah->cal_samples, i, ah->totalPowerMeasI[i],
313 ah->totalPowerMeasQ[i],
314 ah->totalIqCorrMeas[i]);
315 }
316}
317
318static void ath9k_hw_adc_gaincal_collect(struct ath_hw *ah)
319{
320 int i;
321
322 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
323 ah->totalAdcIOddPhase[i] +=
324 REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
325 ah->totalAdcIEvenPhase[i] +=
326 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
327 ah->totalAdcQOddPhase[i] +=
328 REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
329 ah->totalAdcQEvenPhase[i] +=
330 REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
331
332 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
333 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
334 "oddq=0x%08x; evenq=0x%08x;\n",
335 ah->cal_samples, i,
336 ah->totalAdcIOddPhase[i],
337 ah->totalAdcIEvenPhase[i],
338 ah->totalAdcQOddPhase[i],
339 ah->totalAdcQEvenPhase[i]);
340 }
341}
342
343static void ath9k_hw_adc_dccal_collect(struct ath_hw *ah)
344{
345 int i;
346
347 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
348 ah->totalAdcDcOffsetIOddPhase[i] +=
349 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_0(i));
350 ah->totalAdcDcOffsetIEvenPhase[i] +=
351 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
352 ah->totalAdcDcOffsetQOddPhase[i] +=
353 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
354 ah->totalAdcDcOffsetQEvenPhase[i] +=
355 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
356
357 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
358 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
359 "oddq=0x%08x; evenq=0x%08x;\n",
360 ah->cal_samples, i,
361 ah->totalAdcDcOffsetIOddPhase[i],
362 ah->totalAdcDcOffsetIEvenPhase[i],
363 ah->totalAdcDcOffsetQOddPhase[i],
364 ah->totalAdcDcOffsetQEvenPhase[i]);
365 }
366}
367
368static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
369{
370 struct ath_common *common = ath9k_hw_common(ah);
371 u32 powerMeasQ, powerMeasI, iqCorrMeas;
372 u32 qCoffDenom, iCoffDenom;
373 int32_t qCoff, iCoff;
374 int iqCorrNeg, i;
375
376 for (i = 0; i < numChains; i++) {
377 powerMeasI = ah->totalPowerMeasI[i];
378 powerMeasQ = ah->totalPowerMeasQ[i];
379 iqCorrMeas = ah->totalIqCorrMeas[i];
380
381 ath_print(common, ATH_DBG_CALIBRATE,
382 "Starting IQ Cal and Correction for Chain %d\n",
383 i);
384
385 ath_print(common, ATH_DBG_CALIBRATE,
386 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
387 i, ah->totalIqCorrMeas[i]);
388
389 iqCorrNeg = 0;
390
391 if (iqCorrMeas > 0x80000000) {
392 iqCorrMeas = (0xffffffff - iqCorrMeas) + 1;
393 iqCorrNeg = 1;
394 }
395
396 ath_print(common, ATH_DBG_CALIBRATE,
397 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
398 ath_print(common, ATH_DBG_CALIBRATE,
399 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
400 ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
401 iqCorrNeg);
402
403 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
404 qCoffDenom = powerMeasQ / 64;
405
406 if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
407 (qCoffDenom != 0)) {
408 iCoff = iqCorrMeas / iCoffDenom;
409 qCoff = powerMeasI / qCoffDenom - 64;
410 ath_print(common, ATH_DBG_CALIBRATE,
411 "Chn %d iCoff = 0x%08x\n", i, iCoff);
412 ath_print(common, ATH_DBG_CALIBRATE,
413 "Chn %d qCoff = 0x%08x\n", i, qCoff);
414
415 iCoff = iCoff & 0x3f;
416 ath_print(common, ATH_DBG_CALIBRATE,
417 "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
418 if (iqCorrNeg == 0x0)
419 iCoff = 0x40 - iCoff;
420
421 if (qCoff > 15)
422 qCoff = 15;
423 else if (qCoff <= -16)
424 qCoff = 16;
425
426 ath_print(common, ATH_DBG_CALIBRATE,
427 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
428 i, iCoff, qCoff);
429
430 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
431 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
432 iCoff);
433 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
434 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
435 qCoff);
436 ath_print(common, ATH_DBG_CALIBRATE,
437 "IQ Cal and Correction done for Chain %d\n",
438 i);
439 }
440 }
441
442 REG_SET_BIT(ah, AR_PHY_TIMING_CTRL4(0),
443 AR_PHY_TIMING_CTRL4_IQCORR_ENABLE);
444}
445
446static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
447{
448 struct ath_common *common = ath9k_hw_common(ah);
449 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
450 u32 qGainMismatch, iGainMismatch, val, i;
451
452 for (i = 0; i < numChains; i++) {
453 iOddMeasOffset = ah->totalAdcIOddPhase[i];
454 iEvenMeasOffset = ah->totalAdcIEvenPhase[i];
455 qOddMeasOffset = ah->totalAdcQOddPhase[i];
456 qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
457
458 ath_print(common, ATH_DBG_CALIBRATE,
459 "Starting ADC Gain Cal for Chain %d\n", i);
460
461 ath_print(common, ATH_DBG_CALIBRATE,
462 "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
463 iOddMeasOffset);
464 ath_print(common, ATH_DBG_CALIBRATE,
465 "Chn %d pwr_meas_even_i = 0x%08x\n", i,
466 iEvenMeasOffset);
467 ath_print(common, ATH_DBG_CALIBRATE,
468 "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
469 qOddMeasOffset);
470 ath_print(common, ATH_DBG_CALIBRATE,
471 "Chn %d pwr_meas_even_q = 0x%08x\n", i,
472 qEvenMeasOffset);
473
474 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
475 iGainMismatch =
476 ((iEvenMeasOffset * 32) /
477 iOddMeasOffset) & 0x3f;
478 qGainMismatch =
479 ((qOddMeasOffset * 32) /
480 qEvenMeasOffset) & 0x3f;
481
482 ath_print(common, ATH_DBG_CALIBRATE,
483 "Chn %d gain_mismatch_i = 0x%08x\n", i,
484 iGainMismatch);
485 ath_print(common, ATH_DBG_CALIBRATE,
486 "Chn %d gain_mismatch_q = 0x%08x\n", i,
487 qGainMismatch);
488
489 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
490 val &= 0xfffff000;
491 val |= (qGainMismatch) | (iGainMismatch << 6);
492 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
493
494 ath_print(common, ATH_DBG_CALIBRATE,
495 "ADC Gain Cal done for Chain %d\n", i);
496 }
497 }
498
499 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
500 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
501 AR_PHY_NEW_ADC_GAIN_CORR_ENABLE);
502}
503
504static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
505{
506 struct ath_common *common = ath9k_hw_common(ah);
507 u32 iOddMeasOffset, iEvenMeasOffset, val, i;
508 int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
509 const struct ath9k_percal_data *calData =
510 ah->cal_list_curr->calData;
511 u32 numSamples =
512 (1 << (calData->calCountMax + 5)) * calData->calNumSamples;
513
514 for (i = 0; i < numChains; i++) {
515 iOddMeasOffset = ah->totalAdcDcOffsetIOddPhase[i];
516 iEvenMeasOffset = ah->totalAdcDcOffsetIEvenPhase[i];
517 qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
518 qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
519
520 ath_print(common, ATH_DBG_CALIBRATE,
521 "Starting ADC DC Offset Cal for Chain %d\n", i);
522
523 ath_print(common, ATH_DBG_CALIBRATE,
524 "Chn %d pwr_meas_odd_i = %d\n", i,
525 iOddMeasOffset);
526 ath_print(common, ATH_DBG_CALIBRATE,
527 "Chn %d pwr_meas_even_i = %d\n", i,
528 iEvenMeasOffset);
529 ath_print(common, ATH_DBG_CALIBRATE,
530 "Chn %d pwr_meas_odd_q = %d\n", i,
531 qOddMeasOffset);
532 ath_print(common, ATH_DBG_CALIBRATE,
533 "Chn %d pwr_meas_even_q = %d\n", i,
534 qEvenMeasOffset);
535
536 iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
537 numSamples) & 0x1ff;
538 qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
539 numSamples) & 0x1ff;
540
541 ath_print(common, ATH_DBG_CALIBRATE,
542 "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
543 iDcMismatch);
544 ath_print(common, ATH_DBG_CALIBRATE,
545 "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
546 qDcMismatch);
547
548 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
549 val &= 0xc0000fff;
550 val |= (qDcMismatch << 12) | (iDcMismatch << 21);
551 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
552
553 ath_print(common, ATH_DBG_CALIBRATE,
554 "ADC DC Offset Cal done for Chain %d\n", i);
555 }
556
557 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
558 REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0)) |
559 AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE);
560}
561
562/* This is done for the currently configured channel */ 129/* This is done for the currently configured channel */
563bool ath9k_hw_reset_calvalid(struct ath_hw *ah) 130bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
564{ 131{
@@ -605,72 +172,6 @@ void ath9k_hw_start_nfcal(struct ath_hw *ah)
605 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 172 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
606} 173}
607 174
608void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
609{
610 struct ath9k_nfcal_hist *h;
611 int i, j;
612 int32_t val;
613 const u32 ar5416_cca_regs[6] = {
614 AR_PHY_CCA,
615 AR_PHY_CH1_CCA,
616 AR_PHY_CH2_CCA,
617 AR_PHY_EXT_CCA,
618 AR_PHY_CH1_EXT_CCA,
619 AR_PHY_CH2_EXT_CCA
620 };
621 u8 chainmask, rx_chain_status;
622
623 rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
624 if (AR_SREV_9285(ah))
625 chainmask = 0x9;
626 else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
627 if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
628 chainmask = 0x1B;
629 else
630 chainmask = 0x09;
631 } else {
632 if (rx_chain_status & 0x4)
633 chainmask = 0x3F;
634 else if (rx_chain_status & 0x2)
635 chainmask = 0x1B;
636 else
637 chainmask = 0x09;
638 }
639
640 h = ah->nfCalHist;
641
642 for (i = 0; i < NUM_NF_READINGS; i++) {
643 if (chainmask & (1 << i)) {
644 val = REG_READ(ah, ar5416_cca_regs[i]);
645 val &= 0xFFFFFE00;
646 val |= (((u32) (h[i].privNF) << 1) & 0x1ff);
647 REG_WRITE(ah, ar5416_cca_regs[i], val);
648 }
649 }
650
651 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
652 AR_PHY_AGC_CONTROL_ENABLE_NF);
653 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
654 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
655 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
656
657 for (j = 0; j < 5; j++) {
658 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
659 AR_PHY_AGC_CONTROL_NF) == 0)
660 break;
661 udelay(50);
662 }
663
664 for (i = 0; i < NUM_NF_READINGS; i++) {
665 if (chainmask & (1 << i)) {
666 val = REG_READ(ah, ar5416_cca_regs[i]);
667 val &= 0xFFFFFE00;
668 val |= (((u32) (-50) << 1) & 0x1ff);
669 REG_WRITE(ah, ar5416_cca_regs[i], val);
670 }
671 }
672}
673
674int16_t ath9k_hw_getnf(struct ath_hw *ah, 175int16_t ath9k_hw_getnf(struct ath_hw *ah,
675 struct ath9k_channel *chan) 176 struct ath9k_channel *chan)
676{ 177{
@@ -690,7 +191,7 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
690 } else { 191 } else {
691 ath9k_hw_do_getnf(ah, nfarray); 192 ath9k_hw_do_getnf(ah, nfarray);
692 nf = nfarray[0]; 193 nf = nfarray[0];
693 if (getNoiseFloorThresh(ah, c->band, &nfThresh) 194 if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
694 && nf > nfThresh) { 195 && nf > nfThresh) {
695 ath_print(common, ATH_DBG_CALIBRATE, 196 ath_print(common, ATH_DBG_CALIBRATE,
696 "noise floor failed detected; " 197 "noise floor failed detected; "
@@ -715,7 +216,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
715 216
716 if (AR_SREV_9280(ah)) 217 if (AR_SREV_9280(ah))
717 noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE; 218 noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE;
718 else if (AR_SREV_9285(ah)) 219 else if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
719 noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE; 220 noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE;
720 else if (AR_SREV_9287(ah)) 221 else if (AR_SREV_9287(ah))
721 noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE; 222 noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE;
@@ -748,508 +249,3 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
748 return nf; 249 return nf;
749} 250}
750EXPORT_SYMBOL(ath9k_hw_getchan_noise); 251EXPORT_SYMBOL(ath9k_hw_getchan_noise);
751
752static void ath9k_olc_temp_compensation_9287(struct ath_hw *ah)
753{
754 u32 rddata;
755 int32_t delta, currPDADC, slope;
756
757 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
758 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
759
760 if (ah->initPDADC == 0 || currPDADC == 0) {
761 /*
762 * Zero value indicates that no frames have been transmitted yet,
763 * can't do temperature compensation until frames are transmitted.
764 */
765 return;
766 } else {
767 slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
768
769 if (slope == 0) { /* to avoid divide by zero case */
770 delta = 0;
771 } else {
772 delta = ((currPDADC - ah->initPDADC)*4) / slope;
773 }
774 REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
775 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
776 REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
777 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
778 }
779}
780
781static void ath9k_olc_temp_compensation(struct ath_hw *ah)
782{
783 u32 rddata, i;
784 int delta, currPDADC, regval;
785
786 if (OLC_FOR_AR9287_10_LATER) {
787 ath9k_olc_temp_compensation_9287(ah);
788 } else {
789 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
790 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
791
792 if (ah->initPDADC == 0 || currPDADC == 0) {
793 return;
794 } else {
795 if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
796 delta = (currPDADC - ah->initPDADC + 4) / 8;
797 else
798 delta = (currPDADC - ah->initPDADC + 5) / 10;
799
800 if (delta != ah->PDADCdelta) {
801 ah->PDADCdelta = delta;
802 for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
803 regval = ah->originalGain[i] - delta;
804 if (regval < 0)
805 regval = 0;
806
807 REG_RMW_FIELD(ah,
808 AR_PHY_TX_GAIN_TBL1 + i * 4,
809 AR_PHY_TX_GAIN, regval);
810 }
811 }
812 }
813 }
814}
815
816static void ath9k_hw_9271_pa_cal(struct ath_hw *ah, bool is_reset)
817{
818 u32 regVal;
819 unsigned int i;
820 u32 regList [][2] = {
821 { 0x786c, 0 },
822 { 0x7854, 0 },
823 { 0x7820, 0 },
824 { 0x7824, 0 },
825 { 0x7868, 0 },
826 { 0x783c, 0 },
827 { 0x7838, 0 } ,
828 { 0x7828, 0 } ,
829 };
830
831 for (i = 0; i < ARRAY_SIZE(regList); i++)
832 regList[i][1] = REG_READ(ah, regList[i][0]);
833
834 regVal = REG_READ(ah, 0x7834);
835 regVal &= (~(0x1));
836 REG_WRITE(ah, 0x7834, regVal);
837 regVal = REG_READ(ah, 0x9808);
838 regVal |= (0x1 << 27);
839 REG_WRITE(ah, 0x9808, regVal);
840
841 /* 786c,b23,1, pwddac=1 */
842 REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
843 /* 7854, b5,1, pdrxtxbb=1 */
844 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
845 /* 7854, b7,1, pdv2i=1 */
846 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
847 /* 7854, b8,1, pddacinterface=1 */
848 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
849 /* 7824,b12,0, offcal=0 */
850 REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
851 /* 7838, b1,0, pwddb=0 */
852 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
853 /* 7820,b11,0, enpacal=0 */
854 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
855 /* 7820,b25,1, pdpadrv1=0 */
856 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
857 /* 7820,b24,0, pdpadrv2=0 */
858 REG_RMW_FIELD(ah, AR9285_AN_RF2G1,AR9285_AN_RF2G1_PDPADRV2,0);
859 /* 7820,b23,0, pdpaout=0 */
860 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
861 /* 783c,b14-16,7, padrvgn2tab_0=7 */
862 REG_RMW_FIELD(ah, AR9285_AN_RF2G8,AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
863 /*
864 * 7838,b29-31,0, padrvgn1tab_0=0
865 * does not matter since we turn it off
866 */
867 REG_RMW_FIELD(ah, AR9285_AN_RF2G7,AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
868
869 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9271_AN_RF2G3_CCOMP, 0xfff);
870
871 /* Set:
872 * localmode=1,bmode=1,bmoderxtx=1,synthon=1,
873 * txon=1,paon=1,oscon=1,synthon_force=1
874 */
875 REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
876 udelay(30);
877 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
878
879 /* find off_6_1; */
880 for (i = 6; i > 0; i--) {
881 regVal = REG_READ(ah, 0x7834);
882 regVal |= (1 << (20 + i));
883 REG_WRITE(ah, 0x7834, regVal);
884 udelay(1);
885 //regVal = REG_READ(ah, 0x7834);
886 regVal &= (~(0x1 << (20 + i)));
887 regVal |= (MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9)
888 << (20 + i));
889 REG_WRITE(ah, 0x7834, regVal);
890 }
891
892 regVal = (regVal >>20) & 0x7f;
893
894 /* Update PA cal info */
895 if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
896 if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
897 ah->pacal_info.max_skipcount =
898 2 * ah->pacal_info.max_skipcount;
899 ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
900 } else {
901 ah->pacal_info.max_skipcount = 1;
902 ah->pacal_info.skipcount = 0;
903 ah->pacal_info.prev_offset = regVal;
904 }
905
906 regVal = REG_READ(ah, 0x7834);
907 regVal |= 0x1;
908 REG_WRITE(ah, 0x7834, regVal);
909 regVal = REG_READ(ah, 0x9808);
910 regVal &= (~(0x1 << 27));
911 REG_WRITE(ah, 0x9808, regVal);
912
913 for (i = 0; i < ARRAY_SIZE(regList); i++)
914 REG_WRITE(ah, regList[i][0], regList[i][1]);
915}
916
917static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
918{
919 struct ath_common *common = ath9k_hw_common(ah);
920 u32 regVal;
921 int i, offset, offs_6_1, offs_0;
922 u32 ccomp_org, reg_field;
923 u32 regList[][2] = {
924 { 0x786c, 0 },
925 { 0x7854, 0 },
926 { 0x7820, 0 },
927 { 0x7824, 0 },
928 { 0x7868, 0 },
929 { 0x783c, 0 },
930 { 0x7838, 0 },
931 };
932
933 ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
934
935 /* PA CAL is not needed for high power solution */
936 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
937 AR5416_EEP_TXGAIN_HIGH_POWER)
938 return;
939
940 if (AR_SREV_9285_11(ah)) {
941 REG_WRITE(ah, AR9285_AN_TOP4, (AR9285_AN_TOP4_DEFAULT | 0x14));
942 udelay(10);
943 }
944
945 for (i = 0; i < ARRAY_SIZE(regList); i++)
946 regList[i][1] = REG_READ(ah, regList[i][0]);
947
948 regVal = REG_READ(ah, 0x7834);
949 regVal &= (~(0x1));
950 REG_WRITE(ah, 0x7834, regVal);
951 regVal = REG_READ(ah, 0x9808);
952 regVal |= (0x1 << 27);
953 REG_WRITE(ah, 0x9808, regVal);
954
955 REG_RMW_FIELD(ah, AR9285_AN_TOP3, AR9285_AN_TOP3_PWDDAC, 1);
956 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDRXTXBB1, 1);
957 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDV2I, 1);
958 REG_RMW_FIELD(ah, AR9285_AN_RXTXBB1, AR9285_AN_RXTXBB1_PDDACIF, 1);
959 REG_RMW_FIELD(ah, AR9285_AN_RF2G2, AR9285_AN_RF2G2_OFFCAL, 0);
960 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PWDDB, 0);
961 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_ENPACAL, 0);
962 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV1, 0);
963 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPADRV2, 0);
964 REG_RMW_FIELD(ah, AR9285_AN_RF2G1, AR9285_AN_RF2G1_PDPAOUT, 0);
965 REG_RMW_FIELD(ah, AR9285_AN_RF2G8, AR9285_AN_RF2G8_PADRVGN2TAB0, 7);
966 REG_RMW_FIELD(ah, AR9285_AN_RF2G7, AR9285_AN_RF2G7_PADRVGN2TAB0, 0);
967 ccomp_org = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_CCOMP);
968 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, 0xf);
969
970 REG_WRITE(ah, AR9285_AN_TOP2, 0xca0358a0);
971 udelay(30);
972 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, 0);
973 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 0);
974
975 for (i = 6; i > 0; i--) {
976 regVal = REG_READ(ah, 0x7834);
977 regVal |= (1 << (19 + i));
978 REG_WRITE(ah, 0x7834, regVal);
979 udelay(1);
980 regVal = REG_READ(ah, 0x7834);
981 regVal &= (~(0x1 << (19 + i)));
982 reg_field = MS(REG_READ(ah, 0x7840), AR9285_AN_RXTXBB1_SPARE9);
983 regVal |= (reg_field << (19 + i));
984 REG_WRITE(ah, 0x7834, regVal);
985 }
986
987 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, 1);
988 udelay(1);
989 reg_field = MS(REG_READ(ah, AR9285_AN_RF2G9), AR9285_AN_RXTXBB1_SPARE9);
990 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, reg_field);
991 offs_6_1 = MS(REG_READ(ah, AR9285_AN_RF2G6), AR9285_AN_RF2G6_OFFS);
992 offs_0 = MS(REG_READ(ah, AR9285_AN_RF2G3), AR9285_AN_RF2G3_PDVCCOMP);
993
994 offset = (offs_6_1<<1) | offs_0;
995 offset = offset - 0;
996 offs_6_1 = offset>>1;
997 offs_0 = offset & 1;
998
999 if ((!is_reset) && (ah->pacal_info.prev_offset == offset)) {
1000 if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
1001 ah->pacal_info.max_skipcount =
1002 2 * ah->pacal_info.max_skipcount;
1003 ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
1004 } else {
1005 ah->pacal_info.max_skipcount = 1;
1006 ah->pacal_info.skipcount = 0;
1007 ah->pacal_info.prev_offset = offset;
1008 }
1009
1010 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_OFFS, offs_6_1);
1011 REG_RMW_FIELD(ah, AR9285_AN_RF2G3, AR9285_AN_RF2G3_PDVCCOMP, offs_0);
1012
1013 regVal = REG_READ(ah, 0x7834);
1014 regVal |= 0x1;
1015 REG_WRITE(ah, 0x7834, regVal);
1016 regVal = REG_READ(ah, 0x9808);
1017 regVal &= (~(0x1 << 27));
1018 REG_WRITE(ah, 0x9808, regVal);
1019
1020 for (i = 0; i < ARRAY_SIZE(regList); i++)
1021 REG_WRITE(ah, regList[i][0], regList[i][1]);
1022
1023 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9285_AN_RF2G6_CCOMP, ccomp_org);
1024
1025 if (AR_SREV_9285_11(ah))
1026 REG_WRITE(ah, AR9285_AN_TOP4, AR9285_AN_TOP4_DEFAULT);
1027
1028}
1029
1030bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
1031 u8 rxchainmask, bool longcal)
1032{
1033 bool iscaldone = true;
1034 struct ath9k_cal_list *currCal = ah->cal_list_curr;
1035
1036 if (currCal &&
1037 (currCal->calState == CAL_RUNNING ||
1038 currCal->calState == CAL_WAITING)) {
1039 iscaldone = ath9k_hw_per_calibration(ah, chan,
1040 rxchainmask, currCal);
1041 if (iscaldone) {
1042 ah->cal_list_curr = currCal = currCal->calNext;
1043
1044 if (currCal->calState == CAL_WAITING) {
1045 iscaldone = false;
1046 ath9k_hw_reset_calibration(ah, currCal);
1047 }
1048 }
1049 }
1050
1051 /* Do NF cal only at longer intervals */
1052 if (longcal) {
1053 /* Do periodic PAOffset Cal */
1054 if (AR_SREV_9271(ah))
1055 ath9k_hw_9271_pa_cal(ah, false);
1056 else if (AR_SREV_9285_11_OR_LATER(ah)) {
1057 if (!ah->pacal_info.skipcount)
1058 ath9k_hw_9285_pa_cal(ah, false);
1059 else
1060 ah->pacal_info.skipcount--;
1061 }
1062
1063 if (OLC_FOR_AR9280_20_LATER || OLC_FOR_AR9287_10_LATER)
1064 ath9k_olc_temp_compensation(ah);
1065
1066 /* Get the value from the previous NF cal and update history buffer */
1067 ath9k_hw_getnf(ah, chan);
1068
1069 /*
1070 * Load the NF from history buffer of the current channel.
1071 * NF is slow time-variant, so it is OK to use a historical value.
1072 */
1073 ath9k_hw_loadnf(ah, ah->curchan);
1074
1075 ath9k_hw_start_nfcal(ah);
1076 }
1077
1078 return iscaldone;
1079}
1080EXPORT_SYMBOL(ath9k_hw_calibrate);
1081
1082/* Carrier leakage Calibration fix */
1083static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1084{
1085 struct ath_common *common = ath9k_hw_common(ah);
1086
1087 REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
1088 if (IS_CHAN_HT20(chan)) {
1089 REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
1090 REG_SET_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
1091 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1092 AR_PHY_AGC_CONTROL_FLTR_CAL);
1093 REG_CLR_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
1094 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
1095 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
1096 AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
1097 ath_print(common, ATH_DBG_CALIBRATE, "offset "
1098 "calibration failed to complete in "
1099 "1ms; noisy ??\n");
1100 return false;
1101 }
1102 REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
1103 REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
1104 REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
1105 }
1106 REG_CLR_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
1107 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
1108 REG_SET_BIT(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_CAL_ENABLE);
1109 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
1110 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
1111 0, AH_WAIT_TIMEOUT)) {
1112 ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
1113 "failed to complete in 1ms; noisy ??\n");
1114 return false;
1115 }
1116
1117 REG_SET_BIT(ah, AR_PHY_ADC_CTL, AR_PHY_ADC_CTL_OFF_PWDADC);
1118 REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
1119 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_FLTR_CAL);
1120
1121 return true;
1122}
1123
1124bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1125{
1126 struct ath_common *common = ath9k_hw_common(ah);
1127
1128 if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
1129 if (!ar9285_clc(ah, chan))
1130 return false;
1131 } else {
1132 if (AR_SREV_9280_10_OR_LATER(ah)) {
1133 if (!AR_SREV_9287_10_OR_LATER(ah))
1134 REG_CLR_BIT(ah, AR_PHY_ADC_CTL,
1135 AR_PHY_ADC_CTL_OFF_PWDADC);
1136 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
1137 AR_PHY_AGC_CONTROL_FLTR_CAL);
1138 }
1139
1140 /* Calibrate the AGC */
1141 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
1142 REG_READ(ah, AR_PHY_AGC_CONTROL) |
1143 AR_PHY_AGC_CONTROL_CAL);
1144
1145 /* Poll for offset calibration complete */
1146 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
1147 0, AH_WAIT_TIMEOUT)) {
1148 ath_print(common, ATH_DBG_CALIBRATE,
1149 "offset calibration failed to "
1150 "complete in 1ms; noisy environment?\n");
1151 return false;
1152 }
1153
1154 if (AR_SREV_9280_10_OR_LATER(ah)) {
1155 if (!AR_SREV_9287_10_OR_LATER(ah))
1156 REG_SET_BIT(ah, AR_PHY_ADC_CTL,
1157 AR_PHY_ADC_CTL_OFF_PWDADC);
1158 REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
1159 AR_PHY_AGC_CONTROL_FLTR_CAL);
1160 }
1161 }
1162
1163 /* Do PA Calibration */
1164 if (AR_SREV_9271(ah))
1165 ath9k_hw_9271_pa_cal(ah, true);
1166 else if (AR_SREV_9285_11_OR_LATER(ah))
1167 ath9k_hw_9285_pa_cal(ah, true);
1168
1169 /* Do NF Calibration after DC offset and other calibrations */
1170 REG_WRITE(ah, AR_PHY_AGC_CONTROL,
1171 REG_READ(ah, AR_PHY_AGC_CONTROL) | AR_PHY_AGC_CONTROL_NF);
1172
1173 ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;
1174
1175 /* Enable IQ, ADC Gain and ADC DC offset CALs */
1176 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah)) {
1177 if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
1178 INIT_CAL(&ah->adcgain_caldata);
1179 INSERT_CAL(ah, &ah->adcgain_caldata);
1180 ath_print(common, ATH_DBG_CALIBRATE,
1181 "enabling ADC Gain Calibration.\n");
1182 }
1183 if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) {
1184 INIT_CAL(&ah->adcdc_caldata);
1185 INSERT_CAL(ah, &ah->adcdc_caldata);
1186 ath_print(common, ATH_DBG_CALIBRATE,
1187 "enabling ADC DC Calibration.\n");
1188 }
1189 if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
1190 INIT_CAL(&ah->iq_caldata);
1191 INSERT_CAL(ah, &ah->iq_caldata);
1192 ath_print(common, ATH_DBG_CALIBRATE,
1193 "enabling IQ Calibration.\n");
1194 }
1195
1196 ah->cal_list_curr = ah->cal_list;
1197
1198 if (ah->cal_list_curr)
1199 ath9k_hw_reset_calibration(ah, ah->cal_list_curr);
1200 }
1201
1202 chan->CalValid = 0;
1203
1204 return true;
1205}
1206
1207const struct ath9k_percal_data iq_cal_multi_sample = {
1208 IQ_MISMATCH_CAL,
1209 MAX_CAL_SAMPLES,
1210 PER_MIN_LOG_COUNT,
1211 ath9k_hw_iqcal_collect,
1212 ath9k_hw_iqcalibrate
1213};
1214const struct ath9k_percal_data iq_cal_single_sample = {
1215 IQ_MISMATCH_CAL,
1216 MIN_CAL_SAMPLES,
1217 PER_MAX_LOG_COUNT,
1218 ath9k_hw_iqcal_collect,
1219 ath9k_hw_iqcalibrate
1220};
1221const struct ath9k_percal_data adc_gain_cal_multi_sample = {
1222 ADC_GAIN_CAL,
1223 MAX_CAL_SAMPLES,
1224 PER_MIN_LOG_COUNT,
1225 ath9k_hw_adc_gaincal_collect,
1226 ath9k_hw_adc_gaincal_calibrate
1227};
1228const struct ath9k_percal_data adc_gain_cal_single_sample = {
1229 ADC_GAIN_CAL,
1230 MIN_CAL_SAMPLES,
1231 PER_MAX_LOG_COUNT,
1232 ath9k_hw_adc_gaincal_collect,
1233 ath9k_hw_adc_gaincal_calibrate
1234};
1235const struct ath9k_percal_data adc_dc_cal_multi_sample = {
1236 ADC_DC_CAL,
1237 MAX_CAL_SAMPLES,
1238 PER_MIN_LOG_COUNT,
1239 ath9k_hw_adc_dccal_collect,
1240 ath9k_hw_adc_dccal_calibrate
1241};
1242const struct ath9k_percal_data adc_dc_cal_single_sample = {
1243 ADC_DC_CAL,
1244 MIN_CAL_SAMPLES,
1245 PER_MAX_LOG_COUNT,
1246 ath9k_hw_adc_dccal_collect,
1247 ath9k_hw_adc_dccal_calibrate
1248};
1249const struct ath9k_percal_data adc_init_dc_cal = {
1250 ADC_DC_INIT_CAL,
1251 MIN_CAL_SAMPLES,
1252 INIT_LOG_COUNT,
1253 ath9k_hw_adc_dccal_collect,
1254 ath9k_hw_adc_dccal_calibrate
1255};
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index b2c873e97485..24538bdb9126 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -19,14 +19,6 @@
19 19
20#include "hw.h" 20#include "hw.h"
21 21
22extern const struct ath9k_percal_data iq_cal_multi_sample;
23extern const struct ath9k_percal_data iq_cal_single_sample;
24extern const struct ath9k_percal_data adc_gain_cal_multi_sample;
25extern const struct ath9k_percal_data adc_gain_cal_single_sample;
26extern const struct ath9k_percal_data adc_dc_cal_multi_sample;
27extern const struct ath9k_percal_data adc_dc_cal_single_sample;
28extern const struct ath9k_percal_data adc_init_dc_cal;
29
30#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85 22#define AR_PHY_CCA_MAX_AR5416_GOOD_VALUE -85
31#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112 23#define AR_PHY_CCA_MAX_AR9280_GOOD_VALUE -112
32#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118 24#define AR_PHY_CCA_MAX_AR9285_GOOD_VALUE -118
@@ -76,7 +68,8 @@ enum ath9k_cal_types {
76 ADC_DC_INIT_CAL = 0x1, 68 ADC_DC_INIT_CAL = 0x1,
77 ADC_GAIN_CAL = 0x2, 69 ADC_GAIN_CAL = 0x2,
78 ADC_DC_CAL = 0x4, 70 ADC_DC_CAL = 0x4,
79 IQ_MISMATCH_CAL = 0x8 71 IQ_MISMATCH_CAL = 0x8,
72 TEMP_COMP_CAL = 0x10,
80}; 73};
81 74
82enum ath9k_cal_state { 75enum ath9k_cal_state {
@@ -122,14 +115,12 @@ struct ath9k_pacal_info{
122 115
123bool ath9k_hw_reset_calvalid(struct ath_hw *ah); 116bool ath9k_hw_reset_calvalid(struct ath_hw *ah);
124void ath9k_hw_start_nfcal(struct ath_hw *ah); 117void ath9k_hw_start_nfcal(struct ath_hw *ah);
125void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan);
126int16_t ath9k_hw_getnf(struct ath_hw *ah, 118int16_t ath9k_hw_getnf(struct ath_hw *ah,
127 struct ath9k_channel *chan); 119 struct ath9k_channel *chan);
128void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah); 120void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah);
129s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan); 121s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan);
130bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan, 122void ath9k_hw_reset_calibration(struct ath_hw *ah,
131 u8 rxchainmask, bool longcal); 123 struct ath9k_cal_list *currCal);
132bool ath9k_hw_init_cal(struct ath_hw *ah, 124
133 struct ath9k_channel *chan);
134 125
135#endif /* CALIB_H */ 126#endif /* CALIB_H */
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 4d775ae141db..7707341cd0d3 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -57,13 +57,19 @@ static bool ath9k_rx_accept(struct ath_common *common,
57 * rs_more indicates chained descriptors which can be used 57 * rs_more indicates chained descriptors which can be used
58 * to link buffers together for a sort of scatter-gather 58 * to link buffers together for a sort of scatter-gather
59 * operation. 59 * operation.
60 * 60 * reject the frame, we don't support scatter-gather yet and
61 * the frame is probably corrupt anyway
62 */
63 if (rx_stats->rs_more)
64 return false;
65
66 /*
61 * The rx_stats->rs_status will not be set until the end of the 67 * The rx_stats->rs_status will not be set until the end of the
62 * chained descriptors so it can be ignored if rs_more is set. The 68 * chained descriptors so it can be ignored if rs_more is set. The
63 * rs_more will be false at the last element of the chained 69 * rs_more will be false at the last element of the chained
64 * descriptors. 70 * descriptors.
65 */ 71 */
66 if (!rx_stats->rs_more && rx_stats->rs_status != 0) { 72 if (rx_stats->rs_status != 0) {
67 if (rx_stats->rs_status & ATH9K_RXERR_CRC) 73 if (rx_stats->rs_status & ATH9K_RXERR_CRC)
68 rxs->flag |= RX_FLAG_FAILED_FCS_CRC; 74 rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
69 if (rx_stats->rs_status & ATH9K_RXERR_PHY) 75 if (rx_stats->rs_status & ATH9K_RXERR_PHY)
@@ -102,11 +108,11 @@ static bool ath9k_rx_accept(struct ath_common *common,
102 return true; 108 return true;
103} 109}
104 110
105static u8 ath9k_process_rate(struct ath_common *common, 111static int ath9k_process_rate(struct ath_common *common,
106 struct ieee80211_hw *hw, 112 struct ieee80211_hw *hw,
107 struct ath_rx_status *rx_stats, 113 struct ath_rx_status *rx_stats,
108 struct ieee80211_rx_status *rxs, 114 struct ieee80211_rx_status *rxs,
109 struct sk_buff *skb) 115 struct sk_buff *skb)
110{ 116{
111 struct ieee80211_supported_band *sband; 117 struct ieee80211_supported_band *sband;
112 enum ieee80211_band band; 118 enum ieee80211_band band;
@@ -122,25 +128,32 @@ static u8 ath9k_process_rate(struct ath_common *common,
122 rxs->flag |= RX_FLAG_40MHZ; 128 rxs->flag |= RX_FLAG_40MHZ;
123 if (rx_stats->rs_flags & ATH9K_RX_GI) 129 if (rx_stats->rs_flags & ATH9K_RX_GI)
124 rxs->flag |= RX_FLAG_SHORT_GI; 130 rxs->flag |= RX_FLAG_SHORT_GI;
125 return rx_stats->rs_rate & 0x7f; 131 rxs->rate_idx = rx_stats->rs_rate & 0x7f;
132 return 0;
126 } 133 }
127 134
128 for (i = 0; i < sband->n_bitrates; i++) { 135 for (i = 0; i < sband->n_bitrates; i++) {
129 if (sband->bitrates[i].hw_value == rx_stats->rs_rate) 136 if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
130 return i; 137 rxs->rate_idx = i;
138 return 0;
139 }
131 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) { 140 if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
132 rxs->flag |= RX_FLAG_SHORTPRE; 141 rxs->flag |= RX_FLAG_SHORTPRE;
133 return i; 142 rxs->rate_idx = i;
143 return 0;
134 } 144 }
135 } 145 }
136 146
137 /* No valid hardware bitrate found -- we should not get here */ 147 /*
148 * No valid hardware bitrate found -- we should not get here
149 * because hardware has already validated this frame as OK.
150 */
138 ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected " 151 ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected "
139 "0x%02x using 1 Mbit\n", rx_stats->rs_rate); 152 "0x%02x using 1 Mbit\n", rx_stats->rs_rate);
140 if ((common->debug_mask & ATH_DBG_XMIT)) 153 if ((common->debug_mask & ATH_DBG_XMIT))
141 print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len); 154 print_hex_dump_bytes("", DUMP_PREFIX_NONE, skb->data, skb->len);
142 155
143 return 0; 156 return -EINVAL;
144} 157}
145 158
146static void ath9k_process_rssi(struct ath_common *common, 159static void ath9k_process_rssi(struct ath_common *common,
@@ -202,17 +215,22 @@ int ath9k_cmn_rx_skb_preprocess(struct ath_common *common,
202 struct ath_hw *ah = common->ah; 215 struct ath_hw *ah = common->ah;
203 216
204 memset(rx_status, 0, sizeof(struct ieee80211_rx_status)); 217 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
218
219 /*
220 * everything but the rate is checked here, the rate check is done
221 * separately to avoid doing two lookups for a rate for each frame.
222 */
205 if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error)) 223 if (!ath9k_rx_accept(common, skb, rx_status, rx_stats, decrypt_error))
206 return -EINVAL; 224 return -EINVAL;
207 225
208 ath9k_process_rssi(common, hw, skb, rx_stats); 226 ath9k_process_rssi(common, hw, skb, rx_stats);
209 227
210 rx_status->rate_idx = ath9k_process_rate(common, hw, 228 if (ath9k_process_rate(common, hw, rx_stats, rx_status, skb))
211 rx_stats, rx_status, skb); 229 return -EINVAL;
230
212 rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp); 231 rx_status->mactime = ath9k_hw_extend_tsf(ah, rx_stats->rs_tstamp);
213 rx_status->band = hw->conf.channel->band; 232 rx_status->band = hw->conf.channel->band;
214 rx_status->freq = hw->conf.channel->center_freq; 233 rx_status->freq = hw->conf.channel->center_freq;
215 rx_status->noise = common->ani.noise_floor;
216 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi; 234 rx_status->signal = ATH_DEFAULT_NOISE_FLOOR + rx_stats->rs_rssi;
217 rx_status->antenna = rx_stats->rs_antenna; 235 rx_status->antenna = rx_stats->rs_antenna;
218 rx_status->flag |= RX_FLAG_TSFT; 236 rx_status->flag |= RX_FLAG_TSFT;
@@ -255,7 +273,8 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
255 273
256 keyix = rx_stats->rs_keyix; 274 keyix = rx_stats->rs_keyix;
257 275
258 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) { 276 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
277 ieee80211_has_protected(fc)) {
259 rxs->flag |= RX_FLAG_DECRYPTED; 278 rxs->flag |= RX_FLAG_DECRYPTED;
260 } else if (ieee80211_has_protected(fc) 279 } else if (ieee80211_has_protected(fc)
261 && !decrypt_error && skb->len >= hdrlen + 4) { 280 && !decrypt_error && skb->len >= hdrlen + 4) {
@@ -286,6 +305,345 @@ int ath9k_cmn_padpos(__le16 frame_control)
286} 305}
287EXPORT_SYMBOL(ath9k_cmn_padpos); 306EXPORT_SYMBOL(ath9k_cmn_padpos);
288 307
308int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
309{
310 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
311
312 if (tx_info->control.hw_key) {
313 if (tx_info->control.hw_key->alg == ALG_WEP)
314 return ATH9K_KEY_TYPE_WEP;
315 else if (tx_info->control.hw_key->alg == ALG_TKIP)
316 return ATH9K_KEY_TYPE_TKIP;
317 else if (tx_info->control.hw_key->alg == ALG_CCMP)
318 return ATH9K_KEY_TYPE_AES;
319 }
320
321 return ATH9K_KEY_TYPE_CLEAR;
322}
323EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
324
325static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
326 enum nl80211_channel_type channel_type)
327{
328 u32 chanmode = 0;
329
330 switch (chan->band) {
331 case IEEE80211_BAND_2GHZ:
332 switch (channel_type) {
333 case NL80211_CHAN_NO_HT:
334 case NL80211_CHAN_HT20:
335 chanmode = CHANNEL_G_HT20;
336 break;
337 case NL80211_CHAN_HT40PLUS:
338 chanmode = CHANNEL_G_HT40PLUS;
339 break;
340 case NL80211_CHAN_HT40MINUS:
341 chanmode = CHANNEL_G_HT40MINUS;
342 break;
343 }
344 break;
345 case IEEE80211_BAND_5GHZ:
346 switch (channel_type) {
347 case NL80211_CHAN_NO_HT:
348 case NL80211_CHAN_HT20:
349 chanmode = CHANNEL_A_HT20;
350 break;
351 case NL80211_CHAN_HT40PLUS:
352 chanmode = CHANNEL_A_HT40PLUS;
353 break;
354 case NL80211_CHAN_HT40MINUS:
355 chanmode = CHANNEL_A_HT40MINUS;
356 break;
357 }
358 break;
359 default:
360 break;
361 }
362
363 return chanmode;
364}
365
366/*
367 * Update internal channel flags.
368 */
369void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
370 struct ath9k_channel *ichan)
371{
372 struct ieee80211_channel *chan = hw->conf.channel;
373 struct ieee80211_conf *conf = &hw->conf;
374
375 ichan->channel = chan->center_freq;
376 ichan->chan = chan;
377
378 if (chan->band == IEEE80211_BAND_2GHZ) {
379 ichan->chanmode = CHANNEL_G;
380 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
381 } else {
382 ichan->chanmode = CHANNEL_A;
383 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
384 }
385
386 if (conf_is_ht(conf))
387 ichan->chanmode = ath9k_get_extchanmode(chan,
388 conf->channel_type);
389}
390EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
391
392/*
393 * Get the internal channel reference.
394 */
395struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
396 struct ath_hw *ah)
397{
398 struct ieee80211_channel *curchan = hw->conf.channel;
399 struct ath9k_channel *channel;
400 u8 chan_idx;
401
402 chan_idx = curchan->hw_value;
403 channel = &ah->channels[chan_idx];
404 ath9k_cmn_update_ichannel(hw, channel);
405
406 return channel;
407}
408EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
409
410static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
411 struct ath9k_keyval *hk, const u8 *addr,
412 bool authenticator)
413{
414 struct ath_hw *ah = common->ah;
415 const u8 *key_rxmic;
416 const u8 *key_txmic;
417
418 key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
419 key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
420
421 if (addr == NULL) {
422 /*
423 * Group key installation - only two key cache entries are used
424 * regardless of splitmic capability since group key is only
425 * used either for TX or RX.
426 */
427 if (authenticator) {
428 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
429 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
430 } else {
431 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
432 memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
433 }
434 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
435 }
436 if (!common->splitmic) {
437 /* TX and RX keys share the same key cache entry. */
438 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
439 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
440 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
441 }
442
443 /* Separate key cache entries for TX and RX */
444
445 /* TX key goes at first index, RX key at +32. */
446 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
447 if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
448 /* TX MIC entry failed. No need to proceed further */
449 ath_print(common, ATH_DBG_FATAL,
450 "Setting TX MIC Key Failed\n");
451 return 0;
452 }
453
454 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
455 /* XXX delete tx key on failure? */
456 return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
457}
458
459static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
460{
461 int i;
462
463 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
464 if (test_bit(i, common->keymap) ||
465 test_bit(i + 64, common->keymap))
466 continue; /* At least one part of TKIP key allocated */
467 if (common->splitmic &&
468 (test_bit(i + 32, common->keymap) ||
469 test_bit(i + 64 + 32, common->keymap)))
470 continue; /* At least one part of TKIP key allocated */
471
472 /* Found a free slot for a TKIP key */
473 return i;
474 }
475 return -1;
476}
477
478static int ath_reserve_key_cache_slot(struct ath_common *common)
479{
480 int i;
481
482 /* First, try to find slots that would not be available for TKIP. */
483 if (common->splitmic) {
484 for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
485 if (!test_bit(i, common->keymap) &&
486 (test_bit(i + 32, common->keymap) ||
487 test_bit(i + 64, common->keymap) ||
488 test_bit(i + 64 + 32, common->keymap)))
489 return i;
490 if (!test_bit(i + 32, common->keymap) &&
491 (test_bit(i, common->keymap) ||
492 test_bit(i + 64, common->keymap) ||
493 test_bit(i + 64 + 32, common->keymap)))
494 return i + 32;
495 if (!test_bit(i + 64, common->keymap) &&
496 (test_bit(i , common->keymap) ||
497 test_bit(i + 32, common->keymap) ||
498 test_bit(i + 64 + 32, common->keymap)))
499 return i + 64;
500 if (!test_bit(i + 64 + 32, common->keymap) &&
501 (test_bit(i, common->keymap) ||
502 test_bit(i + 32, common->keymap) ||
503 test_bit(i + 64, common->keymap)))
504 return i + 64 + 32;
505 }
506 } else {
507 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
508 if (!test_bit(i, common->keymap) &&
509 test_bit(i + 64, common->keymap))
510 return i;
511 if (test_bit(i, common->keymap) &&
512 !test_bit(i + 64, common->keymap))
513 return i + 64;
514 }
515 }
516
517 /* No partially used TKIP slots, pick any available slot */
518 for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
519 /* Do not allow slots that could be needed for TKIP group keys
520 * to be used. This limitation could be removed if we know that
521 * TKIP will not be used. */
522 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
523 continue;
524 if (common->splitmic) {
525 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
526 continue;
527 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
528 continue;
529 }
530
531 if (!test_bit(i, common->keymap))
532 return i; /* Found a free slot for a key */
533 }
534
535 /* No free slot found */
536 return -1;
537}
538
539/*
540 * Configure encryption in the HW.
541 */
542int ath9k_cmn_key_config(struct ath_common *common,
543 struct ieee80211_vif *vif,
544 struct ieee80211_sta *sta,
545 struct ieee80211_key_conf *key)
546{
547 struct ath_hw *ah = common->ah;
548 struct ath9k_keyval hk;
549 const u8 *mac = NULL;
550 int ret = 0;
551 int idx;
552
553 memset(&hk, 0, sizeof(hk));
554
555 switch (key->alg) {
556 case ALG_WEP:
557 hk.kv_type = ATH9K_CIPHER_WEP;
558 break;
559 case ALG_TKIP:
560 hk.kv_type = ATH9K_CIPHER_TKIP;
561 break;
562 case ALG_CCMP:
563 hk.kv_type = ATH9K_CIPHER_AES_CCM;
564 break;
565 default:
566 return -EOPNOTSUPP;
567 }
568
569 hk.kv_len = key->keylen;
570 memcpy(hk.kv_val, key->key, key->keylen);
571
572 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
573 /* For now, use the default keys for broadcast keys. This may
574 * need to change with virtual interfaces. */
575 idx = key->keyidx;
576 } else if (key->keyidx) {
577 if (WARN_ON(!sta))
578 return -EOPNOTSUPP;
579 mac = sta->addr;
580
581 if (vif->type != NL80211_IFTYPE_AP) {
582 /* Only keyidx 0 should be used with unicast key, but
583 * allow this for client mode for now. */
584 idx = key->keyidx;
585 } else
586 return -EIO;
587 } else {
588 if (WARN_ON(!sta))
589 return -EOPNOTSUPP;
590 mac = sta->addr;
591
592 if (key->alg == ALG_TKIP)
593 idx = ath_reserve_key_cache_slot_tkip(common);
594 else
595 idx = ath_reserve_key_cache_slot(common);
596 if (idx < 0)
597 return -ENOSPC; /* no free key cache entries */
598 }
599
600 if (key->alg == ALG_TKIP)
601 ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
602 vif->type == NL80211_IFTYPE_AP);
603 else
604 ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
605
606 if (!ret)
607 return -EIO;
608
609 set_bit(idx, common->keymap);
610 if (key->alg == ALG_TKIP) {
611 set_bit(idx + 64, common->keymap);
612 if (common->splitmic) {
613 set_bit(idx + 32, common->keymap);
614 set_bit(idx + 64 + 32, common->keymap);
615 }
616 }
617
618 return idx;
619}
620EXPORT_SYMBOL(ath9k_cmn_key_config);
621
622/*
623 * Delete Key.
624 */
625void ath9k_cmn_key_delete(struct ath_common *common,
626 struct ieee80211_key_conf *key)
627{
628 struct ath_hw *ah = common->ah;
629
630 ath9k_hw_keyreset(ah, key->hw_key_idx);
631 if (key->hw_key_idx < IEEE80211_WEP_NKID)
632 return;
633
634 clear_bit(key->hw_key_idx, common->keymap);
635 if (key->alg != ALG_TKIP)
636 return;
637
638 clear_bit(key->hw_key_idx + 64, common->keymap);
639 if (common->splitmic) {
640 ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
641 clear_bit(key->hw_key_idx + 32, common->keymap);
642 clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
643 }
644}
645EXPORT_SYMBOL(ath9k_cmn_key_delete);
646
289static int __init ath9k_cmn_init(void) 647static int __init ath9k_cmn_init(void)
290{ 648{
291 return 0; 649 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 042999c2fe9c..e08f7e5a26e0 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -20,9 +20,12 @@
20#include "../debug.h" 20#include "../debug.h"
21 21
22#include "hw.h" 22#include "hw.h"
23#include "hw-ops.h"
23 24
24/* Common header for Atheros 802.11n base driver cores */ 25/* Common header for Atheros 802.11n base driver cores */
25 26
27#define IEEE80211_WEP_NKID 4
28
26#define WME_NUM_TID 16 29#define WME_NUM_TID 16
27#define WME_BA_BMP_SIZE 64 30#define WME_BA_BMP_SIZE 64
28#define WME_MAX_BA WME_BA_BMP_SIZE 31#define WME_MAX_BA WME_BA_BMP_SIZE
@@ -74,11 +77,12 @@ struct ath_buf {
74 an aggregate) */ 77 an aggregate) */
75 struct ath_buf *bf_next; /* next subframe in the aggregate */ 78 struct ath_buf *bf_next; /* next subframe in the aggregate */
76 struct sk_buff *bf_mpdu; /* enclosing frame structure */ 79 struct sk_buff *bf_mpdu; /* enclosing frame structure */
77 struct ath_desc *bf_desc; /* virtual addr of desc */ 80 void *bf_desc; /* virtual addr of desc */
78 dma_addr_t bf_daddr; /* physical addr of desc */ 81 dma_addr_t bf_daddr; /* physical addr of desc */
79 dma_addr_t bf_buf_addr; /* physical addr of data buffer */ 82 dma_addr_t bf_buf_addr; /* physical addr of data buffer */
80 bool bf_stale; 83 bool bf_stale;
81 bool bf_isnullfunc; 84 bool bf_isnullfunc;
85 bool bf_tx_aborted;
82 u16 bf_flags; 86 u16 bf_flags;
83 struct ath_buf_state bf_state; 87 struct ath_buf_state bf_state;
84 dma_addr_t bf_dmacontext; 88 dma_addr_t bf_dmacontext;
@@ -125,3 +129,14 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
125 bool decrypt_error); 129 bool decrypt_error);
126 130
127int ath9k_cmn_padpos(__le16 frame_control); 131int ath9k_cmn_padpos(__le16 frame_control);
132int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
133void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
134 struct ath9k_channel *ichan);
135struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
136 struct ath_hw *ah);
137int ath9k_cmn_key_config(struct ath_common *common,
138 struct ieee80211_vif *vif,
139 struct ieee80211_sta *sta,
140 struct ieee80211_key_conf *key);
141void ath9k_cmn_key_delete(struct ath_common *common,
142 struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 081e0085ed4c..29898f8d1893 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -78,6 +78,90 @@ static const struct file_operations fops_debug = {
78 78
79#define DMA_BUF_LEN 1024 79#define DMA_BUF_LEN 1024
80 80
81static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf,
82 size_t count, loff_t *ppos)
83{
84 struct ath_softc *sc = file->private_data;
85 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
86 char buf[32];
87 unsigned int len;
88
89 len = snprintf(buf, sizeof(buf), "0x%08x\n", common->tx_chainmask);
90 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
91}
92
93static ssize_t write_file_tx_chainmask(struct file *file, const char __user *user_buf,
94 size_t count, loff_t *ppos)
95{
96 struct ath_softc *sc = file->private_data;
97 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
98 unsigned long mask;
99 char buf[32];
100 ssize_t len;
101
102 len = min(count, sizeof(buf) - 1);
103 if (copy_from_user(buf, user_buf, len))
104 return -EINVAL;
105
106 buf[len] = '\0';
107 if (strict_strtoul(buf, 0, &mask))
108 return -EINVAL;
109
110 common->tx_chainmask = mask;
111 sc->sc_ah->caps.tx_chainmask = mask;
112 return count;
113}
114
115static const struct file_operations fops_tx_chainmask = {
116 .read = read_file_tx_chainmask,
117 .write = write_file_tx_chainmask,
118 .open = ath9k_debugfs_open,
119 .owner = THIS_MODULE
120};
121
122
123static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf,
124 size_t count, loff_t *ppos)
125{
126 struct ath_softc *sc = file->private_data;
127 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
128 char buf[32];
129 unsigned int len;
130
131 len = snprintf(buf, sizeof(buf), "0x%08x\n", common->rx_chainmask);
132 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
133}
134
135static ssize_t write_file_rx_chainmask(struct file *file, const char __user *user_buf,
136 size_t count, loff_t *ppos)
137{
138 struct ath_softc *sc = file->private_data;
139 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
140 unsigned long mask;
141 char buf[32];
142 ssize_t len;
143
144 len = min(count, sizeof(buf) - 1);
145 if (copy_from_user(buf, user_buf, len))
146 return -EINVAL;
147
148 buf[len] = '\0';
149 if (strict_strtoul(buf, 0, &mask))
150 return -EINVAL;
151
152 common->rx_chainmask = mask;
153 sc->sc_ah->caps.rx_chainmask = mask;
154 return count;
155}
156
157static const struct file_operations fops_rx_chainmask = {
158 .read = read_file_rx_chainmask,
159 .write = write_file_rx_chainmask,
160 .open = ath9k_debugfs_open,
161 .owner = THIS_MODULE
162};
163
164
81static ssize_t read_file_dma(struct file *file, char __user *user_buf, 165static ssize_t read_file_dma(struct file *file, char __user *user_buf,
82 size_t count, loff_t *ppos) 166 size_t count, loff_t *ppos)
83{ 167{
@@ -157,10 +241,10 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
157 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n", 241 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
158 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17); 242 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
159 243
160 len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n", 244 len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
161 REG_READ_D(ah, AR_OBS_BUS_1)); 245 REG_READ_D(ah, AR_OBS_BUS_1));
162 len += snprintf(buf + len, DMA_BUF_LEN - len, 246 len += snprintf(buf + len, DMA_BUF_LEN - len,
163 "AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR)); 247 "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
164 248
165 ath9k_ps_restore(sc); 249 ath9k_ps_restore(sc);
166 250
@@ -180,8 +264,15 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status)
180{ 264{
181 if (status) 265 if (status)
182 sc->debug.stats.istats.total++; 266 sc->debug.stats.istats.total++;
183 if (status & ATH9K_INT_RX) 267 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
184 sc->debug.stats.istats.rxok++; 268 if (status & ATH9K_INT_RXLP)
269 sc->debug.stats.istats.rxlp++;
270 if (status & ATH9K_INT_RXHP)
271 sc->debug.stats.istats.rxhp++;
272 } else {
273 if (status & ATH9K_INT_RX)
274 sc->debug.stats.istats.rxok++;
275 }
185 if (status & ATH9K_INT_RXEOL) 276 if (status & ATH9K_INT_RXEOL)
186 sc->debug.stats.istats.rxeol++; 277 sc->debug.stats.istats.rxeol++;
187 if (status & ATH9K_INT_RXORN) 278 if (status & ATH9K_INT_RXORN)
@@ -223,8 +314,15 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
223 char buf[512]; 314 char buf[512];
224 unsigned int len = 0; 315 unsigned int len = 0;
225 316
226 len += snprintf(buf + len, sizeof(buf) - len, 317 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
227 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok); 318 len += snprintf(buf + len, sizeof(buf) - len,
319 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
320 len += snprintf(buf + len, sizeof(buf) - len,
321 "%8s: %10u\n", "RXHP", sc->debug.stats.istats.rxhp);
322 } else {
323 len += snprintf(buf + len, sizeof(buf) - len,
324 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
325 }
228 len += snprintf(buf + len, sizeof(buf) - len, 326 len += snprintf(buf + len, sizeof(buf) - len,
229 "%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol); 327 "%8s: %10u\n", "RXEOL", sc->debug.stats.istats.rxeol);
230 len += snprintf(buf + len, sizeof(buf) - len, 328 len += snprintf(buf + len, sizeof(buf) - len,
@@ -557,10 +655,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
557} 655}
558 656
559void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, 657void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
560 struct ath_buf *bf) 658 struct ath_buf *bf, struct ath_tx_status *ts)
561{ 659{
562 struct ath_desc *ds = bf->bf_desc;
563
564 if (bf_isampdu(bf)) { 660 if (bf_isampdu(bf)) {
565 if (bf_isxretried(bf)) 661 if (bf_isxretried(bf))
566 TX_STAT_INC(txq->axq_qnum, a_xretries); 662 TX_STAT_INC(txq->axq_qnum, a_xretries);
@@ -570,17 +666,17 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
570 TX_STAT_INC(txq->axq_qnum, completed); 666 TX_STAT_INC(txq->axq_qnum, completed);
571 } 667 }
572 668
573 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO) 669 if (ts->ts_status & ATH9K_TXERR_FIFO)
574 TX_STAT_INC(txq->axq_qnum, fifo_underrun); 670 TX_STAT_INC(txq->axq_qnum, fifo_underrun);
575 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XTXOP) 671 if (ts->ts_status & ATH9K_TXERR_XTXOP)
576 TX_STAT_INC(txq->axq_qnum, xtxop); 672 TX_STAT_INC(txq->axq_qnum, xtxop);
577 if (ds->ds_txstat.ts_status & ATH9K_TXERR_TIMER_EXPIRED) 673 if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
578 TX_STAT_INC(txq->axq_qnum, timer_exp); 674 TX_STAT_INC(txq->axq_qnum, timer_exp);
579 if (ds->ds_txstat.ts_flags & ATH9K_TX_DESC_CFG_ERR) 675 if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
580 TX_STAT_INC(txq->axq_qnum, desc_cfg_err); 676 TX_STAT_INC(txq->axq_qnum, desc_cfg_err);
581 if (ds->ds_txstat.ts_flags & ATH9K_TX_DATA_UNDERRUN) 677 if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
582 TX_STAT_INC(txq->axq_qnum, data_underrun); 678 TX_STAT_INC(txq->axq_qnum, data_underrun);
583 if (ds->ds_txstat.ts_flags & ATH9K_TX_DELIM_UNDERRUN) 679 if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
584 TX_STAT_INC(txq->axq_qnum, delim_underrun); 680 TX_STAT_INC(txq->axq_qnum, delim_underrun);
585} 681}
586 682
@@ -663,30 +759,29 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
663#undef PHY_ERR 759#undef PHY_ERR
664} 760}
665 761
666void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf) 762void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
667{ 763{
668#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++ 764#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
669#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++ 765#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
670 766
671 struct ath_desc *ds = bf->bf_desc;
672 u32 phyerr; 767 u32 phyerr;
673 768
674 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC) 769 if (rs->rs_status & ATH9K_RXERR_CRC)
675 RX_STAT_INC(crc_err); 770 RX_STAT_INC(crc_err);
676 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT) 771 if (rs->rs_status & ATH9K_RXERR_DECRYPT)
677 RX_STAT_INC(decrypt_crc_err); 772 RX_STAT_INC(decrypt_crc_err);
678 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC) 773 if (rs->rs_status & ATH9K_RXERR_MIC)
679 RX_STAT_INC(mic_err); 774 RX_STAT_INC(mic_err);
680 if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE) 775 if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
681 RX_STAT_INC(pre_delim_crc_err); 776 RX_STAT_INC(pre_delim_crc_err);
682 if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST) 777 if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
683 RX_STAT_INC(post_delim_crc_err); 778 RX_STAT_INC(post_delim_crc_err);
684 if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY) 779 if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
685 RX_STAT_INC(decrypt_busy_err); 780 RX_STAT_INC(decrypt_busy_err);
686 781
687 if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) { 782 if (rs->rs_status & ATH9K_RXERR_PHY) {
688 RX_STAT_INC(phy_err); 783 RX_STAT_INC(phy_err);
689 phyerr = ds->ds_rxstat.rs_phyerr & 0x24; 784 phyerr = rs->rs_phyerr & 0x24;
690 RX_PHY_ERR_INC(phyerr); 785 RX_PHY_ERR_INC(phyerr);
691 } 786 }
692 787
@@ -700,6 +795,86 @@ static const struct file_operations fops_recv = {
700 .owner = THIS_MODULE 795 .owner = THIS_MODULE
701}; 796};
702 797
798static ssize_t read_file_regidx(struct file *file, char __user *user_buf,
799 size_t count, loff_t *ppos)
800{
801 struct ath_softc *sc = file->private_data;
802 char buf[32];
803 unsigned int len;
804
805 len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.regidx);
806 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
807}
808
809static ssize_t write_file_regidx(struct file *file, const char __user *user_buf,
810 size_t count, loff_t *ppos)
811{
812 struct ath_softc *sc = file->private_data;
813 unsigned long regidx;
814 char buf[32];
815 ssize_t len;
816
817 len = min(count, sizeof(buf) - 1);
818 if (copy_from_user(buf, user_buf, len))
819 return -EINVAL;
820
821 buf[len] = '\0';
822 if (strict_strtoul(buf, 0, &regidx))
823 return -EINVAL;
824
825 sc->debug.regidx = regidx;
826 return count;
827}
828
829static const struct file_operations fops_regidx = {
830 .read = read_file_regidx,
831 .write = write_file_regidx,
832 .open = ath9k_debugfs_open,
833 .owner = THIS_MODULE
834};
835
836static ssize_t read_file_regval(struct file *file, char __user *user_buf,
837 size_t count, loff_t *ppos)
838{
839 struct ath_softc *sc = file->private_data;
840 struct ath_hw *ah = sc->sc_ah;
841 char buf[32];
842 unsigned int len;
843 u32 regval;
844
845 regval = REG_READ_D(ah, sc->debug.regidx);
846 len = snprintf(buf, sizeof(buf), "0x%08x\n", regval);
847 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
848}
849
850static ssize_t write_file_regval(struct file *file, const char __user *user_buf,
851 size_t count, loff_t *ppos)
852{
853 struct ath_softc *sc = file->private_data;
854 struct ath_hw *ah = sc->sc_ah;
855 unsigned long regval;
856 char buf[32];
857 ssize_t len;
858
859 len = min(count, sizeof(buf) - 1);
860 if (copy_from_user(buf, user_buf, len))
861 return -EINVAL;
862
863 buf[len] = '\0';
864 if (strict_strtoul(buf, 0, &regval))
865 return -EINVAL;
866
867 REG_WRITE_D(ah, sc->debug.regidx, regval);
868 return count;
869}
870
871static const struct file_operations fops_regval = {
872 .read = read_file_regval,
873 .write = write_file_regval,
874 .open = ath9k_debugfs_open,
875 .owner = THIS_MODULE
876};
877
703int ath9k_init_debug(struct ath_hw *ah) 878int ath9k_init_debug(struct ath_hw *ah)
704{ 879{
705 struct ath_common *common = ath9k_hw_common(ah); 880 struct ath_common *common = ath9k_hw_common(ah);
@@ -711,54 +886,55 @@ int ath9k_init_debug(struct ath_hw *ah)
711 sc->debug.debugfs_phy = debugfs_create_dir(wiphy_name(sc->hw->wiphy), 886 sc->debug.debugfs_phy = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
712 ath9k_debugfs_root); 887 ath9k_debugfs_root);
713 if (!sc->debug.debugfs_phy) 888 if (!sc->debug.debugfs_phy)
714 goto err; 889 return -ENOMEM;
715 890
716#ifdef CONFIG_ATH_DEBUG 891#ifdef CONFIG_ATH_DEBUG
717 sc->debug.debugfs_debug = debugfs_create_file("debug", 892 if (!debugfs_create_file("debug", S_IRUSR | S_IWUSR,
718 S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_debug); 893 sc->debug.debugfs_phy, sc, &fops_debug))
719 if (!sc->debug.debugfs_debug)
720 goto err; 894 goto err;
721#endif 895#endif
722 896
723 sc->debug.debugfs_dma = debugfs_create_file("dma", S_IRUSR, 897 if (!debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy,
724 sc->debug.debugfs_phy, sc, &fops_dma); 898 sc, &fops_dma))
725 if (!sc->debug.debugfs_dma) 899 goto err;
900
901 if (!debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy,
902 sc, &fops_interrupt))
903 goto err;
904
905 if (!debugfs_create_file("rcstat", S_IRUSR, sc->debug.debugfs_phy,
906 sc, &fops_rcstat))
907 goto err;
908
909 if (!debugfs_create_file("wiphy", S_IRUSR | S_IWUSR,
910 sc->debug.debugfs_phy, sc, &fops_wiphy))
911 goto err;
912
913 if (!debugfs_create_file("xmit", S_IRUSR, sc->debug.debugfs_phy,
914 sc, &fops_xmit))
726 goto err; 915 goto err;
727 916
728 sc->debug.debugfs_interrupt = debugfs_create_file("interrupt", 917 if (!debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy,
729 S_IRUSR, 918 sc, &fops_recv))
730 sc->debug.debugfs_phy,
731 sc, &fops_interrupt);
732 if (!sc->debug.debugfs_interrupt)
733 goto err; 919 goto err;
734 920
735 sc->debug.debugfs_rcstat = debugfs_create_file("rcstat", 921 if (!debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR,
736 S_IRUSR, 922 sc->debug.debugfs_phy, sc, &fops_rx_chainmask))
737 sc->debug.debugfs_phy,
738 sc, &fops_rcstat);
739 if (!sc->debug.debugfs_rcstat)
740 goto err; 923 goto err;
741 924
742 sc->debug.debugfs_wiphy = debugfs_create_file( 925 if (!debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
743 "wiphy", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, 926 sc->debug.debugfs_phy, sc, &fops_tx_chainmask))
744 &fops_wiphy);
745 if (!sc->debug.debugfs_wiphy)
746 goto err; 927 goto err;
747 928
748 sc->debug.debugfs_xmit = debugfs_create_file("xmit", 929 if (!debugfs_create_file("regidx", S_IRUSR | S_IWUSR,
749 S_IRUSR, 930 sc->debug.debugfs_phy, sc, &fops_regidx))
750 sc->debug.debugfs_phy,
751 sc, &fops_xmit);
752 if (!sc->debug.debugfs_xmit)
753 goto err; 931 goto err;
754 932
755 sc->debug.debugfs_recv = debugfs_create_file("recv", 933 if (!debugfs_create_file("regval", S_IRUSR | S_IWUSR,
756 S_IRUSR, 934 sc->debug.debugfs_phy, sc, &fops_regval))
757 sc->debug.debugfs_phy,
758 sc, &fops_recv);
759 if (!sc->debug.debugfs_recv)
760 goto err; 935 goto err;
761 936
937 sc->debug.regidx = 0;
762 return 0; 938 return 0;
763err: 939err:
764 ath9k_exit_debug(ah); 940 ath9k_exit_debug(ah);
@@ -770,14 +946,7 @@ void ath9k_exit_debug(struct ath_hw *ah)
770 struct ath_common *common = ath9k_hw_common(ah); 946 struct ath_common *common = ath9k_hw_common(ah);
771 struct ath_softc *sc = (struct ath_softc *) common->priv; 947 struct ath_softc *sc = (struct ath_softc *) common->priv;
772 948
773 debugfs_remove(sc->debug.debugfs_recv); 949 debugfs_remove_recursive(sc->debug.debugfs_phy);
774 debugfs_remove(sc->debug.debugfs_xmit);
775 debugfs_remove(sc->debug.debugfs_wiphy);
776 debugfs_remove(sc->debug.debugfs_rcstat);
777 debugfs_remove(sc->debug.debugfs_interrupt);
778 debugfs_remove(sc->debug.debugfs_dma);
779 debugfs_remove(sc->debug.debugfs_debug);
780 debugfs_remove(sc->debug.debugfs_phy);
781} 950}
782 951
783int ath9k_debug_create_root(void) 952int ath9k_debug_create_root(void)
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 86780e68b31e..5147b8709e10 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -35,6 +35,8 @@ struct ath_buf;
35 * struct ath_interrupt_stats - Contains statistics about interrupts 35 * struct ath_interrupt_stats - Contains statistics about interrupts
36 * @total: Total no. of interrupts generated so far 36 * @total: Total no. of interrupts generated so far
37 * @rxok: RX with no errors 37 * @rxok: RX with no errors
38 * @rxlp: RX with low priority RX
39 * @rxhp: RX with high priority, uapsd only
38 * @rxeol: RX with no more RXDESC available 40 * @rxeol: RX with no more RXDESC available
39 * @rxorn: RX FIFO overrun 41 * @rxorn: RX FIFO overrun
40 * @txok: TX completed at the requested rate 42 * @txok: TX completed at the requested rate
@@ -55,6 +57,8 @@ struct ath_buf;
55struct ath_interrupt_stats { 57struct ath_interrupt_stats {
56 u32 total; 58 u32 total;
57 u32 rxok; 59 u32 rxok;
60 u32 rxlp;
61 u32 rxhp;
58 u32 rxeol; 62 u32 rxeol;
59 u32 rxorn; 63 u32 rxorn;
60 u32 txok; 64 u32 txok;
@@ -149,13 +153,7 @@ struct ath_stats {
149 153
150struct ath9k_debug { 154struct ath9k_debug {
151 struct dentry *debugfs_phy; 155 struct dentry *debugfs_phy;
152 struct dentry *debugfs_debug; 156 u32 regidx;
153 struct dentry *debugfs_dma;
154 struct dentry *debugfs_interrupt;
155 struct dentry *debugfs_rcstat;
156 struct dentry *debugfs_wiphy;
157 struct dentry *debugfs_xmit;
158 struct dentry *debugfs_recv;
159 struct ath_stats stats; 157 struct ath_stats stats;
160}; 158};
161 159
@@ -167,8 +165,8 @@ void ath9k_debug_remove_root(void);
167void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); 165void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
168void ath_debug_stat_rc(struct ath_softc *sc, int final_rate); 166void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
169void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, 167void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
170 struct ath_buf *bf); 168 struct ath_buf *bf, struct ath_tx_status *ts);
171void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf); 169void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
172void ath_debug_stat_retries(struct ath_softc *sc, int rix, 170void ath_debug_stat_retries(struct ath_softc *sc, int rix,
173 int xretries, int retries, u8 per); 171 int xretries, int retries, u8 per);
174 172
@@ -204,12 +202,13 @@ static inline void ath_debug_stat_rc(struct ath_softc *sc,
204 202
205static inline void ath_debug_stat_tx(struct ath_softc *sc, 203static inline void ath_debug_stat_tx(struct ath_softc *sc,
206 struct ath_txq *txq, 204 struct ath_txq *txq,
207 struct ath_buf *bf) 205 struct ath_buf *bf,
206 struct ath_tx_status *ts)
208{ 207{
209} 208}
210 209
211static inline void ath_debug_stat_rx(struct ath_softc *sc, 210static inline void ath_debug_stat_rx(struct ath_softc *sc,
212 struct ath_buf *bf) 211 struct ath_rx_status *rs)
213{ 212{
214} 213}
215 214
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index dacaae934148..ca8704a9d7ac 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -36,8 +36,6 @@ void ath9k_hw_analog_shift_rmw(struct ath_hw *ah, u32 reg, u32 mask,
36 36
37 if (ah->config.analog_shiftreg) 37 if (ah->config.analog_shiftreg)
38 udelay(100); 38 udelay(100);
39
40 return;
41} 39}
42 40
43int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight, 41int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
@@ -256,14 +254,13 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah)
256{ 254{
257 int status; 255 int status;
258 256
259 if (AR_SREV_9287(ah)) { 257 if (AR_SREV_9300_20_OR_LATER(ah))
260 ah->eep_map = EEP_MAP_AR9287; 258 ah->eep_ops = &eep_ar9300_ops;
261 ah->eep_ops = &eep_AR9287_ops; 259 else if (AR_SREV_9287(ah)) {
260 ah->eep_ops = &eep_ar9287_ops;
262 } else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) { 261 } else if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
263 ah->eep_map = EEP_MAP_4KBITS;
264 ah->eep_ops = &eep_4k_ops; 262 ah->eep_ops = &eep_4k_ops;
265 } else { 263 } else {
266 ah->eep_map = EEP_MAP_DEFAULT;
267 ah->eep_ops = &eep_def_ops; 264 ah->eep_ops = &eep_def_ops;
268 } 265 }
269 266
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 2f2993b50e2f..21354c15a9a9 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -19,6 +19,7 @@
19 19
20#include "../ath.h" 20#include "../ath.h"
21#include <net/cfg80211.h> 21#include <net/cfg80211.h>
22#include "ar9003_eeprom.h"
22 23
23#define AH_USE_EEPROM 0x1 24#define AH_USE_EEPROM 0x1
24 25
@@ -93,7 +94,6 @@
93 */ 94 */
94#define AR9285_RDEXT_DEFAULT 0x1F 95#define AR9285_RDEXT_DEFAULT 0x1F
95 96
96#define AR_EEPROM_MAC(i) (0x1d+(i))
97#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s)) 97#define ATH9K_POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
98#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5)) 98#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
99#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM)) 99#define ath9k_hw_use_flash(_ah) (!(_ah->ah_flags & AH_USE_EEPROM))
@@ -155,6 +155,7 @@
155#define AR5416_BCHAN_UNUSED 0xFF 155#define AR5416_BCHAN_UNUSED 0xFF
156#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64 156#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
157#define AR5416_MAX_CHAINS 3 157#define AR5416_MAX_CHAINS 3
158#define AR9300_MAX_CHAINS 3
158#define AR5416_PWR_TABLE_OFFSET_DB -5 159#define AR5416_PWR_TABLE_OFFSET_DB -5
159 160
160/* Rx gain type values */ 161/* Rx gain type values */
@@ -249,16 +250,20 @@ enum eeprom_param {
249 EEP_MINOR_REV, 250 EEP_MINOR_REV,
250 EEP_TX_MASK, 251 EEP_TX_MASK,
251 EEP_RX_MASK, 252 EEP_RX_MASK,
253 EEP_FSTCLK_5G,
252 EEP_RXGAIN_TYPE, 254 EEP_RXGAIN_TYPE,
253 EEP_TXGAIN_TYPE,
254 EEP_OL_PWRCTRL, 255 EEP_OL_PWRCTRL,
256 EEP_TXGAIN_TYPE,
255 EEP_RC_CHAIN_MASK, 257 EEP_RC_CHAIN_MASK,
256 EEP_DAC_HPWR_5G, 258 EEP_DAC_HPWR_5G,
257 EEP_FRAC_N_5G, 259 EEP_FRAC_N_5G,
258 EEP_DEV_TYPE, 260 EEP_DEV_TYPE,
259 EEP_TEMPSENSE_SLOPE, 261 EEP_TEMPSENSE_SLOPE,
260 EEP_TEMPSENSE_SLOPE_PAL_ON, 262 EEP_TEMPSENSE_SLOPE_PAL_ON,
261 EEP_PWR_TABLE_OFFSET 263 EEP_PWR_TABLE_OFFSET,
264 EEP_DRIVE_STRENGTH,
265 EEP_INTERNAL_REGULATOR,
266 EEP_SWREG
262}; 267};
263 268
264enum ar5416_rates { 269enum ar5416_rates {
@@ -295,7 +300,8 @@ struct base_eep_header {
295 u32 binBuildNumber; 300 u32 binBuildNumber;
296 u8 deviceType; 301 u8 deviceType;
297 u8 pwdclkind; 302 u8 pwdclkind;
298 u8 futureBase_1[2]; 303 u8 fastClk5g;
304 u8 divChain;
299 u8 rxGainType; 305 u8 rxGainType;
300 u8 dacHiPwrMode_5G; 306 u8 dacHiPwrMode_5G;
301 u8 openLoopPwrCntl; 307 u8 openLoopPwrCntl;
@@ -656,13 +662,6 @@ struct ath9k_country_entry {
656 u8 iso[3]; 662 u8 iso[3];
657}; 663};
658 664
659enum ath9k_eep_map {
660 EEP_MAP_DEFAULT = 0x0,
661 EEP_MAP_4KBITS,
662 EEP_MAP_AR9287,
663 EEP_MAP_MAX
664};
665
666struct eeprom_ops { 665struct eeprom_ops {
667 int (*check_eeprom)(struct ath_hw *hw); 666 int (*check_eeprom)(struct ath_hw *hw);
668 u32 (*get_eeprom)(struct ath_hw *hw, enum eeprom_param param); 667 u32 (*get_eeprom)(struct ath_hw *hw, enum eeprom_param param);
@@ -713,6 +712,8 @@ int ath9k_hw_eeprom_init(struct ath_hw *ah);
713 712
714extern const struct eeprom_ops eep_def_ops; 713extern const struct eeprom_ops eep_def_ops;
715extern const struct eeprom_ops eep_4k_ops; 714extern const struct eeprom_ops eep_4k_ops;
716extern const struct eeprom_ops eep_AR9287_ops; 715extern const struct eeprom_ops eep_ar9287_ops;
716extern const struct eeprom_ops eep_ar9287_ops;
717extern const struct eeprom_ops eep_ar9300_ops;
717 718
718#endif /* EEPROM_H */ 719#endif /* EEPROM_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 68db16690abf..41a77d1bd439 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include "hw.h" 17#include "hw.h"
18#include "ar9002_phy.h"
18 19
19static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah) 20static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
20{ 21{
@@ -43,7 +44,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
43 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { 44 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
44 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { 45 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
45 ath_print(common, ATH_DBG_EEPROM, 46 ath_print(common, ATH_DBG_EEPROM,
46 "Unable to read eeprom region \n"); 47 "Unable to read eeprom region\n");
47 return false; 48 return false;
48 } 49 }
49 eep_data++; 50 eep_data++;
@@ -182,11 +183,11 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
182 switch (param) { 183 switch (param) {
183 case EEP_NFTHRESH_2: 184 case EEP_NFTHRESH_2:
184 return pModal->noiseFloorThreshCh[0]; 185 return pModal->noiseFloorThreshCh[0];
185 case AR_EEPROM_MAC(0): 186 case EEP_MAC_LSW:
186 return pBase->macAddr[0] << 8 | pBase->macAddr[1]; 187 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
187 case AR_EEPROM_MAC(1): 188 case EEP_MAC_MID:
188 return pBase->macAddr[2] << 8 | pBase->macAddr[3]; 189 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
189 case AR_EEPROM_MAC(2): 190 case EEP_MAC_MSW:
190 return pBase->macAddr[4] << 8 | pBase->macAddr[5]; 191 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
191 case EEP_REG_0: 192 case EEP_REG_0:
192 return pBase->regDmn[0]; 193 return pBase->regDmn[0];
@@ -453,6 +454,8 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
453 &tMinCalPower, gainBoundaries, 454 &tMinCalPower, gainBoundaries,
454 pdadcValues, numXpdGain); 455 pdadcValues, numXpdGain);
455 456
457 ENABLE_REGWRITE_BUFFER(ah);
458
456 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) { 459 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
457 REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset, 460 REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset,
458 SM(pdGainOverlap_t2, 461 SM(pdGainOverlap_t2,
@@ -493,6 +496,9 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
493 496
494 regOffset += 4; 497 regOffset += 4;
495 } 498 }
499
500 REGWRITE_BUFFER_FLUSH(ah);
501 DISABLE_REGWRITE_BUFFER(ah);
496 } 502 }
497 } 503 }
498 504
@@ -758,6 +764,8 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
758 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2; 764 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
759 } 765 }
760 766
767 ENABLE_REGWRITE_BUFFER(ah);
768
761 /* OFDM power per rate */ 769 /* OFDM power per rate */
762 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, 770 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
763 ATH9K_POW_SM(ratesArray[rate18mb], 24) 771 ATH9K_POW_SM(ratesArray[rate18mb], 24)
@@ -820,6 +828,9 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
820 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) 828 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
821 | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); 829 | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
822 } 830 }
831
832 REGWRITE_BUFFER_FLUSH(ah);
833 DISABLE_REGWRITE_BUFFER(ah);
823} 834}
824 835
825static void ath9k_hw_4k_set_addac(struct ath_hw *ah, 836static void ath9k_hw_4k_set_addac(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 839d05a1df29..b471db5fb82d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include "hw.h" 17#include "hw.h"
18#include "ar9002_phy.h"
18 19
19static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah) 20static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah)
20{ 21{
@@ -44,7 +45,7 @@ static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
44 if (!ath9k_hw_nvram_read(common, 45 if (!ath9k_hw_nvram_read(common,
45 addr + eep_start_loc, eep_data)) { 46 addr + eep_start_loc, eep_data)) {
46 ath_print(common, ATH_DBG_EEPROM, 47 ath_print(common, ATH_DBG_EEPROM,
47 "Unable to read eeprom region \n"); 48 "Unable to read eeprom region\n");
48 return false; 49 return false;
49 } 50 }
50 eep_data++; 51 eep_data++;
@@ -172,11 +173,11 @@ static u32 ath9k_hw_AR9287_get_eeprom(struct ath_hw *ah,
172 switch (param) { 173 switch (param) {
173 case EEP_NFTHRESH_2: 174 case EEP_NFTHRESH_2:
174 return pModal->noiseFloorThreshCh[0]; 175 return pModal->noiseFloorThreshCh[0];
175 case AR_EEPROM_MAC(0): 176 case EEP_MAC_LSW:
176 return pBase->macAddr[0] << 8 | pBase->macAddr[1]; 177 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
177 case AR_EEPROM_MAC(1): 178 case EEP_MAC_MID:
178 return pBase->macAddr[2] << 8 | pBase->macAddr[3]; 179 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
179 case AR_EEPROM_MAC(2): 180 case EEP_MAC_MSW:
180 return pBase->macAddr[4] << 8 | pBase->macAddr[5]; 181 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
181 case EEP_REG_0: 182 case EEP_REG_0:
182 return pBase->regDmn[0]; 183 return pBase->regDmn[0];
@@ -1169,7 +1170,7 @@ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
1169#undef EEP_MAP9287_SPURCHAN 1170#undef EEP_MAP9287_SPURCHAN
1170} 1171}
1171 1172
1172const struct eeprom_ops eep_AR9287_ops = { 1173const struct eeprom_ops eep_ar9287_ops = {
1173 .check_eeprom = ath9k_hw_AR9287_check_eeprom, 1174 .check_eeprom = ath9k_hw_AR9287_check_eeprom,
1174 .get_eeprom = ath9k_hw_AR9287_get_eeprom, 1175 .get_eeprom = ath9k_hw_AR9287_get_eeprom,
1175 .fill_eeprom = ath9k_hw_AR9287_fill_eeprom, 1176 .fill_eeprom = ath9k_hw_AR9287_fill_eeprom,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 404a0341242c..7e1ed78d0e64 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include "hw.h" 17#include "hw.h"
18#include "ar9002_phy.h"
18 19
19static void ath9k_get_txgain_index(struct ath_hw *ah, 20static void ath9k_get_txgain_index(struct ath_hw *ah,
20 struct ath9k_channel *chan, 21 struct ath9k_channel *chan,
@@ -49,7 +50,6 @@ static void ath9k_get_txgain_index(struct ath_hw *ah,
49 i++; 50 i++;
50 51
51 *pcdacIdx = i; 52 *pcdacIdx = i;
52 return;
53} 53}
54 54
55static void ath9k_olc_get_pdadcs(struct ath_hw *ah, 55static void ath9k_olc_get_pdadcs(struct ath_hw *ah,
@@ -222,6 +222,12 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
222 return -EINVAL; 222 return -EINVAL;
223 } 223 }
224 224
225 /* Enable fixup for AR_AN_TOP2 if necessary */
226 if (AR_SREV_9280_10_OR_LATER(ah) &&
227 (eep->baseEepHeader.version & 0xff) > 0x0a &&
228 eep->baseEepHeader.pwdclkind == 0)
229 ah->need_an_top2_fixup = 1;
230
225 return 0; 231 return 0;
226} 232}
227 233
@@ -237,11 +243,11 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
237 return pModal[0].noiseFloorThreshCh[0]; 243 return pModal[0].noiseFloorThreshCh[0];
238 case EEP_NFTHRESH_2: 244 case EEP_NFTHRESH_2:
239 return pModal[1].noiseFloorThreshCh[0]; 245 return pModal[1].noiseFloorThreshCh[0];
240 case AR_EEPROM_MAC(0): 246 case EEP_MAC_LSW:
241 return pBase->macAddr[0] << 8 | pBase->macAddr[1]; 247 return pBase->macAddr[0] << 8 | pBase->macAddr[1];
242 case AR_EEPROM_MAC(1): 248 case EEP_MAC_MID:
243 return pBase->macAddr[2] << 8 | pBase->macAddr[3]; 249 return pBase->macAddr[2] << 8 | pBase->macAddr[3];
244 case AR_EEPROM_MAC(2): 250 case EEP_MAC_MSW:
245 return pBase->macAddr[4] << 8 | pBase->macAddr[5]; 251 return pBase->macAddr[4] << 8 | pBase->macAddr[5];
246 case EEP_REG_0: 252 case EEP_REG_0:
247 return pBase->regDmn[0]; 253 return pBase->regDmn[0];
@@ -267,6 +273,8 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
267 return pBase->txMask; 273 return pBase->txMask;
268 case EEP_RX_MASK: 274 case EEP_RX_MASK:
269 return pBase->rxMask; 275 return pBase->rxMask;
276 case EEP_FSTCLK_5G:
277 return pBase->fastClk5g;
270 case EEP_RXGAIN_TYPE: 278 case EEP_RXGAIN_TYPE:
271 return pBase->rxGainType; 279 return pBase->rxGainType;
272 case EEP_TXGAIN_TYPE: 280 case EEP_TXGAIN_TYPE:
@@ -742,8 +750,6 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
742 pPDADCValues[k] = pPDADCValues[k - 1]; 750 pPDADCValues[k] = pPDADCValues[k - 1];
743 k++; 751 k++;
744 } 752 }
745
746 return;
747} 753}
748 754
749static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah, 755static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index deab8beb0680..0ee75e79fe35 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -283,22 +283,17 @@ static void ath9k_gen_timer_start(struct ath_hw *ah,
283 u32 timer_next, 283 u32 timer_next,
284 u32 timer_period) 284 u32 timer_period)
285{ 285{
286 struct ath_common *common = ath9k_hw_common(ah);
287 struct ath_softc *sc = (struct ath_softc *) common->priv;
288
289 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period); 286 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
290 287
291 if ((sc->imask & ATH9K_INT_GENTIMER) == 0) { 288 if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
292 ath9k_hw_set_interrupts(ah, 0); 289 ath9k_hw_set_interrupts(ah, 0);
293 sc->imask |= ATH9K_INT_GENTIMER; 290 ah->imask |= ATH9K_INT_GENTIMER;
294 ath9k_hw_set_interrupts(ah, sc->imask); 291 ath9k_hw_set_interrupts(ah, ah->imask);
295 } 292 }
296} 293}
297 294
298static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer) 295static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
299{ 296{
300 struct ath_common *common = ath9k_hw_common(ah);
301 struct ath_softc *sc = (struct ath_softc *) common->priv;
302 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 297 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
303 298
304 ath9k_hw_gen_timer_stop(ah, timer); 299 ath9k_hw_gen_timer_stop(ah, timer);
@@ -306,8 +301,8 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
306 /* if no timer is enabled, turn off interrupt mask */ 301 /* if no timer is enabled, turn off interrupt mask */
307 if (timer_table->timer_mask.val == 0) { 302 if (timer_table->timer_mask.val == 0) {
308 ath9k_hw_set_interrupts(ah, 0); 303 ath9k_hw_set_interrupts(ah, 0);
309 sc->imask &= ~ATH9K_INT_GENTIMER; 304 ah->imask &= ~ATH9K_INT_GENTIMER;
310 ath9k_hw_set_interrupts(ah, sc->imask); 305 ath9k_hw_set_interrupts(ah, ah->imask);
311 } 306 }
312} 307}
313 308
@@ -364,7 +359,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
364 bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN; 359 bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
365 360
366 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX, 361 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
367 "no stomp timer running \n"); 362 "no stomp timer running\n");
368 363
369 spin_lock_bh(&btcoex->btcoex_lock); 364 spin_lock_bh(&btcoex->btcoex_lock);
370 365
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
new file mode 100644
index 000000000000..46dc41a16faa
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -0,0 +1,1008 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19#define ATH9K_FW_USB_DEV(devid, fw) \
20 { USB_DEVICE(0x0cf3, devid), .driver_info = (unsigned long) fw }
21
22static struct usb_device_id ath9k_hif_usb_ids[] = {
23 ATH9K_FW_USB_DEV(0x9271, "ar9271.fw"),
24 ATH9K_FW_USB_DEV(0x1006, "ar9271.fw"),
25 { },
26};
27
28MODULE_DEVICE_TABLE(usb, ath9k_hif_usb_ids);
29
30static int __hif_usb_tx(struct hif_device_usb *hif_dev);
31
32static void hif_usb_regout_cb(struct urb *urb)
33{
34 struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
35
36 switch (urb->status) {
37 case 0:
38 break;
39 case -ENOENT:
40 case -ECONNRESET:
41 case -ENODEV:
42 case -ESHUTDOWN:
43 goto free;
44 default:
45 break;
46 }
47
48 if (cmd) {
49 ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
50 cmd->skb, 1);
51 kfree(cmd);
52 }
53
54 return;
55free:
56 kfree_skb(cmd->skb);
57 kfree(cmd);
58}
59
60static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
61 struct sk_buff *skb)
62{
63 struct urb *urb;
64 struct cmd_buf *cmd;
65 int ret = 0;
66
67 urb = usb_alloc_urb(0, GFP_KERNEL);
68 if (urb == NULL)
69 return -ENOMEM;
70
71 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
72 if (cmd == NULL) {
73 usb_free_urb(urb);
74 return -ENOMEM;
75 }
76
77 cmd->skb = skb;
78 cmd->hif_dev = hif_dev;
79
80 usb_fill_int_urb(urb, hif_dev->udev,
81 usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE),
82 skb->data, skb->len,
83 hif_usb_regout_cb, cmd, 1);
84
85 usb_anchor_urb(urb, &hif_dev->regout_submitted);
86 ret = usb_submit_urb(urb, GFP_KERNEL);
87 if (ret) {
88 usb_unanchor_urb(urb);
89 kfree(cmd);
90 }
91 usb_free_urb(urb);
92
93 return ret;
94}
95
96static inline void ath9k_skb_queue_purge(struct hif_device_usb *hif_dev,
97 struct sk_buff_head *list)
98{
99 struct sk_buff *skb;
100
101 while ((skb = __skb_dequeue(list)) != NULL) {
102 dev_kfree_skb_any(skb);
103 TX_STAT_INC(skb_dropped);
104 }
105}
106
107static void hif_usb_tx_cb(struct urb *urb)
108{
109 struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
110 struct hif_device_usb *hif_dev = tx_buf->hif_dev;
111 struct sk_buff *skb;
112
113 if (!hif_dev || !tx_buf)
114 return;
115
116 switch (urb->status) {
117 case 0:
118 break;
119 case -ENOENT:
120 case -ECONNRESET:
121 case -ENODEV:
122 case -ESHUTDOWN:
123 /*
124 * The URB has been killed, free the SKBs
125 * and return.
126 */
127 ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
128 return;
129 default:
130 break;
131 }
132
133 /* Check if TX has been stopped */
134 spin_lock(&hif_dev->tx.tx_lock);
135 if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
136 spin_unlock(&hif_dev->tx.tx_lock);
137 ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
138 goto add_free;
139 }
140 spin_unlock(&hif_dev->tx.tx_lock);
141
142 /* Complete the queued SKBs. */
143 while ((skb = __skb_dequeue(&tx_buf->skb_queue)) != NULL) {
144 ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
145 skb, 1);
146 TX_STAT_INC(skb_completed);
147 }
148
149add_free:
150 /* Re-initialize the SKB queue */
151 tx_buf->len = tx_buf->offset = 0;
152 __skb_queue_head_init(&tx_buf->skb_queue);
153
154 /* Add this TX buffer to the free list */
155 spin_lock(&hif_dev->tx.tx_lock);
156 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
157 hif_dev->tx.tx_buf_cnt++;
158 if (!(hif_dev->tx.flags & HIF_USB_TX_STOP))
159 __hif_usb_tx(hif_dev); /* Check for pending SKBs */
160 TX_STAT_INC(buf_completed);
161 spin_unlock(&hif_dev->tx.tx_lock);
162}
163
164/* TX lock has to be taken */
165static int __hif_usb_tx(struct hif_device_usb *hif_dev)
166{
167 struct tx_buf *tx_buf = NULL;
168 struct sk_buff *nskb = NULL;
169 int ret = 0, i;
170 u16 *hdr, tx_skb_cnt = 0;
171 u8 *buf;
172
173 if (hif_dev->tx.tx_skb_cnt == 0)
174 return 0;
175
176 /* Check if a free TX buffer is available */
177 if (list_empty(&hif_dev->tx.tx_buf))
178 return 0;
179
180 tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list);
181 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_pending);
182 hif_dev->tx.tx_buf_cnt--;
183
184 tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM);
185
186 for (i = 0; i < tx_skb_cnt; i++) {
187 nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue);
188
189 /* Should never be NULL */
190 BUG_ON(!nskb);
191
192 hif_dev->tx.tx_skb_cnt--;
193
194 buf = tx_buf->buf;
195 buf += tx_buf->offset;
196 hdr = (u16 *)buf;
197 *hdr++ = nskb->len;
198 *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
199 buf += 4;
200 memcpy(buf, nskb->data, nskb->len);
201 tx_buf->len = nskb->len + 4;
202
203 if (i < (tx_skb_cnt - 1))
204 tx_buf->offset += (((tx_buf->len - 1) / 4) + 1) * 4;
205
206 if (i == (tx_skb_cnt - 1))
207 tx_buf->len += tx_buf->offset;
208
209 __skb_queue_tail(&tx_buf->skb_queue, nskb);
210 TX_STAT_INC(skb_queued);
211 }
212
213 usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
214 usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
215 tx_buf->buf, tx_buf->len,
216 hif_usb_tx_cb, tx_buf);
217
218 ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
219 if (ret) {
220 tx_buf->len = tx_buf->offset = 0;
221 ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
222 __skb_queue_head_init(&tx_buf->skb_queue);
223 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
224 hif_dev->tx.tx_buf_cnt++;
225 }
226
227 if (!ret)
228 TX_STAT_INC(buf_queued);
229
230 return ret;
231}
232
233static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb,
234 struct ath9k_htc_tx_ctl *tx_ctl)
235{
236 unsigned long flags;
237
238 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
239
240 if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
241 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
242 return -ENODEV;
243 }
244
245 /* Check if the max queue count has been reached */
246 if (hif_dev->tx.tx_skb_cnt > MAX_TX_BUF_NUM) {
247 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
248 return -ENOMEM;
249 }
250
251 __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
252 hif_dev->tx.tx_skb_cnt++;
253
254 /* Send normal frames immediately */
255 if (!tx_ctl || (tx_ctl && (tx_ctl->type == ATH9K_HTC_NORMAL)))
256 __hif_usb_tx(hif_dev);
257
258 /* Check if AMPDUs have to be sent immediately */
259 if (tx_ctl && (tx_ctl->type == ATH9K_HTC_AMPDU) &&
260 (hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
261 (hif_dev->tx.tx_skb_cnt < 2)) {
262 __hif_usb_tx(hif_dev);
263 }
264
265 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
266
267 return 0;
268}
269
270static void hif_usb_start(void *hif_handle, u8 pipe_id)
271{
272 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
273 unsigned long flags;
274
275 hif_dev->flags |= HIF_USB_START;
276
277 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
278 hif_dev->tx.flags &= ~HIF_USB_TX_STOP;
279 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
280}
281
282static void hif_usb_stop(void *hif_handle, u8 pipe_id)
283{
284 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
285 unsigned long flags;
286
287 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
288 ath9k_skb_queue_purge(hif_dev, &hif_dev->tx.tx_skb_queue);
289 hif_dev->tx.tx_skb_cnt = 0;
290 hif_dev->tx.flags |= HIF_USB_TX_STOP;
291 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
292}
293
294static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
295 struct ath9k_htc_tx_ctl *tx_ctl)
296{
297 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
298 int ret = 0;
299
300 switch (pipe_id) {
301 case USB_WLAN_TX_PIPE:
302 ret = hif_usb_send_tx(hif_dev, skb, tx_ctl);
303 break;
304 case USB_REG_OUT_PIPE:
305 ret = hif_usb_send_regout(hif_dev, skb);
306 break;
307 default:
308 dev_err(&hif_dev->udev->dev,
309 "ath9k_htc: Invalid TX pipe: %d\n", pipe_id);
310 ret = -EINVAL;
311 break;
312 }
313
314 return ret;
315}
316
317static struct ath9k_htc_hif hif_usb = {
318 .transport = ATH9K_HIF_USB,
319 .name = "ath9k_hif_usb",
320
321 .control_ul_pipe = USB_REG_OUT_PIPE,
322 .control_dl_pipe = USB_REG_IN_PIPE,
323
324 .start = hif_usb_start,
325 .stop = hif_usb_stop,
326 .send = hif_usb_send,
327};
328
329static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
330 struct sk_buff *skb)
331{
332 struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
333 int index = 0, i = 0, chk_idx, len = skb->len;
334 int rx_remain_len = 0, rx_pkt_len = 0;
335 u16 pkt_len, pkt_tag, pool_index = 0;
336 u8 *ptr;
337
338 spin_lock(&hif_dev->rx_lock);
339
340 rx_remain_len = hif_dev->rx_remain_len;
341 rx_pkt_len = hif_dev->rx_transfer_len;
342
343 if (rx_remain_len != 0) {
344 struct sk_buff *remain_skb = hif_dev->remain_skb;
345
346 if (remain_skb) {
347 ptr = (u8 *) remain_skb->data;
348
349 index = rx_remain_len;
350 rx_remain_len -= hif_dev->rx_pad_len;
351 ptr += rx_pkt_len;
352
353 memcpy(ptr, skb->data, rx_remain_len);
354
355 rx_pkt_len += rx_remain_len;
356 hif_dev->rx_remain_len = 0;
357 skb_put(remain_skb, rx_pkt_len);
358
359 skb_pool[pool_index++] = remain_skb;
360
361 } else {
362 index = rx_remain_len;
363 }
364 }
365
366 spin_unlock(&hif_dev->rx_lock);
367
368 while (index < len) {
369 ptr = (u8 *) skb->data;
370
371 pkt_len = ptr[index] + (ptr[index+1] << 8);
372 pkt_tag = ptr[index+2] + (ptr[index+3] << 8);
373
374 if (pkt_tag == ATH_USB_RX_STREAM_MODE_TAG) {
375 u16 pad_len;
376
377 pad_len = 4 - (pkt_len & 0x3);
378 if (pad_len == 4)
379 pad_len = 0;
380
381 chk_idx = index;
382 index = index + 4 + pkt_len + pad_len;
383
384 if (index > MAX_RX_BUF_SIZE) {
385 spin_lock(&hif_dev->rx_lock);
386 hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
387 hif_dev->rx_transfer_len =
388 MAX_RX_BUF_SIZE - chk_idx - 4;
389 hif_dev->rx_pad_len = pad_len;
390
391 nskb = __dev_alloc_skb(pkt_len + 32,
392 GFP_ATOMIC);
393 if (!nskb) {
394 dev_err(&hif_dev->udev->dev,
395 "ath9k_htc: RX memory allocation"
396 " error\n");
397 spin_unlock(&hif_dev->rx_lock);
398 goto err;
399 }
400 skb_reserve(nskb, 32);
401 RX_STAT_INC(skb_allocated);
402
403 memcpy(nskb->data, &(skb->data[chk_idx+4]),
404 hif_dev->rx_transfer_len);
405
406 /* Record the buffer pointer */
407 hif_dev->remain_skb = nskb;
408 spin_unlock(&hif_dev->rx_lock);
409 } else {
410 nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
411 if (!nskb) {
412 dev_err(&hif_dev->udev->dev,
413 "ath9k_htc: RX memory allocation"
414 " error\n");
415 goto err;
416 }
417 skb_reserve(nskb, 32);
418 RX_STAT_INC(skb_allocated);
419
420 memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
421 skb_put(nskb, pkt_len);
422 skb_pool[pool_index++] = nskb;
423 }
424 } else {
425 RX_STAT_INC(skb_dropped);
426 return;
427 }
428 }
429
430err:
431 for (i = 0; i < pool_index; i++) {
432 ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
433 skb_pool[i]->len, USB_WLAN_RX_PIPE);
434 RX_STAT_INC(skb_completed);
435 }
436}
437
438static void ath9k_hif_usb_rx_cb(struct urb *urb)
439{
440 struct sk_buff *skb = (struct sk_buff *) urb->context;
441 struct hif_device_usb *hif_dev = (struct hif_device_usb *)
442 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
443 int ret;
444
445 if (!skb)
446 return;
447
448 if (!hif_dev)
449 goto free;
450
451 switch (urb->status) {
452 case 0:
453 break;
454 case -ENOENT:
455 case -ECONNRESET:
456 case -ENODEV:
457 case -ESHUTDOWN:
458 goto free;
459 default:
460 goto resubmit;
461 }
462
463 if (likely(urb->actual_length != 0)) {
464 skb_put(skb, urb->actual_length);
465 ath9k_hif_usb_rx_stream(hif_dev, skb);
466 }
467
468resubmit:
469 skb_reset_tail_pointer(skb);
470 skb_trim(skb, 0);
471
472 usb_anchor_urb(urb, &hif_dev->rx_submitted);
473 ret = usb_submit_urb(urb, GFP_ATOMIC);
474 if (ret) {
475 usb_unanchor_urb(urb);
476 goto free;
477 }
478
479 return;
480free:
481 kfree_skb(skb);
482}
483
484static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
485{
486 struct sk_buff *skb = (struct sk_buff *) urb->context;
487 struct sk_buff *nskb;
488 struct hif_device_usb *hif_dev = (struct hif_device_usb *)
489 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
490 int ret;
491
492 if (!skb)
493 return;
494
495 if (!hif_dev)
496 goto free;
497
498 switch (urb->status) {
499 case 0:
500 break;
501 case -ENOENT:
502 case -ECONNRESET:
503 case -ENODEV:
504 case -ESHUTDOWN:
505 goto free;
506 default:
507 goto resubmit;
508 }
509
510 if (likely(urb->actual_length != 0)) {
511 skb_put(skb, urb->actual_length);
512
513 /* Process the command first */
514 ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
515 skb->len, USB_REG_IN_PIPE);
516
517
518 nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
519 if (!nskb) {
520 dev_err(&hif_dev->udev->dev,
521 "ath9k_htc: REG_IN memory allocation failure\n");
522 urb->context = NULL;
523 return;
524 }
525
526 usb_fill_int_urb(urb, hif_dev->udev,
527 usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
528 nskb->data, MAX_REG_IN_BUF_SIZE,
529 ath9k_hif_usb_reg_in_cb, nskb, 1);
530
531 ret = usb_submit_urb(urb, GFP_ATOMIC);
532 if (ret) {
533 kfree_skb(nskb);
534 urb->context = NULL;
535 }
536
537 return;
538 }
539
540resubmit:
541 skb_reset_tail_pointer(skb);
542 skb_trim(skb, 0);
543
544 ret = usb_submit_urb(urb, GFP_ATOMIC);
545 if (ret)
546 goto free;
547
548 return;
549free:
550 kfree_skb(skb);
551 urb->context = NULL;
552}
553
554static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
555{
556 struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
557
558 list_for_each_entry_safe(tx_buf, tx_buf_tmp,
559 &hif_dev->tx.tx_buf, list) {
560 usb_kill_urb(tx_buf->urb);
561 list_del(&tx_buf->list);
562 usb_free_urb(tx_buf->urb);
563 kfree(tx_buf->buf);
564 kfree(tx_buf);
565 }
566
567 list_for_each_entry_safe(tx_buf, tx_buf_tmp,
568 &hif_dev->tx.tx_pending, list) {
569 usb_kill_urb(tx_buf->urb);
570 list_del(&tx_buf->list);
571 usb_free_urb(tx_buf->urb);
572 kfree(tx_buf->buf);
573 kfree(tx_buf);
574 }
575}
576
577static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
578{
579 struct tx_buf *tx_buf;
580 int i;
581
582 INIT_LIST_HEAD(&hif_dev->tx.tx_buf);
583 INIT_LIST_HEAD(&hif_dev->tx.tx_pending);
584 spin_lock_init(&hif_dev->tx.tx_lock);
585 __skb_queue_head_init(&hif_dev->tx.tx_skb_queue);
586
587 for (i = 0; i < MAX_TX_URB_NUM; i++) {
588 tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
589 if (!tx_buf)
590 goto err;
591
592 tx_buf->buf = kzalloc(MAX_TX_BUF_SIZE, GFP_KERNEL);
593 if (!tx_buf->buf)
594 goto err;
595
596 tx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
597 if (!tx_buf->urb)
598 goto err;
599
600 tx_buf->hif_dev = hif_dev;
601 __skb_queue_head_init(&tx_buf->skb_queue);
602
603 list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
604 }
605
606 hif_dev->tx.tx_buf_cnt = MAX_TX_URB_NUM;
607
608 return 0;
609err:
610 ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
611 return -ENOMEM;
612}
613
614static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
615{
616 usb_kill_anchored_urbs(&hif_dev->rx_submitted);
617}
618
/*
 * Allocate and submit MAX_RX_URB_NUM bulk-IN URBs on the WLAN RX pipe.
 * Each URB uses a freshly allocated skb as its transfer buffer and as
 * its completion context, and is kept alive only by the rx_submitted
 * anchor. Returns 0 on success or a negative errno; on failure all
 * URBs submitted so far are cancelled via the dealloc helper.
 */
static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
{
	struct urb *urb = NULL;
	struct sk_buff *skb = NULL;
	int i, ret;

	init_usb_anchor(&hif_dev->rx_submitted);
	spin_lock_init(&hif_dev->rx_lock);

	for (i = 0; i < MAX_RX_URB_NUM; i++) {

		/* Allocate URB */
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (urb == NULL) {
			ret = -ENOMEM;
			goto err_urb;
		}

		/* Allocate buffer */
		skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto err_skb;
		}

		/* The skb doubles as the completion context for rx_cb. */
		usb_fill_bulk_urb(urb, hif_dev->udev,
				  usb_rcvbulkpipe(hif_dev->udev,
						  USB_WLAN_RX_PIPE),
				  skb->data, MAX_RX_BUF_SIZE,
				  ath9k_hif_usb_rx_cb, skb);

		/* Anchor URB */
		usb_anchor_urb(urb, &hif_dev->rx_submitted);

		/* Submit URB */
		ret = usb_submit_urb(urb, GFP_KERNEL);
		if (ret) {
			usb_unanchor_urb(urb);
			goto err_submit;
		}

		/*
		 * Drop reference count.
		 * This ensures that the URB is freed when killing them.
		 */
		usb_free_urb(urb);
	}

	return 0;

err_submit:
	kfree_skb(skb);
err_skb:
	usb_free_urb(urb);
err_urb:
	ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
	return ret;
}
677
678static void ath9k_hif_usb_dealloc_reg_in_urb(struct hif_device_usb *hif_dev)
679{
680 if (hif_dev->reg_in_urb) {
681 usb_kill_urb(hif_dev->reg_in_urb);
682 if (hif_dev->reg_in_urb->context)
683 kfree_skb((void *)hif_dev->reg_in_urb->context);
684 usb_free_urb(hif_dev->reg_in_urb);
685 hif_dev->reg_in_urb = NULL;
686 }
687}
688
/*
 * Allocate and submit the single interrupt-IN URB used for register
 * reads. The skb serves as both transfer buffer and completion
 * context. Returns 0 or -ENOMEM (a failed submit is also reported as
 * -ENOMEM).
 *
 * On the error path the dealloc helper frees the skb through
 * reg_in_urb->context; when alloc_skb itself failed, the context is
 * still NULL (usb_alloc_urb zeroes the urb) so only the urb is freed.
 */
static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
{
	struct sk_buff *skb;

	hif_dev->reg_in_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (hif_dev->reg_in_urb == NULL)
		return -ENOMEM;

	skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
	if (!skb)
		goto err;

	/* Interval of 1 — poll the endpoint as often as allowed. */
	usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev,
			 usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
			 skb->data, MAX_REG_IN_BUF_SIZE,
			 ath9k_hif_usb_reg_in_cb, skb, 1);

	if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
		goto err;

	return 0;

err:
	ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
	return -ENOMEM;
}
715
716static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
717{
718 /* Register Write */
719 init_usb_anchor(&hif_dev->regout_submitted);
720
721 /* TX */
722 if (ath9k_hif_usb_alloc_tx_urbs(hif_dev) < 0)
723 goto err;
724
725 /* RX */
726 if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
727 goto err;
728
729 /* Register Read */
730 if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0)
731 goto err;
732
733 return 0;
734err:
735 return -ENOMEM;
736}
737
/*
 * Tear down everything ath9k_hif_usb_alloc_urbs() set up, plus any
 * register-write URBs still pending on the regout anchor.
 */
static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
{
	usb_kill_anchored_urbs(&hif_dev->regout_submitted);
	ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
	ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
	ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
}
745
/*
 * Push the firmware image to the target in 4 KB chunks via vendor
 * control transfers starting at AR9271_FIRMWARE, then issue the
 * download-complete command. Returns 0 on success, -ENOMEM, -EIO, or
 * the usb_control_msg() error.
 *
 * NOTE(review): usb_control_msg() takes its timeout in *milliseconds*,
 * but HZ is jiffies-per-second (100..1000 depending on kernel config),
 * so the effective timeout varies with CONFIG_HZ — confirm the
 * intended value.
 */
static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
{
	int transfer, err;
	const void *data = hif_dev->firmware->data;
	size_t len = hif_dev->firmware->size;
	u32 addr = AR9271_FIRMWARE;
	u8 *buf = kzalloc(4096, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	while (len) {
		transfer = min_t(int, len, 4096);
		/* Stage the chunk in a kernel buffer for the control msg. */
		memcpy(buf, data, transfer);

		/* wValue carries the upper bits of the target address. */
		err = usb_control_msg(hif_dev->udev,
				      usb_sndctrlpipe(hif_dev->udev, 0),
				      FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT,
				      addr >> 8, 0, buf, transfer, HZ);
		if (err < 0) {
			kfree(buf);
			return err;
		}

		len -= transfer;
		data += transfer;
		addr += transfer;
	}
	kfree(buf);

	/*
	 * Issue FW download complete command to firmware.
	 */
	/* Zero-length transfer: success returns 0, so err != 0 is failure. */
	err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
			      FIRMWARE_DOWNLOAD_COMP,
			      0x40 | USB_DIR_OUT,
			      AR9271_FIRMWARE_TEXT >> 8, 0, NULL, 0, HZ);
	if (err)
		return -EIO;

	dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %ld\n",
		 "ar9271.fw", (unsigned long) hif_dev->firmware->size);

	return 0;
}
791
/*
 * Bring the HIF device up: request the firmware image, allocate all
 * URBs, and download the firmware to the target. Returns 0 or a
 * negative errno; on failure everything acquired so far is released
 * and hif_dev->firmware is left NULL.
 */
static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev,
				  const char *fw_name)
{
	int ret;

	/* Request firmware */
	ret = request_firmware(&hif_dev->firmware, fw_name, &hif_dev->udev->dev);
	if (ret) {
		dev_err(&hif_dev->udev->dev,
			"ath9k_htc: Firmware - %s not found\n", fw_name);
		goto err_fw_req;
	}

	/* Alloc URBs */
	ret = ath9k_hif_usb_alloc_urbs(hif_dev);
	if (ret) {
		dev_err(&hif_dev->udev->dev,
			"ath9k_htc: Unable to allocate URBs\n");
		goto err_urb;
	}

	/* Download firmware */
	ret = ath9k_hif_usb_download_fw(hif_dev);
	if (ret) {
		dev_err(&hif_dev->udev->dev,
			"ath9k_htc: Firmware - %s download failed\n", fw_name);
		goto err_fw_download;
	}

	return 0;

	/* Unwind in reverse acquisition order. */
err_fw_download:
	ath9k_hif_usb_dealloc_urbs(hif_dev);
err_urb:
	release_firmware(hif_dev->firmware);
err_fw_req:
	hif_dev->firmware = NULL;
	return ret;
}
831
/*
 * Undo ath9k_hif_usb_dev_init(): kill/free all URBs and drop the
 * firmware reference if one was ever taken.
 */
static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
{
	ath9k_hif_usb_dealloc_urbs(hif_dev);
	if (hif_dev->firmware)
		release_firmware(hif_dev->firmware);
}
838
/*
 * USB probe: allocate the per-device state, take a reference on the
 * usb_device, create the HTC target handle, initialize the HIF layer
 * (firmware + URBs) and finally the HTC layer. The firmware name comes
 * through id->driver_info from the device-id table.
 */
static int ath9k_hif_usb_probe(struct usb_interface *interface,
			       const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(interface);
	struct hif_device_usb *hif_dev;
	const char *fw_name = (const char *) id->driver_info;
	int ret = 0;

	hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
	if (!hif_dev) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	/* Balanced by usb_put_dev() in disconnect/error paths. */
	usb_get_dev(udev);
	hif_dev->udev = udev;
	hif_dev->interface = interface;
	hif_dev->device_id = id->idProduct;
#ifdef CONFIG_PM
	/* Firmware state is lost over suspend; force a reset-resume. */
	udev->reset_resume = 1;
#endif
	usb_set_intfdata(interface, hif_dev);

	hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev, &hif_usb,
						 &hif_dev->udev->dev);
	if (hif_dev->htc_handle == NULL) {
		ret = -ENOMEM;
		goto err_htc_hw_alloc;
	}

	ret = ath9k_hif_usb_dev_init(hif_dev, fw_name);
	if (ret) {
		/*
		 * NOTE(review): this clobbers the more specific errno from
		 * dev_init with -EINVAL — presumably intentional; verify.
		 */
		ret = -EINVAL;
		goto err_hif_init_usb;
	}

	ret = ath9k_htc_hw_init(hif_dev->htc_handle,
				&hif_dev->udev->dev, hif_dev->device_id);
	if (ret) {
		ret = -EINVAL;
		goto err_htc_hw_init;
	}

	dev_info(&hif_dev->udev->dev, "ath9k_htc: USB layer initialized\n");

	return 0;

err_htc_hw_init:
	ath9k_hif_usb_dev_deinit(hif_dev);
err_hif_init_usb:
	ath9k_htc_hw_free(hif_dev->htc_handle);
err_htc_hw_alloc:
	usb_set_intfdata(interface, NULL);
	kfree(hif_dev);
	usb_put_dev(udev);
err_alloc:
	return ret;
}
897
898static void ath9k_hif_usb_reboot(struct usb_device *udev)
899{
900 u32 reboot_cmd = 0xffffffff;
901 void *buf;
902 int ret;
903
904 buf = kmalloc(4, GFP_KERNEL);
905 if (!buf)
906 return;
907
908 memcpy(buf, &reboot_cmd, 4);
909
910 ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
911 buf, 4, NULL, HZ);
912 if (ret)
913 dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
914
915 kfree(buf);
916}
917
918static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
919{
920 struct usb_device *udev = interface_to_usbdev(interface);
921 struct hif_device_usb *hif_dev =
922 (struct hif_device_usb *) usb_get_intfdata(interface);
923
924 if (hif_dev) {
925 ath9k_htc_hw_deinit(hif_dev->htc_handle,
926 (udev->state == USB_STATE_NOTATTACHED) ? true : false);
927 ath9k_htc_hw_free(hif_dev->htc_handle);
928 ath9k_hif_usb_dev_deinit(hif_dev);
929 usb_set_intfdata(interface, NULL);
930 }
931
932 if (hif_dev->flags & HIF_USB_START)
933 ath9k_hif_usb_reboot(udev);
934
935 kfree(hif_dev);
936 dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n");
937 usb_put_dev(udev);
938}
939
940#ifdef CONFIG_PM
static int ath9k_hif_usb_suspend(struct usb_interface *interface,
				 pm_message_t message)
{
	struct hif_device_usb *hif_dev =
		(struct hif_device_usb *) usb_get_intfdata(interface);

	/*
	 * Drop all URBs; the firmware reference is kept so resume can
	 * re-download the image after the device resets.
	 */
	ath9k_hif_usb_dealloc_urbs(hif_dev);

	return 0;
}
951
/*
 * Resume (also used as reset_resume): rebuild the URB machinery,
 * re-download the firmware kept from the original probe, and let the
 * HTC layer re-establish its state. Any failure tears the URBs back
 * down so the device is left in a consistent (suspended-like) state.
 */
static int ath9k_hif_usb_resume(struct usb_interface *interface)
{
	struct hif_device_usb *hif_dev =
		(struct hif_device_usb *) usb_get_intfdata(interface);
	int ret;

	ret = ath9k_hif_usb_alloc_urbs(hif_dev);
	if (ret)
		return ret;

	if (hif_dev->firmware) {
		ret = ath9k_hif_usb_download_fw(hif_dev);
		if (ret)
			goto fail_resume;
	} else {
		/* No cached image — cannot reinitialize the target. */
		ath9k_hif_usb_dealloc_urbs(hif_dev);
		return -EIO;
	}

	/* Give the target time to boot the freshly loaded firmware. */
	mdelay(100);

	ret = ath9k_htc_resume(hif_dev->htc_handle);

	if (ret)
		goto fail_resume;

	return 0;

fail_resume:
	ath9k_hif_usb_dealloc_urbs(hif_dev);

	return ret;
}
985#endif
986
static struct usb_driver ath9k_hif_usb_driver = {
	.name = "ath9k_hif_usb",
	.probe = ath9k_hif_usb_probe,
	.disconnect = ath9k_hif_usb_disconnect,
#ifdef CONFIG_PM
	.suspend = ath9k_hif_usb_suspend,
	/* Firmware is re-downloaded in resume, so a plain resume and a
	 * reset-resume can share the same handler. */
	.resume = ath9k_hif_usb_resume,
	.reset_resume = ath9k_hif_usb_resume,
#endif
	.id_table = ath9k_hif_usb_ids,
	/* NOTE(review): soft_unbind keeps the device usable during
	 * disconnect (so the reboot command can be sent) — confirm. */
	.soft_unbind = 1,
};
999
/* Register the HIF USB driver with the USB core. */
int ath9k_hif_usb_init(void)
{
	return usb_register(&ath9k_hif_usb_driver);
}
1004
/* Unregister the HIF USB driver; triggers disconnect for bound devices. */
void ath9k_hif_usb_exit(void)
{
	usb_deregister(&ath9k_hif_usb_driver);
}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
new file mode 100644
index 000000000000..0aca49b6fcb6
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HTC_USB_H
18#define HTC_USB_H
19
20#define AR9271_FIRMWARE 0x501000
21#define AR9271_FIRMWARE_TEXT 0x903000
22
23#define FIRMWARE_DOWNLOAD 0x30
24#define FIRMWARE_DOWNLOAD_COMP 0x31
25
26#define ATH_USB_RX_STREAM_MODE_TAG 0x4e00
27#define ATH_USB_TX_STREAM_MODE_TAG 0x697e
28
29/* FIXME: Verify these numbers (with Windows) */
30#define MAX_TX_URB_NUM 8
31#define MAX_TX_BUF_NUM 1024
32#define MAX_TX_BUF_SIZE 32768
33#define MAX_TX_AGGR_NUM 20
34
35#define MAX_RX_URB_NUM 8
36#define MAX_RX_BUF_SIZE 16384
37#define MAX_PKT_NUM_IN_TRANSFER 10
38
39#define MAX_REG_OUT_URB_NUM 1
40#define MAX_REG_OUT_BUF_NUM 8
41
42#define MAX_REG_IN_BUF_SIZE 64
43
44/* USB Endpoint definition */
45#define USB_WLAN_TX_PIPE 1
46#define USB_WLAN_RX_PIPE 2
47#define USB_REG_IN_PIPE 3
48#define USB_REG_OUT_PIPE 4
49
50#define HIF_USB_MAX_RXPIPES 2
51#define HIF_USB_MAX_TXPIPES 4
52
/* One reusable TX URB plus its staging buffer. Lives on either
 * hif_usb_tx.tx_buf (free) or hif_usb_tx.tx_pending (in flight). */
struct tx_buf {
	u8 *buf;			/* MAX_TX_BUF_SIZE staging area */
	u16 len;
	u16 offset;
	struct urb *urb;
	struct sk_buff_head skb_queue;	/* skbs carried by this URB */
	struct hif_device_usb *hif_dev;	/* back-pointer for completions */
	struct list_head list;
};
62
63#define HIF_USB_TX_STOP BIT(0)
64
/* Per-device TX state: the URB pool, the pending list and the queue of
 * skbs waiting for a free URB. All guarded by tx_lock. */
struct hif_usb_tx {
	u8 flags;			/* HIF_USB_TX_* */
	u8 tx_buf_cnt;			/* free tx_bufs available */
	u16 tx_skb_cnt;			/* skbs waiting in tx_skb_queue */
	struct sk_buff_head tx_skb_queue;
	struct list_head tx_buf;	/* free tx_buf pool */
	struct list_head tx_pending;	/* tx_bufs with URBs in flight */
	spinlock_t tx_lock;
};
74
/* Completion context pairing an skb with its owning device. */
struct cmd_buf {
	struct sk_buff *skb;
	struct hif_device_usb *hif_dev;
};
79
80#define HIF_USB_START BIT(0)
81
/* Per-USB-device state for the ath9k HTC HIF layer. */
struct hif_device_usb {
	u16 device_id;			/* USB product id from the id table */
	struct usb_device *udev;
	struct usb_interface *interface;
	const struct firmware *firmware; /* cached image for (re)download */
	struct htc_target *htc_handle;
	struct hif_usb_tx tx;
	struct urb *reg_in_urb;		/* single interrupt-IN register URB */
	struct usb_anchor regout_submitted; /* pending register writes */
	struct usb_anchor rx_submitted;	/* in-flight bulk RX URBs */
	/* RX stream-mode reassembly state, guarded by rx_lock. */
	struct sk_buff *remain_skb;
	int rx_remain_len;
	int rx_pkt_len;
	int rx_transfer_len;
	int rx_pad_len;
	spinlock_t rx_lock;
	u8 flags; /* HIF_USB_* */
};
100
101int ath9k_hif_usb_init(void);
102void ath9k_hif_usb_exit(void);
103
104#endif /* HTC_USB_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
new file mode 100644
index 000000000000..ad556aa8da39
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -0,0 +1,464 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HTC_H
18#define HTC_H
19
20#include <linux/module.h>
21#include <linux/usb.h>
22#include <linux/firmware.h>
23#include <linux/skbuff.h>
24#include <linux/netdevice.h>
25#include <linux/leds.h>
26#include <net/mac80211.h>
27
28#include "common.h"
29#include "htc_hst.h"
30#include "hif_usb.h"
31#include "wmi.h"
32
33#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
34#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
35#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
36#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
37
38#define ATH_DEFAULT_BMISS_LIMIT 10
39#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
40#define TSF_TO_TU(_h, _l) \
41 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
42
43extern struct ieee80211_ops ath9k_htc_ops;
44extern int htc_modparam_nohwcrypt;
45
46enum htc_phymode {
47 HTC_MODE_AUTO = 0,
48 HTC_MODE_11A = 1,
49 HTC_MODE_11B = 2,
50 HTC_MODE_11G = 3,
51 HTC_MODE_FH = 4,
52 HTC_MODE_TURBO_A = 5,
53 HTC_MODE_TURBO_G = 6,
54 HTC_MODE_11NA = 7,
55 HTC_MODE_11NG = 8
56};
57
58enum htc_opmode {
59 HTC_M_STA = 1,
60 HTC_M_IBSS = 0,
61 HTC_M_AHDEMO = 3,
62 HTC_M_HOSTAP = 6,
63 HTC_M_MONITOR = 8,
64 HTC_M_WDS = 2
65};
66
67#define ATH9K_HTC_HDRSPACE sizeof(struct htc_frame_hdr)
68#define ATH9K_HTC_AMPDU 1
69#define ATH9K_HTC_NORMAL 2
70
71#define ATH9K_HTC_TX_CTSONLY 0x1
72#define ATH9K_HTC_TX_RTSCTS 0x2
73#define ATH9K_HTC_TX_USE_MIN_RATE 0x100
74
/* Host-to-target header prepended to data frames. Packed: this is the
 * on-the-wire layout shared with the firmware — do not reorder. */
struct tx_frame_hdr {
	u8 data_type;
	u8 node_idx;
	u8 vif_idx;
	u8 tidno;
	u32 flags; /* ATH9K_HTC_TX_* */
	u8 key_type;
	u8 keyix;
	u8 reserved[26];
} __packed;
85
/* Host-to-target header prepended to management frames (wire format). */
struct tx_mgmt_hdr {
	u8 node_idx;
	u8 vif_idx;
	u8 tidno;
	u8 flags;
	u8 key_type;
	u8 keyix;
	u16 reserved;
} __packed;
95
/* Header pushed onto beacon skbs before handing them to HTC
 * (see ath9k_htc_swba). Packed wire format. */
struct tx_beacon_header {
	u8 len_changed;
	u8 vif_index;
	u16 rev;
} __packed;
101
102struct ath9k_htc_target_hw {
103 u32 flags;
104 u32 flags_ext;
105 u32 ampdu_limit;
106 u8 ampdu_subframes;
107 u8 tx_chainmask;
108 u8 tx_chainmask_legacy;
109 u8 rtscts_ratecode;
110 u8 protmode;
111} __packed;
112
113struct ath9k_htc_cap_target {
114 u32 flags;
115 u32 flags_ext;
116 u32 ampdu_limit;
117 u8 ampdu_subframes;
118 u8 tx_chainmask;
119 u8 tx_chainmask_legacy;
120 u8 rtscts_ratecode;
121 u8 protmode;
122} __packed;
123
124struct ath9k_htc_target_vif {
125 u8 index;
126 u8 des_bssid[ETH_ALEN];
127 __be32 opmode;
128 u8 myaddr[ETH_ALEN];
129 u8 bssid[ETH_ALEN];
130 u32 flags;
131 u32 flags_ext;
132 u16 ps_sta;
133 __be16 rtsthreshold;
134 u8 ath_cap;
135 u8 node;
136 s8 mcast_rate;
137} __packed;
138
139#define ATH_HTC_STA_AUTH 0x0001
140#define ATH_HTC_STA_QOS 0x0002
141#define ATH_HTC_STA_ERP 0x0004
142#define ATH_HTC_STA_HT 0x0008
143
144/* FIXME: UAPSD variables */
145struct ath9k_htc_target_sta {
146 u16 associd;
147 u16 txpower;
148 u32 ucastkey;
149 u8 macaddr[ETH_ALEN];
150 u8 bssid[ETH_ALEN];
151 u8 sta_index;
152 u8 vif_index;
153 u8 vif_sta;
154 __be16 flags; /* ATH_HTC_STA_* */
155 u16 htcap;
156 u8 valid;
157 u16 capinfo;
158 struct ath9k_htc_target_hw *hw;
159 struct ath9k_htc_target_vif *vif;
160 u16 txseqmgmt;
161 u8 is_vif_sta;
162 u16 maxampdu;
163 u16 iv16;
164 u32 iv32;
165} __packed;
166
167struct ath9k_htc_target_aggr {
168 u8 sta_index;
169 u8 tidno;
170 u8 aggr_enable;
171 u8 padding;
172} __packed;
173
174#define ATH_HTC_RATE_MAX 30
175
176#define WLAN_RC_DS_FLAG 0x01
177#define WLAN_RC_40_FLAG 0x02
178#define WLAN_RC_SGI_FLAG 0x04
179#define WLAN_RC_HT_FLAG 0x08
180
181struct ath9k_htc_rateset {
182 u8 rs_nrates;
183 u8 rs_rates[ATH_HTC_RATE_MAX];
184};
185
186struct ath9k_htc_rate {
187 struct ath9k_htc_rateset legacy_rates;
188 struct ath9k_htc_rateset ht_rates;
189} __packed;
190
191struct ath9k_htc_target_rate {
192 u8 sta_index;
193 u8 isnew;
194 __be32 capflags;
195 struct ath9k_htc_rate rates;
196};
197
198struct ath9k_htc_target_stats {
199 __be32 tx_shortretry;
200 __be32 tx_longretry;
201 __be32 tx_xretries;
202 __be32 ht_txunaggr_xretry;
203 __be32 ht_tx_xretries;
204} __packed;
205
206struct ath9k_htc_vif {
207 u8 index;
208};
209
210#define ATH9K_HTC_MAX_STA 8
211#define ATH9K_HTC_MAX_TID 8
212
213enum tid_aggr_state {
214 AGGR_STOP = 0,
215 AGGR_PROGRESS,
216 AGGR_START,
217 AGGR_OPERATIONAL
218};
219
220struct ath9k_htc_sta {
221 u8 index;
222 enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
223};
224
225struct ath9k_htc_aggr_work {
226 u16 tid;
227 u8 sta_addr[ETH_ALEN];
228 struct ieee80211_hw *hw;
229 struct ieee80211_vif *vif;
230 enum ieee80211_ampdu_mlme_action action;
231 struct mutex mutex;
232};
233
234#define ATH9K_HTC_RXBUF 256
235#define HTC_RX_FRAME_HEADER_SIZE 40
236
237struct ath9k_htc_rxbuf {
238 bool in_process;
239 struct sk_buff *skb;
240 struct ath_htc_rx_status rxstatus;
241 struct list_head list;
242};
243
244struct ath9k_htc_rx {
245 int last_rssi; /* FIXME: per-STA */
246 struct list_head rxbuf;
247 spinlock_t rxbuflock;
248};
249
250struct ath9k_htc_tx_ctl {
251 u8 type; /* ATH9K_HTC_* */
252};
253
254#ifdef CONFIG_ATH9K_HTC_DEBUGFS
255
256#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
257#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
258
259struct ath_tx_stats {
260 u32 buf_queued;
261 u32 buf_completed;
262 u32 skb_queued;
263 u32 skb_completed;
264 u32 skb_dropped;
265};
266
267struct ath_rx_stats {
268 u32 skb_allocated;
269 u32 skb_completed;
270 u32 skb_dropped;
271};
272
273struct ath9k_debug {
274 struct dentry *debugfs_phy;
275 struct dentry *debugfs_tgt_stats;
276 struct dentry *debugfs_xmit;
277 struct dentry *debugfs_recv;
278 struct ath_tx_stats tx_stats;
279 struct ath_rx_stats rx_stats;
280 u32 txrate;
281};
282
283#else
284
285#define TX_STAT_INC(c) do { } while (0)
286#define RX_STAT_INC(c) do { } while (0)
287
288#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
289
290#define ATH_LED_PIN_DEF 1
291#define ATH_LED_PIN_9287 8
292#define ATH_LED_PIN_9271 15
293#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */
294#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
295
296enum ath_led_type {
297 ATH_LED_RADIO,
298 ATH_LED_ASSOC,
299 ATH_LED_TX,
300 ATH_LED_RX
301};
302
303struct ath_led {
304 struct ath9k_htc_priv *priv;
305 struct led_classdev led_cdev;
306 enum ath_led_type led_type;
307 struct delayed_work brightness_work;
308 char name[32];
309 bool registered;
310 int brightness;
311};
312
313struct htc_beacon_config {
314 u16 beacon_interval;
315 u16 listen_interval;
316 u16 dtim_period;
317 u16 bmiss_timeout;
318 u8 dtim_count;
319};
320
321#define OP_INVALID BIT(0)
322#define OP_SCANNING BIT(1)
323#define OP_FULL_RESET BIT(2)
324#define OP_LED_ASSOCIATED BIT(3)
325#define OP_LED_ON BIT(4)
326#define OP_PREAMBLE_SHORT BIT(5)
327#define OP_PROTECT_ENABLE BIT(6)
328#define OP_TXAGGR BIT(7)
329#define OP_ASSOCIATED BIT(8)
330#define OP_ENABLE_BEACON BIT(9)
331#define OP_LED_DEINIT BIT(10)
332#define OP_UNPLUGGED BIT(11)
333
334struct ath9k_htc_priv {
335 struct device *dev;
336 struct ieee80211_hw *hw;
337 struct ath_hw *ah;
338 struct htc_target *htc;
339 struct wmi *wmi;
340
341 enum htc_endpoint_id wmi_cmd_ep;
342 enum htc_endpoint_id beacon_ep;
343 enum htc_endpoint_id cab_ep;
344 enum htc_endpoint_id uapsd_ep;
345 enum htc_endpoint_id mgmt_ep;
346 enum htc_endpoint_id data_be_ep;
347 enum htc_endpoint_id data_bk_ep;
348 enum htc_endpoint_id data_vi_ep;
349 enum htc_endpoint_id data_vo_ep;
350
351 u16 op_flags;
352 u16 curtxpow;
353 u16 txpowlimit;
354 u16 nvifs;
355 u16 nstations;
356 u16 seq_no;
357 u32 bmiss_cnt;
358
359 spinlock_t beacon_lock;
360
361 bool tx_queues_stop;
362 spinlock_t tx_lock;
363
364 struct ieee80211_vif *vif;
365 struct htc_beacon_config cur_beacon_conf;
366 unsigned int rxfilter;
367 struct tasklet_struct wmi_tasklet;
368 struct tasklet_struct rx_tasklet;
369 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
370 struct ath9k_htc_rx rx;
371 struct tasklet_struct tx_tasklet;
372 struct sk_buff_head tx_queue;
373 struct ath9k_htc_aggr_work aggr_work;
374 struct delayed_work ath9k_aggr_work;
375 struct delayed_work ath9k_ani_work;
376 struct work_struct ps_work;
377
378 struct mutex htc_pm_lock;
379 unsigned long ps_usecount;
380 bool ps_enabled;
381 bool ps_idle;
382
383 struct ath_led radio_led;
384 struct ath_led assoc_led;
385 struct ath_led tx_led;
386 struct ath_led rx_led;
387 struct delayed_work ath9k_led_blink_work;
388 int led_on_duration;
389 int led_off_duration;
390 int led_on_cnt;
391 int led_off_cnt;
392 int hwq_map[ATH9K_WME_AC_VO+1];
393
394#ifdef CONFIG_ATH9K_HTC_DEBUGFS
395 struct ath9k_debug debug;
396#endif
397 struct ath9k_htc_target_rate tgt_rate;
398
399 struct mutex mutex;
400};
401
/* Fetch the bus cache-line size into *csz via the bus-ops hook. */
static inline void ath_read_cachesize(struct ath_common *common, int *csz)
{
	common->bus_ops->read_cachesize(common, csz);
}
406
407void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
408 struct ieee80211_vif *vif);
409void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
410
411void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
412 enum htc_endpoint_id ep_id);
413void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
414 bool txok);
415void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
416 enum htc_endpoint_id ep_id, bool txok);
417
418void ath9k_htc_station_work(struct work_struct *work);
419void ath9k_htc_aggr_work(struct work_struct *work);
/* Fix: stray double semicolon after the prototype. */
void ath9k_ani_work(struct work_struct *work);
421
422int ath9k_tx_init(struct ath9k_htc_priv *priv);
423void ath9k_tx_tasklet(unsigned long data);
424int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb);
425void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
426bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
427 enum ath9k_tx_queue_subtype qtype);
428int get_hw_qnum(u16 queue, int *hwq_map);
429int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
430 struct ath9k_tx_queue_info *qinfo);
431
432int ath9k_rx_init(struct ath9k_htc_priv *priv);
433void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
434void ath9k_host_rx_init(struct ath9k_htc_priv *priv);
435void ath9k_rx_tasklet(unsigned long data);
436u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
437
438void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
439void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
440void ath9k_ps_work(struct work_struct *work);
441
442void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
443void ath9k_init_leds(struct ath9k_htc_priv *priv);
444void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
445
446int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
447 u16 devid);
448void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
449#ifdef CONFIG_PM
450int ath9k_htc_resume(struct htc_target *htc_handle);
451#endif
452#ifdef CONFIG_ATH9K_HTC_DEBUGFS
453int ath9k_htc_debug_create_root(void);
454void ath9k_htc_debug_remove_root(void);
455int ath9k_htc_init_debug(struct ath_hw *ah);
456void ath9k_htc_exit_debug(struct ath_hw *ah);
457#else
/* No-op debugfs stubs for !CONFIG_ATH9K_HTC_DEBUGFS builds.
 * Fix: dropped the stray semicolons after the function bodies (empty
 * top-level declarations; warned about under -pedantic). */
static inline int ath9k_htc_debug_create_root(void) { return 0; }
static inline void ath9k_htc_debug_remove_root(void) {}
static inline int ath9k_htc_init_debug(struct ath_hw *ah) { return 0; }
static inline void ath9k_htc_exit_debug(struct ath_hw *ah) {}
462#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
463
464#endif /* HTC_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
new file mode 100644
index 000000000000..c10c7d002eb7
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -0,0 +1,255 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19#define FUDGE 2
20
/*
 * Program the hardware's station-mode beacon timers (TBTT, DTIM, CFP,
 * beacon-miss threshold, sleep duration) from the current BSS beacon
 * configuration, bracketed by WMI disable/enable-interrupt commands.
 * `ret` and `cmd_rsp` are consumed by the WMI_CMD* macros.
 */
static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
					struct htc_beacon_config *bss_conf)
{
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ath9k_beacon_state bs;
	enum ath9k_int imask = 0;
	int dtimperiod, dtimcount, sleepduration;
	int cfpperiod, cfpcount, bmiss_timeout;
	u32 nexttbtt = 0, intval, tsftu;
	__be32 htc_imask = 0;
	u64 tsf;
	int num_beacons, offset, dtim_dec_count, cfp_dec_count;
	int ret;
	u8 cmd_rsp;

	memset(&bs, 0, sizeof(bs));

	intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
	bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);

	/*
	 * Setup dtim and cfp parameters according to
	 * last beacon we received (which may be none).
	 */
	dtimperiod = bss_conf->dtim_period;
	if (dtimperiod <= 0) /* NB: 0 if not known */
		dtimperiod = 1;
	dtimcount = 1;
	if (dtimcount >= dtimperiod) /* NB: sanity check */
		dtimcount = 0;
	cfpperiod = 1; /* NB: no PCF support yet */
	cfpcount = 0;

	/* NOTE(review): the <= 0 reassignment below is a no-op since
	 * sleepduration was just set to intval — likely a placeholder. */
	sleepduration = intval;
	if (sleepduration <= 0)
		sleepduration = intval;

	/*
	 * Pull nexttbtt forward to reflect the current
	 * TSF and calculate dtim+cfp state for the result.
	 */
	tsf = ath9k_hw_gettsf64(priv->ah);
	tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;

	num_beacons = tsftu / intval + 1;
	offset = tsftu % intval;
	nexttbtt = tsftu - offset;
	if (offset)
		nexttbtt += intval;

	/* DTIM Beacon every dtimperiod Beacon */
	dtim_dec_count = num_beacons % dtimperiod;
	/* CFP every cfpperiod DTIM Beacon */
	cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
	if (dtim_dec_count)
		cfp_dec_count++;

	dtimcount -= dtim_dec_count;
	if (dtimcount < 0)
		dtimcount += dtimperiod;

	cfpcount -= cfp_dec_count;
	if (cfpcount < 0)
		cfpcount += cfpperiod;

	bs.bs_intval = intval;
	bs.bs_nexttbtt = nexttbtt;
	bs.bs_dtimperiod = dtimperiod*intval;
	bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
	bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
	bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
	bs.bs_cfpmaxduration = 0;

	/*
	 * Calculate the number of consecutive beacons to miss* before taking
	 * a BMISS interrupt. The configuration is specified in TU so we only
	 * need calculate based on the beacon interval. Note that we clamp the
	 * result to at most 15 beacons.
	 */
	if (sleepduration > intval) {
		bs.bs_bmissthreshold = ATH_DEFAULT_BMISS_LIMIT / 2;
	} else {
		bs.bs_bmissthreshold = DIV_ROUND_UP(bmiss_timeout, intval);
		if (bs.bs_bmissthreshold > 15)
			bs.bs_bmissthreshold = 15;
		else if (bs.bs_bmissthreshold <= 0)
			bs.bs_bmissthreshold = 1;
	}

	/*
	 * Calculate sleep duration. The configuration is given in ms.
	 * We ensure a multiple of the beacon period is used. Also, if the sleep
	 * duration is greater than the DTIM period then it makes senses
	 * to make it a multiple of that.
	 *
	 * XXX fixed at 100ms
	 */

	bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
	if (bs.bs_sleepduration > bs.bs_dtimperiod)
		bs.bs_sleepduration = bs.bs_dtimperiod;

	/* TSF out of range threshold fixed at 1 second */
	bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;

	ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
	ath_print(common, ATH_DBG_BEACON,
		  "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
		  bs.bs_bmissthreshold, bs.bs_sleepduration,
		  bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);

	/* Set the computed STA beacon timers */

	WMI_CMD(WMI_DISABLE_INTR_CMDID);
	ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
	imask |= ATH9K_INT_BMISS;
	htc_imask = cpu_to_be32(imask);
	WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
}
140
/*
 * Program the hardware for IBSS (adhoc) beaconing: first TBTT one
 * interval out, beaconing enabled, and SWBA interrupts requested only
 * when OP_ENABLE_BEACON is set. `ret`/`cmd_rsp` feed the WMI_CMD*
 * macros.
 */
static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
					  struct htc_beacon_config *bss_conf)
{
	struct ath_common *common = ath9k_hw_common(priv->ah);
	enum ath9k_int imask = 0;
	u32 nexttbtt, intval;
	__be32 htc_imask = 0;
	int ret;
	u8 cmd_rsp;

	intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
	nexttbtt = intval;
	intval |= ATH9K_BEACON_ENA;
	if (priv->op_flags & OP_ENABLE_BEACON)
		imask |= ATH9K_INT_SWBA;

	ath_print(common, ATH_DBG_BEACON,
		  "IBSS Beacon config, intval: %d, imask: 0x%x\n",
		  bss_conf->beacon_interval, imask);

	WMI_CMD(WMI_DISABLE_INTR_CMDID);
	ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
	priv->bmiss_cnt = 0;
	htc_imask = cpu_to_be32(imask);
	WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
}
167
/* Beacon-endpoint TX completion: the beacon skb is simply released. */
void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
			enum htc_endpoint_id ep_id, bool txok)
{
	dev_kfree_skb_any(skb);
}
173
/*
 * SWBA (software beacon alert) handler: fetch the next beacon from
 * mac80211, stamp the sequence number if requested, prepend the
 * tx_beacon_header and hand it to HTC on the beacon endpoint.
 * A non-zero beacon_pending means the previous beacon never went out,
 * which is counted as a miss instead.
 */
void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
{
	struct ath9k_htc_vif *avp = (void *)priv->vif->drv_priv;
	struct tx_beacon_header beacon_hdr;
	struct ath9k_htc_tx_ctl tx_ctl;
	struct ieee80211_tx_info *info;
	struct sk_buff *beacon;
	u8 *tx_fhdr;

	memset(&beacon_hdr, 0, sizeof(struct tx_beacon_header));
	memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));

	/* FIXME: Handle BMISS */
	if (beacon_pending != 0) {
		priv->bmiss_cnt++;
		return;
	}

	/* Serialize against beacon reconfiguration. */
	spin_lock_bh(&priv->beacon_lock);

	if (unlikely(priv->op_flags & OP_SCANNING)) {
		spin_unlock_bh(&priv->beacon_lock);
		return;
	}

	/* Get a new beacon */
	beacon = ieee80211_beacon_get(priv->hw, priv->vif);
	if (!beacon) {
		spin_unlock_bh(&priv->beacon_lock);
		return;
	}

	info = IEEE80211_SKB_CB(beacon);
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		struct ieee80211_hdr *hdr =
			(struct ieee80211_hdr *) beacon->data;
		/* Sequence number lives in the upper 12 bits (<< 4). */
		priv->seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(priv->seq_no);
	}

	tx_ctl.type = ATH9K_HTC_NORMAL;
	beacon_hdr.vif_index = avp->index;
	tx_fhdr = skb_push(beacon, sizeof(beacon_hdr));
	memcpy(tx_fhdr, (u8 *) &beacon_hdr, sizeof(beacon_hdr));

	htc_send(priv->htc, beacon, priv->beacon_ep, &tx_ctl);

	spin_unlock_bh(&priv->beacon_lock);
}
224
225
226void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
227 struct ieee80211_vif *vif)
228{
229 struct ath_common *common = ath9k_hw_common(priv->ah);
230 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
231 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
232
233 cur_conf->beacon_interval = bss_conf->beacon_int;
234 if (cur_conf->beacon_interval == 0)
235 cur_conf->beacon_interval = 100;
236
237 cur_conf->dtim_period = bss_conf->dtim_period;
238 cur_conf->listen_interval = 1;
239 cur_conf->dtim_count = 1;
240 cur_conf->bmiss_timeout =
241 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
242
243 switch (vif->type) {
244 case NL80211_IFTYPE_STATION:
245 ath9k_htc_beacon_config_sta(priv, cur_conf);
246 break;
247 case NL80211_IFTYPE_ADHOC:
248 ath9k_htc_beacon_config_adhoc(priv, cur_conf);
249 break;
250 default:
251 ath_print(common, ATH_DBG_CONFIG,
252 "Unsupported beaconing mode\n");
253 return;
254 }
255}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
new file mode 100644
index 000000000000..dc015077a8d9
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -0,0 +1,834 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
MODULE_AUTHOR("Atheros Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Atheros driver 802.11n HTC based wireless devices");

/* "debug": bitmask of ATH_DBG_* categories; not writable via sysfs (perm 0). */
static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

/* "nohwcrypt": non-zero forces software crypto; read-only via sysfs (0444). */
int htc_modparam_nohwcrypt;
module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
30
/*
 * Helper to declare a 2.4 GHz channel entry: center frequency in MHz,
 * driver hw_value index, and a fixed 20 dBm max power.
 */
#define CHAN2G(_freq, _idx) { \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/* Full 2.4 GHz channel set (1-14); regulatory rules prune what is usable. */
static struct ieee80211_channel ath9k_2ghz_channels[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};
53
/*
 * Atheros hardware rate code addition for short preamble: rates that
 * support it get bit 0x04 OR-ed into their hw code, others get 0.
 */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

/* Declare a legacy rate: bitrate is in units of 100 kbps. */
#define RATE(_bitrate, _hw_rate, _flags) { \
	.bitrate = (_bitrate), \
	.flags = (_flags), \
	.hw_value = (_hw_rate), \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
}

/* CCK rates (1-11 Mbps) first, then OFDM rates (6-54 Mbps). */
static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp : 0x1e */
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp: 0x1d */
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), /* short: 0x1c */
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};
79
/*
 * Block until the target firmware signals readiness.
 *
 * tgt_ready acts as a counting token: consume one immediately if
 * available, otherwise wait (up to 1s) for the firmware's completion
 * and consume the token afterwards.
 *
 * Returns 0 on success, -ETIMEDOUT if the target never responded.
 */
static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
{
	int time_left;

	if (atomic_read(&priv->htc->tgt_ready) > 0) {
		atomic_dec(&priv->htc->tgt_ready);
		return 0;
	}

	/* Firmware can take up to 50ms to get ready, to be safe use 1 second */
	time_left = wait_for_completion_timeout(&priv->htc->target_wait, HZ);
	if (!time_left) {
		dev_err(priv->dev, "ath9k_htc: Target is unresponsive\n");
		return -ETIMEDOUT;
	}

	atomic_dec(&priv->htc->tgt_ready);

	return 0;
}
100
/*
 * Tear down everything ath9k_init_priv() set up: debugfs, hardware
 * state, the three tasklets, and finally the ath_hw allocation.
 * priv->ah is NULLed to guard against stale use after teardown.
 */
static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
{
	ath9k_htc_exit_debug(priv->ah);
	ath9k_hw_deinit(priv->ah);
	tasklet_kill(&priv->wmi_tasklet);
	tasklet_kill(&priv->rx_tasklet);
	tasklet_kill(&priv->tx_tasklet);
	kfree(priv->ah);
	priv->ah = NULL;
}
111
/*
 * Full device teardown, mirroring ath9k_init_device() in reverse:
 * stop rfkill polling and LEDs, unregister from mac80211, release
 * RX/TX resources, then free the private hardware state.
 */
static void ath9k_deinit_device(struct ath9k_htc_priv *priv)
{
	struct ieee80211_hw *hw = priv->hw;

	wiphy_rfkill_stop_polling(hw->wiphy);
	ath9k_deinit_leds(priv);
	ieee80211_unregister_hw(hw);
	ath9k_rx_cleanup(priv);
	ath9k_tx_cleanup(priv);
	ath9k_deinit_priv(priv);
}
123
124static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv,
125 u16 service_id,
126 void (*tx) (void *,
127 struct sk_buff *,
128 enum htc_endpoint_id,
129 bool txok),
130 enum htc_endpoint_id *ep_id)
131{
132 struct htc_service_connreq req;
133
134 memset(&req, 0, sizeof(struct htc_service_connreq));
135
136 req.service_id = service_id;
137 req.ep_callbacks.priv = priv;
138 req.ep_callbacks.rx = ath9k_htc_rxep;
139 req.ep_callbacks.tx = tx;
140
141 return htc_connect_service(priv->htc, &req, ep_id);
142}
143
144static int ath9k_init_htc_services(struct ath9k_htc_priv *priv)
145{
146 int ret;
147
148 /* WMI CMD*/
149 ret = ath9k_wmi_connect(priv->htc, priv->wmi, &priv->wmi_cmd_ep);
150 if (ret)
151 goto err;
152
153 /* Beacon */
154 ret = ath9k_htc_connect_svc(priv, WMI_BEACON_SVC, ath9k_htc_beaconep,
155 &priv->beacon_ep);
156 if (ret)
157 goto err;
158
159 /* CAB */
160 ret = ath9k_htc_connect_svc(priv, WMI_CAB_SVC, ath9k_htc_txep,
161 &priv->cab_ep);
162 if (ret)
163 goto err;
164
165
166 /* UAPSD */
167 ret = ath9k_htc_connect_svc(priv, WMI_UAPSD_SVC, ath9k_htc_txep,
168 &priv->uapsd_ep);
169 if (ret)
170 goto err;
171
172 /* MGMT */
173 ret = ath9k_htc_connect_svc(priv, WMI_MGMT_SVC, ath9k_htc_txep,
174 &priv->mgmt_ep);
175 if (ret)
176 goto err;
177
178 /* DATA BE */
179 ret = ath9k_htc_connect_svc(priv, WMI_DATA_BE_SVC, ath9k_htc_txep,
180 &priv->data_be_ep);
181 if (ret)
182 goto err;
183
184 /* DATA BK */
185 ret = ath9k_htc_connect_svc(priv, WMI_DATA_BK_SVC, ath9k_htc_txep,
186 &priv->data_bk_ep);
187 if (ret)
188 goto err;
189
190 /* DATA VI */
191 ret = ath9k_htc_connect_svc(priv, WMI_DATA_VI_SVC, ath9k_htc_txep,
192 &priv->data_vi_ep);
193 if (ret)
194 goto err;
195
196 /* DATA VO */
197 ret = ath9k_htc_connect_svc(priv, WMI_DATA_VO_SVC, ath9k_htc_txep,
198 &priv->data_vo_ep);
199 if (ret)
200 goto err;
201
202 ret = htc_init(priv->htc);
203 if (ret)
204 goto err;
205
206 return 0;
207
208err:
209 dev_err(priv->dev, "ath9k_htc: Unable to initialize HTC services\n");
210 return ret;
211}
212
213static int ath9k_reg_notifier(struct wiphy *wiphy,
214 struct regulatory_request *request)
215{
216 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
217 struct ath9k_htc_priv *priv = hw->priv;
218
219 return ath_reg_notifier_apply(wiphy, request,
220 ath9k_hw_regulatory(priv->ah));
221}
222
/*
 * Read a device register over the WMI command channel (USB round trip,
 * 100ms timeout). Register offset and value travel big-endian on the
 * wire.
 *
 * NOTE(review): on failure -EIO is returned through an unsigned int,
 * so callers cannot distinguish it from a register reading 0xfffffff5
 * — appears intentional for this ops interface, but worth confirming.
 */
static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
	__be32 val, reg = cpu_to_be32(reg_offset);
	int r;

	r = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
			  (u8 *) &reg, sizeof(reg),
			  (u8 *) &val, sizeof(val),
			  100);
	if (unlikely(r)) {
		ath_print(common, ATH_DBG_WMI,
			  "REGISTER READ FAILED: (0x%04x, %d)\n",
			  reg_offset, r);
		return -EIO;
	}

	return be32_to_cpu(val);
}
244
/*
 * Write a single register over WMI. The payload is {offset, value},
 * both big-endian. Write failures are logged but not propagated —
 * the ath_ops write hook returns void.
 */
static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
	__be32 buf[2] = {
		cpu_to_be32(reg_offset),
		cpu_to_be32(val),
	};
	int r;

	/* 'val' doubles as a throwaway response buffer here. */
	r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
			  (u8 *) &buf, sizeof(buf),
			  (u8 *) &val, sizeof(val),
			  100);
	if (unlikely(r)) {
		ath_print(common, ATH_DBG_WMI,
			  "REGISTER WRITE FAILED:(0x%04x, %d)\n",
			  reg_offset, r);
	}
}
266
/*
 * Buffered register write: queue the {offset, value} pair in the WMI
 * multi-write buffer (under multi_write_mutex) and flush the whole
 * batch in one WMI command once MAX_CMD_NUMBER entries accumulate.
 * Partial batches are flushed later by ath9k_regwrite_flush().
 */
static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
	u32 rsp_status;
	int r;

	mutex_lock(&priv->wmi->multi_write_mutex);

	/* Store the register/value */
	priv->wmi->multi_write[priv->wmi->multi_write_idx].reg =
		cpu_to_be32(reg_offset);
	priv->wmi->multi_write[priv->wmi->multi_write_idx].val =
		cpu_to_be32(val);

	priv->wmi->multi_write_idx++;

	/* If the buffer is full, send it out. */
	if (priv->wmi->multi_write_idx == MAX_CMD_NUMBER) {
		r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
			  (u8 *) &priv->wmi->multi_write,
			  sizeof(struct register_write) * priv->wmi->multi_write_idx,
			  (u8 *) &rsp_status, sizeof(rsp_status),
			  100);
		if (unlikely(r)) {
			ath_print(common, ATH_DBG_WMI,
				  "REGISTER WRITE FAILED, multi len: %d\n",
				  priv->wmi->multi_write_idx);
		}
		priv->wmi->multi_write_idx = 0;
	}

	mutex_unlock(&priv->wmi->multi_write_mutex);
}
302
303static void ath9k_regwrite(void *hw_priv, u32 val, u32 reg_offset)
304{
305 struct ath_hw *ah = (struct ath_hw *) hw_priv;
306 struct ath_common *common = ath9k_hw_common(ah);
307 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
308
309 if (atomic_read(&priv->wmi->mwrite_cnt))
310 ath9k_regwrite_buffer(hw_priv, val, reg_offset);
311 else
312 ath9k_regwrite_single(hw_priv, val, reg_offset);
313}
314
/* Enter buffered-write mode (nestable; paired with the disable hook). */
static void ath9k_enable_regwrite_buffer(void *hw_priv)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;

	atomic_inc(&priv->wmi->mwrite_cnt);
}
323
/*
 * Leave buffered-write mode. Note this only drops the nesting count;
 * queued writes stay pending until ath9k_regwrite_flush() is called.
 */
static void ath9k_disable_regwrite_buffer(void *hw_priv)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;

	atomic_dec(&priv->wmi->mwrite_cnt);
}
332
/*
 * Push any partially-filled multi-write batch to the target in a
 * single WMI command. Failures are logged only; the buffer index is
 * reset regardless so stale entries are never resent.
 */
static void ath9k_regwrite_flush(void *hw_priv)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
	u32 rsp_status;
	int r;

	mutex_lock(&priv->wmi->multi_write_mutex);

	if (priv->wmi->multi_write_idx) {
		r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
			  (u8 *) &priv->wmi->multi_write,
			  sizeof(struct register_write) * priv->wmi->multi_write_idx,
			  (u8 *) &rsp_status, sizeof(rsp_status),
			  100);
		if (unlikely(r)) {
			ath_print(common, ATH_DBG_WMI,
				  "REGISTER WRITE FAILED, multi len: %d\n",
				  priv->wmi->multi_write_idx);
		}
		priv->wmi->multi_write_idx = 0;
	}

	mutex_unlock(&priv->wmi->multi_write_mutex);
}
359
/* Register-access ops for the ath core: all I/O is tunnelled over WMI. */
static const struct ath_ops ath9k_common_ops = {
	.read = ath9k_regread,
	.write = ath9k_regwrite,
	.enable_write_buffer = ath9k_enable_regwrite_buffer,
	.disable_write_buffer = ath9k_disable_regwrite_buffer,
	.write_flush = ath9k_regwrite_flush,
};
367
/*
 * Report cache line size in 32-bit words (callers convert back to
 * bytes with csz << 2, see ath9k_init_priv). USB has no config-space
 * cache line register, so the host CPU's L1 line size is used.
 */
static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
{
	*csz = L1_CACHE_BYTES >> 2;
}
372
/*
 * Read one 16-bit EEPROM word. A read of the EEPROM offset register
 * triggers the access; we then poll the status register until the
 * busy/protected bits clear and extract the data field.
 *
 * Returns true on success, false if the EEPROM stayed busy past
 * AH_WAIT_TIMEOUT.
 */
static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
	struct ath_hw *ah = (struct ath_hw *) common->ah;

	/* Dummy read kicks off the EEPROM access. */
	(void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));

	if (!ath9k_hw_wait(ah,
			   AR_EEPROM_STATUS_DATA,
			   AR_EEPROM_STATUS_DATA_BUSY |
			   AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
			   AH_WAIT_TIMEOUT))
		return false;

	*data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
		   AR_EEPROM_STATUS_DATA_VAL);

	return true;
}
391
/* Bus-level callbacks handed to the ath core for this USB device. */
static const struct ath_bus_ops ath9k_usb_bus_ops = {
	.ath_bus_type = ATH_USB,
	.read_cachesize = ath_usb_read_cachesize,
	.eeprom_read = ath_usb_eeprom_read,
};
397
/*
 * Advertise the device's HT capabilities to mac80211: 40 MHz channels,
 * SM power save, short GI at 40 MHz, 64K max A-MPDU, and a single
 * spatial stream (rx_mask[0] only).
 */
static void setup_ht_cap(struct ath9k_htc_priv *priv,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	/* One RX stream: MCS 0-7 only. */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	ht_info->mcs.rx_mask[0] = 0xff;
	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}
414
415static int ath9k_init_queues(struct ath9k_htc_priv *priv)
416{
417 struct ath_common *common = ath9k_hw_common(priv->ah);
418 int i;
419
420 for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
421 priv->hwq_map[i] = -1;
422
423 if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BE)) {
424 ath_print(common, ATH_DBG_FATAL,
425 "Unable to setup xmit queue for BE traffic\n");
426 goto err;
427 }
428
429 if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BK)) {
430 ath_print(common, ATH_DBG_FATAL,
431 "Unable to setup xmit queue for BK traffic\n");
432 goto err;
433 }
434 if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VI)) {
435 ath_print(common, ATH_DBG_FATAL,
436 "Unable to setup xmit queue for VI traffic\n");
437 goto err;
438 }
439 if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VO)) {
440 ath_print(common, ATH_DBG_FATAL,
441 "Unable to setup xmit queue for VO traffic\n");
442 goto err;
443 }
444
445 return 0;
446
447err:
448 return -EINVAL;
449}
450
/*
 * Initialize the hardware key cache: clamp its size to ATH_KEYMAX,
 * wipe all entries, and probe/enable TKIP MIC, split-MIC and
 * multicast key search capabilities.
 */
static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
{
	struct ath_common *common = ath9k_hw_common(priv->ah);
	int i = 0;

	/* Get the hardware key cache size. */
	common->keymax = priv->ah->caps.keycache_size;
	if (common->keymax > ATH_KEYMAX) {
		ath_print(common, ATH_DBG_ANY,
			  "Warning, using only %u entries in %u key cache\n",
			  ATH_KEYMAX, common->keymax);
		common->keymax = ATH_KEYMAX;
	}

	/*
	 * Reset the key cache since some parts do not
	 * reset the contents on initial power up.
	 */
	for (i = 0; i < common->keymax; i++)
		ath9k_hw_keyreset(priv->ah, (u16) i);

	if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)) {
		/*
		 * Whether we should enable h/w TKIP MIC.
		 * XXX: if we don't support WME TKIP MIC, then we wouldn't
		 * report WMM capable, so it's always safe to turn on
		 * TKIP MIC in this case.
		 */
		ath9k_hw_setcapability(priv->ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
	}

	/*
	 * Check whether the separate key cache entries
	 * are required to handle both tx+rx MIC keys.
	 * With split mic keys the number of stations is limited
	 * to 27 otherwise 59.
	 */
	if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
				   ATH9K_CIPHER_TKIP, NULL)
	    && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
				      ATH9K_CIPHER_MIC, NULL)
	    && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_TKIP_SPLIT,
				      0, NULL))
		common->splitmic = 1;

	/* turn on mcast key search if possible */
	if (!ath9k_hw_getcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
		(void)ath9k_hw_setcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH,
					     1, 1, NULL);
}
502
503static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
504{
505 if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes)) {
506 priv->sbands[IEEE80211_BAND_2GHZ].channels =
507 ath9k_2ghz_channels;
508 priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
509 priv->sbands[IEEE80211_BAND_2GHZ].n_channels =
510 ARRAY_SIZE(ath9k_2ghz_channels);
511 priv->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
512 priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
513 ARRAY_SIZE(ath9k_legacy_rates);
514 }
515}
516
/*
 * Remaining one-off initialization: chainmasks from hardware caps,
 * an all-ones BSSID mask when supported, TX aggregation enabled by
 * default, and station mode as the initial opmode.
 */
static void ath9k_init_misc(struct ath9k_htc_priv *priv)
{
	struct ath_common *common = ath9k_hw_common(priv->ah);

	common->tx_chainmask = priv->ah->caps.tx_chainmask;
	common->rx_chainmask = priv->ah->caps.rx_chainmask;

	if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
		memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);

	priv->op_flags |= OP_TXAGGR;
	priv->ah->opmode = NL80211_IFTYPE_STATION;
}
530
/*
 * Core driver-private initialization: allocate and wire up the ath_hw,
 * set up locks/tasklets/work items, initialize the hardware, debugfs,
 * TX queues, crypto, band tables and misc state.
 *
 * OP_INVALID stays set here; it is cleared elsewhere once the device
 * is actually started.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated so far is unwound via the goto chain and priv->ah is left
 * NULL.
 */
static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
{
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, csz = 0;

	priv->op_flags |= OP_INVALID;

	ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->hw_version.devid = devid;
	ah->hw_version.subsysid = 0; /* FIXME */
	priv->ah = ah;

	common = ath9k_hw_common(ah);
	common->ops = &ath9k_common_ops;
	common->bus_ops = &ath9k_usb_bus_ops;
	common->ah = ah;
	common->hw = priv->hw;
	common->priv = priv;
	common->debug_mask = ath9k_debug;

	spin_lock_init(&priv->wmi->wmi_lock);
	spin_lock_init(&priv->beacon_lock);
	spin_lock_init(&priv->tx_lock);
	mutex_init(&priv->mutex);
	mutex_init(&priv->aggr_work.mutex);
	mutex_init(&priv->htc_pm_lock);
	tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
		     (unsigned long)priv);
	tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
		     (unsigned long)priv);
	tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
	INIT_DELAYED_WORK(&priv->ath9k_aggr_work, ath9k_htc_aggr_work);
	INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
	INIT_WORK(&priv->ps_work, ath9k_ps_work);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	ret = ath9k_hw_init(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to initialize hardware; "
			  "initialization status: %d\n", ret);
		goto err_hw;
	}

	ret = ath9k_htc_init_debug(ah);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to create debugfs files\n");
		goto err_debug;
	}

	ret = ath9k_init_queues(priv);
	if (ret)
		goto err_queues;

	ath9k_init_crypto(priv);
	ath9k_init_channels_rates(priv);
	ath9k_init_misc(priv);

	return 0;

err_queues:
	ath9k_htc_exit_debug(ah);
err_debug:
	ath9k_hw_deinit(ah);
err_hw:

	kfree(ah);
	priv->ah = NULL;

	return ret;
}
613
/*
 * Describe the device to mac80211 before registration: feature flags,
 * supported interface modes, queue count, per-vif/per-sta private data
 * sizes, TX headroom for the HTC framing, the 2 GHz band (with HT caps
 * when the hardware supports them), and the permanent MAC address.
 */
static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
			       struct ieee80211_hw *hw)
{
	struct ath_common *common = ath9k_hw_common(priv->ah);

	hw->flags = IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_AMPDU_AGGREGATION |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_HAS_RATE_CONTROL |
		IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC);

	/* Powersave is opt-in for this device. */
	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->queues = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 10;
	hw->vif_data_size = sizeof(struct ath9k_htc_vif);
	hw->sta_data_size = sizeof(struct ath9k_htc_sta);

	/* tx_frame_hdr is larger than tx_mgmt_hdr anyway */
	hw->extra_tx_headroom = sizeof(struct tx_frame_hdr) +
		sizeof(struct htc_frame_hdr) + 4;

	if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&priv->sbands[IEEE80211_BAND_2GHZ];

	if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
		if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
			setup_ht_cap(priv,
				     &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
	}

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}
655
/*
 * Bring the device fully up: private state, hw capabilities,
 * regulatory, TX/RX paths, mac80211 registration, a world-regd hint
 * when applicable, and finally LEDs and rfkill polling.
 *
 * Returns 0 on success or a negative errno; the goto chain unwinds
 * exactly the steps that completed, in reverse order.
 */
static int ath9k_init_device(struct ath9k_htc_priv *priv, u16 devid)
{
	struct ieee80211_hw *hw = priv->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_priv(priv, devid);
	if (error != 0)
		goto err_init;

	ah = priv->ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(priv, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, priv->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto err_regd;

	reg = &common->regulatory;

	/* Setup TX */
	error = ath9k_tx_init(priv);
	if (error != 0)
		goto err_tx;

	/* Setup RX */
	error = ath9k_rx_init(priv);
	if (error != 0)
		goto err_rx;

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto err_register;

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto err_world;
	}

	ath9k_init_leds(priv);
	ath9k_start_rfkill_poll(priv);

	return 0;

err_world:
	ieee80211_unregister_hw(hw);
err_register:
	ath9k_rx_cleanup(priv);
err_rx:
	ath9k_tx_cleanup(priv);
err_tx:
	/* Nothing */
err_regd:
	ath9k_deinit_priv(priv);
err_init:
	return error;
}
721
/*
 * Probe entry point called once the HTC transport is up: allocate the
 * mac80211 hw, wait for target firmware readiness, bring up WMI and
 * the HTC services, then initialize the device proper.
 *
 * Returns 0 on success or a negative errno; on failure the allocated
 * hw (and WMI, once created) is freed.
 */
int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
			   u16 devid)
{
	struct ieee80211_hw *hw;
	struct ath9k_htc_priv *priv;
	int ret;

	hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
	if (!hw)
		return -ENOMEM;

	priv = hw->priv;
	priv->hw = hw;
	priv->htc = htc_handle;
	priv->dev = dev;
	htc_handle->drv_priv = priv;
	SET_IEEE80211_DEV(hw, priv->dev);

	ret = ath9k_htc_wait_for_target(priv);
	if (ret)
		goto err_free;

	priv->wmi = ath9k_init_wmi(priv);
	if (!priv->wmi) {
		ret = -EINVAL;
		goto err_free;
	}

	ret = ath9k_init_htc_services(priv);
	if (ret)
		goto err_init;

	/* The device may have been unplugged earlier. */
	priv->op_flags &= ~OP_UNPLUGGED;

	ret = ath9k_init_device(priv, devid);
	if (ret)
		goto err_init;

	return 0;

err_init:
	ath9k_deinit_wmi(priv);
err_free:
	ieee80211_free_hw(hw);
	return ret;
}
769
770void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
771{
772 if (htc_handle->drv_priv) {
773
774 /* Check if the device has been yanked out. */
775 if (hotunplug)
776 htc_handle->drv_priv->op_flags |= OP_UNPLUGGED;
777
778 ath9k_deinit_device(htc_handle->drv_priv);
779 ath9k_deinit_wmi(htc_handle->drv_priv);
780 ieee80211_free_hw(htc_handle->drv_priv->hw);
781 }
782}
783
#ifdef CONFIG_PM
/*
 * Resume-from-suspend: wait for the (re-downloaded) firmware to come
 * up, then reconnect all HTC services.
 */
int ath9k_htc_resume(struct htc_target *htc_handle)
{
	struct ath9k_htc_priv *priv = htc_handle->drv_priv;
	int ret;

	ret = ath9k_htc_wait_for_target(priv);
	if (ret)
		return ret;

	return ath9k_init_htc_services(priv);
}
#endif
797
798static int __init ath9k_htc_init(void)
799{
800 int error;
801
802 error = ath9k_htc_debug_create_root();
803 if (error < 0) {
804 printk(KERN_ERR
805 "ath9k_htc: Unable to create debugfs root: %d\n",
806 error);
807 goto err_dbg;
808 }
809
810 error = ath9k_hif_usb_init();
811 if (error < 0) {
812 printk(KERN_ERR
813 "ath9k_htc: No USB devices found,"
814 " driver not installed.\n");
815 error = -ENODEV;
816 goto err_usb;
817 }
818
819 return 0;
820
821err_usb:
822 ath9k_htc_debug_remove_root();
823err_dbg:
824 return error;
825}
826module_init(ath9k_htc_init);
827
/* Module exit: unwind ath9k_htc_init() in reverse order. */
static void __exit ath9k_htc_exit(void)
{
	ath9k_hif_usb_exit();
	ath9k_htc_debug_remove_root();
	printk(KERN_INFO "ath9k_htc: Driver unloaded\n");
}
module_exit(ath9k_htc_exit);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
new file mode 100644
index 000000000000..9d371c18eb41
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -0,0 +1,1775 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19#ifdef CONFIG_ATH9K_HTC_DEBUGFS
20static struct dentry *ath9k_debugfs_root;
21#endif
22
23/*************/
24/* Utilities */
25/*************/
26
/*
 * Apply the configured TX power limit if it changed, then read the
 * value back so curtxpow reflects any clamping the hardware applied.
 */
static void ath_update_txpow(struct ath9k_htc_priv *priv)
{
	struct ath_hw *ah = priv->ah;
	u32 txpow;

	if (priv->curtxpow != priv->txpowlimit) {
		ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
		/* read back in case value is clamped */
		ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
		priv->curtxpow = txpow;
	}
}
39
40/* HACK Alert: Use 11NG for 2.4, use 11NA for 5 */
41static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
42 struct ath9k_channel *ichan)
43{
44 enum htc_phymode mode;
45
46 mode = HTC_MODE_AUTO;
47
48 switch (ichan->chanmode) {
49 case CHANNEL_G:
50 case CHANNEL_G_HT20:
51 case CHANNEL_G_HT40PLUS:
52 case CHANNEL_G_HT40MINUS:
53 mode = HTC_MODE_11NG;
54 break;
55 case CHANNEL_A:
56 case CHANNEL_A_HT20:
57 case CHANNEL_A_HT40PLUS:
58 case CHANNEL_A_HT40MINUS:
59 mode = HTC_MODE_11NA;
60 break;
61 default:
62 break;
63 }
64
65 return mode;
66}
67
/* Set the chip's power mode, serialized against the ps refcount paths. */
static bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
			       enum ath9k_power_mode mode)
{
	bool ret;

	mutex_lock(&priv->htc_pm_lock);
	ret = ath9k_hw_setpower(priv->ah, mode);
	mutex_unlock(&priv->htc_pm_lock);

	return ret;
}
79
80void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv)
81{
82 mutex_lock(&priv->htc_pm_lock);
83 if (++priv->ps_usecount != 1)
84 goto unlock;
85 ath9k_hw_setpower(priv->ah, ATH9K_PM_AWAKE);
86
87unlock:
88 mutex_unlock(&priv->htc_pm_lock);
89}
90
91void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
92{
93 mutex_lock(&priv->htc_pm_lock);
94 if (--priv->ps_usecount != 0)
95 goto unlock;
96
97 if (priv->ps_idle)
98 ath9k_hw_setpower(priv->ah, ATH9K_PM_FULL_SLEEP);
99 else if (priv->ps_enabled)
100 ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
101
102unlock:
103 mutex_unlock(&priv->htc_pm_lock);
104}
105
/*
 * Deferred power-save transition (queued from contexts that cannot
 * sleep): wake the chip, then put it into network sleep.
 */
void ath9k_ps_work(struct work_struct *work)
{
	struct ath9k_htc_priv *priv =
		container_of(work, struct ath9k_htc_priv,
			     ps_work);
	ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);

	/* The chip wakes up after receiving the first beacon
	   while network sleep is enabled. For the driver to
	   be in sync with the hw, set the chip to awake and
	   only then set it to sleep.
	 */
	ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
}
120
/*
 * Switch the hardware to a new channel. The sequence is: quiesce HTC
 * and the target (disable interrupts, drain TX, stop RX), reset the
 * hardware on the new channel, then re-enable RX, phymode, interrupts
 * and HTC.
 *
 * The WMI_CMD* macros use the local 'ret' and 'cmd_rsp' variables.
 * Fast channel change is currently forced off (full reset each time).
 *
 * Returns 0 on success or a negative errno; -EIO if the device is
 * marked invalid.
 */
static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
				 struct ieee80211_hw *hw,
				 struct ath9k_channel *hchan)
{
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &common->hw->conf;
	bool fastcc = true;
	struct ieee80211_channel *channel = hw->conf.channel;
	enum htc_phymode mode;
	__be16 htc_mode;
	u8 cmd_rsp;
	int ret;

	if (priv->op_flags & OP_INVALID)
		return -EIO;

	if (priv->op_flags & OP_FULL_RESET)
		fastcc = false;

	/* Fiddle around with fastcc later on, for now just use full reset */
	fastcc = false;
	ath9k_htc_ps_wakeup(priv);
	htc_stop(priv->htc);
	WMI_CMD(WMI_DISABLE_INTR_CMDID);
	WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
	WMI_CMD(WMI_STOP_RECV_CMDID);

	ath_print(common, ATH_DBG_CONFIG,
		  "(%u MHz) -> (%u MHz), HT: %d, HT40: %d\n",
		  priv->ah->curchan->channel,
		  channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf));

	ret = ath9k_hw_reset(ah, hchan, fastcc);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to reset channel (%u Mhz) "
			  "reset status %d\n", channel->center_freq, ret);
		goto err;
	}

	ath_update_txpow(priv);

	WMI_CMD(WMI_START_RECV_CMDID);
	if (ret)
		goto err;

	ath9k_host_rx_init(priv);

	mode = ath9k_htc_get_curmode(priv, hchan);
	htc_mode = cpu_to_be16(mode);
	WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
	if (ret)
		goto err;

	WMI_CMD(WMI_ENABLE_INTR_CMDID);
	if (ret)
		goto err;

	htc_start(priv->htc);

	priv->op_flags &= ~OP_FULL_RESET;
err:
	ath9k_htc_ps_restore(priv);
	return ret;
}
187
/*
 * Create a monitor-mode vif on the target. Only allowed when no other
 * vif exists (-ENOBUFS otherwise). Sets the driver's opmode to
 * monitor and bumps the vif count on success.
 */
static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
{
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ath9k_htc_target_vif hvif;
	int ret = 0;
	u8 cmd_rsp;

	if (priv->nvifs > 0)
		return -ENOBUFS;

	memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
	memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);

	hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
	priv->ah->opmode = NL80211_IFTYPE_MONITOR;
	hvif.index = priv->nvifs;

	WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
	if (ret)
		return ret;

	priv->nvifs++;
	return 0;
}
212
/*
 * Remove the monitor-mode vif from the target. The vif count is
 * decremented even if the WMI command fails; the target's error code
 * (if any) is returned.
 */
static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
{
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ath9k_htc_target_vif hvif;
	int ret = 0;
	u8 cmd_rsp;

	memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
	memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
	hvif.index = 0; /* Should do for now */
	WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
	priv->nvifs--;

	return ret;
}
228
/*
 * Create a station entry on the target. With a real 'sta' this
 * describes the remote peer (address, BSSID, AID, HT flag); with
 * sta == NULL it creates a self/vif pseudo-station entry using the
 * vif's own address.
 *
 * Returns 0 on success, -ENOBUFS when the station table is full, or
 * the WMI error code.
 */
static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta)
{
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ath9k_htc_target_sta tsta;
	struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
	struct ath9k_htc_sta *ista;
	int ret;
	u8 cmd_rsp;

	if (priv->nstations >= ATH9K_HTC_MAX_STA)
		return -ENOBUFS;

	memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));

	if (sta) {
		ista = (struct ath9k_htc_sta *) sta->drv_priv;
		memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
		memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
		tsta.associd = common->curaid;
		tsta.is_vif_sta = 0;
		tsta.valid = true;
		ista->index = priv->nstations;
	} else {
		memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
		tsta.is_vif_sta = 1;
	}

	tsta.sta_index = priv->nstations;
	tsta.vif_index = avp->index;
	tsta.maxampdu = 0xffff;
	if (sta && sta->ht_cap.ht_supported)
		tsta.flags = cpu_to_be16(ATH_HTC_STA_HT);

	WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
	if (ret) {
		if (sta)
			ath_print(common, ATH_DBG_FATAL,
			  "Unable to add station entry for: %pM\n", sta->addr);
		return ret;
	}

	if (sta)
		ath_print(common, ATH_DBG_CONFIG,
			  "Added a station entry for: %pM (idx: %d)\n",
			  sta->addr, tsta.sta_index);

	priv->nstations++;
	return 0;
}
280
/*
 * Remove a station entry from the target: the peer's index when 'sta'
 * is given, otherwise index 0 (the vif's self-station entry).
 *
 * Returns 0 on success or the WMI error code (station count is only
 * decremented on success).
 */
static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
				    struct ieee80211_vif *vif,
				    struct ieee80211_sta *sta)
{
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ath9k_htc_sta *ista;
	int ret;
	u8 cmd_rsp, sta_idx;

	if (sta) {
		ista = (struct ath9k_htc_sta *) sta->drv_priv;
		sta_idx = ista->index;
	} else {
		sta_idx = 0;
	}

	WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
	if (ret) {
		if (sta)
			ath_print(common, ATH_DBG_FATAL,
			  "Unable to remove station entry for: %pM\n",
			  sta->addr);
		return ret;
	}

	if (sta)
		ath_print(common, ATH_DBG_CONFIG,
			  "Removed a station entry for: %pM (idx: %d)\n",
			  sta->addr, sta_idx);

	priv->nstations--;
	return 0;
}
314
/*
 * Push the driver's capability settings to the target firmware.
 * Returns the WMI command status.
 */
static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
{
	struct ath9k_htc_cap_target tcap;
	int ret;
	u8 cmd_rsp;

	memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target));

	/* FIXME: Values are hardcoded */
	tcap.flags = 0x240c40;
	tcap.flags_ext = 0x80601000;
	tcap.ampdu_limit = 0xffff0000;
	tcap.ampdu_subframes = 20;
	tcap.tx_chainmask_legacy = 1;
	tcap.protmode = 1;
	tcap.tx_chainmask = 1;

	WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap);

	return ret;
}
336
/*
 * Build the initial rate table for @sta and send it to the target.
 *
 * The table is assembled in priv->tgt_rate, which doubles as a cache
 * used later by ath9k_htc_rc_update(); a copy (trate) is what is
 * actually sent over WMI. `ret` and `cmd_rsp` are consumed by the
 * WMI_CMD_BUF() macro.
 */
static int ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta)
{
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
	struct ieee80211_supported_band *sband;
	struct ath9k_htc_target_rate trate;
	u32 caps = 0;
	u8 cmd_rsp;
	int i, j, ret;

	memset(&trate, 0, sizeof(trate));

	/* Only 2GHz is supported */
	sband = priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ];

	/* Collect the legacy rates the peer supports, in 500kbps units. */
	for (i = 0, j = 0; i < sband->n_bitrates; i++) {
		if (sta->supp_rates[sband->band] & BIT(i)) {
			priv->tgt_rate.rates.legacy_rates.rs_rates[j]
				= (sband->bitrates[i].bitrate * 2) / 10;
			j++;
		}
	}
	priv->tgt_rate.rates.legacy_rates.rs_nrates = j;

	if (sta->ht_cap.ht_supported) {
		/* Collect supported HT MCS indices from the RX mask. */
		for (i = 0, j = 0; i < 77; i++) {
			if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
				priv->tgt_rate.rates.ht_rates.rs_rates[j++] = i;
			if (j == ATH_HTC_RATE_MAX)
				break;
		}
		priv->tgt_rate.rates.ht_rates.rs_nrates = j;

		caps = WLAN_RC_HT_FLAG;
		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
			caps |= WLAN_RC_40_FLAG;
		if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
			caps |= WLAN_RC_SGI_FLAG;

	}

	priv->tgt_rate.sta_index = ista->index;
	priv->tgt_rate.isnew = 1;
	/* Snapshot before capflags is set on the cache; capflags is then
	 * assigned to both copies, so the two stay in sync. */
	trate = priv->tgt_rate;
	priv->tgt_rate.capflags = cpu_to_be32(caps);
	trate.capflags = cpu_to_be32(caps);

	WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to initialize Rate information on target\n");
		return ret;
	}

	ath_print(common, ATH_DBG_CONFIG,
		  "Updated target STA: %pM (caps: 0x%x)\n", sta->addr, caps);
	return 0;
}
397
398static bool check_rc_update(struct ieee80211_hw *hw, bool *cw40)
399{
400 struct ath9k_htc_priv *priv = hw->priv;
401 struct ieee80211_conf *conf = &hw->conf;
402
403 if (!conf_is_ht(conf))
404 return false;
405
406 if (!(priv->op_flags & OP_ASSOCIATED) ||
407 (priv->op_flags & OP_SCANNING))
408 return false;
409
410 if (conf_is_ht40(conf)) {
411 if (priv->ah->curchan->chanmode &
412 (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)) {
413 return false;
414 } else {
415 *cw40 = true;
416 return true;
417 }
418 } else { /* ht20 */
419 if (priv->ah->curchan->chanmode & CHANNEL_HT20)
420 return false;
421 else
422 return true;
423 }
424}
425
426static void ath9k_htc_rc_update(struct ath9k_htc_priv *priv, bool is_cw40)
427{
428 struct ath9k_htc_target_rate trate;
429 struct ath_common *common = ath9k_hw_common(priv->ah);
430 int ret;
431 u32 caps = be32_to_cpu(priv->tgt_rate.capflags);
432 u8 cmd_rsp;
433
434 memset(&trate, 0, sizeof(trate));
435
436 trate = priv->tgt_rate;
437
438 if (is_cw40)
439 caps |= WLAN_RC_40_FLAG;
440 else
441 caps &= ~WLAN_RC_40_FLAG;
442
443 priv->tgt_rate.capflags = cpu_to_be32(caps);
444 trate.capflags = cpu_to_be32(caps);
445
446 WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
447 if (ret) {
448 ath_print(common, ATH_DBG_FATAL,
449 "Unable to update Rate information on target\n");
450 return;
451 }
452
453 ath_print(common, ATH_DBG_CONFIG, "Rate control updated with "
454 "caps:0x%x on target\n", priv->tgt_rate.capflags);
455}
456
457static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
458 struct ieee80211_vif *vif,
459 u8 *sta_addr, u8 tid, bool oper)
460{
461 struct ath_common *common = ath9k_hw_common(priv->ah);
462 struct ath9k_htc_target_aggr aggr;
463 struct ieee80211_sta *sta = NULL;
464 struct ath9k_htc_sta *ista;
465 int ret = 0;
466 u8 cmd_rsp;
467
468 if (tid >= ATH9K_HTC_MAX_TID)
469 return -EINVAL;
470
471 memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr));
472
473 rcu_read_lock();
474
475 /* Check if we are able to retrieve the station */
476 sta = ieee80211_find_sta(vif, sta_addr);
477 if (!sta) {
478 rcu_read_unlock();
479 return -EINVAL;
480 }
481
482 ista = (struct ath9k_htc_sta *) sta->drv_priv;
483
484 if (oper)
485 ista->tid_state[tid] = AGGR_START;
486 else
487 ista->tid_state[tid] = AGGR_STOP;
488
489 aggr.sta_index = ista->index;
490
491 rcu_read_unlock();
492
493 aggr.tidno = tid;
494 aggr.aggr_enable = oper;
495
496 WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
497 if (ret)
498 ath_print(common, ATH_DBG_CONFIG,
499 "Unable to %s TX aggregation for (%pM, %d)\n",
500 (oper) ? "start" : "stop", sta->addr, tid);
501 else
502 ath_print(common, ATH_DBG_CONFIG,
503 "%s aggregation for (%pM, %d)\n",
504 (oper) ? "Starting" : "Stopping", sta->addr, tid);
505
506 return ret;
507}
508
509void ath9k_htc_aggr_work(struct work_struct *work)
510{
511 int ret = 0;
512 struct ath9k_htc_priv *priv =
513 container_of(work, struct ath9k_htc_priv,
514 ath9k_aggr_work.work);
515 struct ath9k_htc_aggr_work *wk = &priv->aggr_work;
516
517 mutex_lock(&wk->mutex);
518
519 switch (wk->action) {
520 case IEEE80211_AMPDU_TX_START:
521 ret = ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
522 wk->tid, true);
523 if (!ret)
524 ieee80211_start_tx_ba_cb(wk->vif, wk->sta_addr,
525 wk->tid);
526 break;
527 case IEEE80211_AMPDU_TX_STOP:
528 ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
529 wk->tid, false);
530 ieee80211_stop_tx_ba_cb(wk->vif, wk->sta_addr, wk->tid);
531 break;
532 default:
533 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
534 "Unknown AMPDU action\n");
535 }
536
537 mutex_unlock(&wk->mutex);
538}
539
540/*********/
541/* DEBUG */
542/*********/
543
544#ifdef CONFIG_ATH9K_HTC_DEBUGFS
545
/* Generic debugfs open: hand the inode's private pointer (set at
 * debugfs_create_file() time) to the read handlers. */
static int ath9k_debugfs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
551
/*
 * debugfs read handler for "tgt_stats": queries the target's TX retry
 * statistics over WMI and formats them into a small text buffer.
 * `cmd_rsp` is the response buffer filled by the WMI_CMD() macro,
 * which also sets `ret`.
 */
static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct ath9k_htc_priv *priv =
		(struct ath9k_htc_priv *) file->private_data;
	struct ath9k_htc_target_stats cmd_rsp;
	char buf[512];
	unsigned int len = 0;
	int ret = 0;

	memset(&cmd_rsp, 0, sizeof(cmd_rsp));

	WMI_CMD(WMI_TGT_STATS_CMDID);
	if (ret)
		return -EINVAL;


	/* Counters arrive big-endian from the target. */
	len += snprintf(buf + len, sizeof(buf) - len,
			"%19s : %10u\n", "TX Short Retries",
			be32_to_cpu(cmd_rsp.tx_shortretry));
	len += snprintf(buf + len, sizeof(buf) - len,
			"%19s : %10u\n", "TX Long Retries",
			be32_to_cpu(cmd_rsp.tx_longretry));
	len += snprintf(buf + len, sizeof(buf) - len,
			"%19s : %10u\n", "TX Xretries",
			be32_to_cpu(cmd_rsp.tx_xretries));
	len += snprintf(buf + len, sizeof(buf) - len,
			"%19s : %10u\n", "TX Unaggr. Xretries",
			be32_to_cpu(cmd_rsp.ht_txunaggr_xretry));
	len += snprintf(buf + len, sizeof(buf) - len,
			"%19s : %10u\n", "TX Xretries (HT)",
			be32_to_cpu(cmd_rsp.ht_tx_xretries));
	len += snprintf(buf + len, sizeof(buf) - len,
			"%19s : %10u\n", "TX Rate", priv->debug.txrate);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
589
/* debugfs file ops for the read-only "tgt_stats" entry. */
static const struct file_operations fops_tgt_stats = {
	.read = read_file_tgt_stats,
	.open = ath9k_debugfs_open,
	.owner = THIS_MODULE
};
595
596static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
597 size_t count, loff_t *ppos)
598{
599 struct ath9k_htc_priv *priv =
600 (struct ath9k_htc_priv *) file->private_data;
601 char buf[512];
602 unsigned int len = 0;
603
604 len += snprintf(buf + len, sizeof(buf) - len,
605 "%20s : %10u\n", "Buffers queued",
606 priv->debug.tx_stats.buf_queued);
607 len += snprintf(buf + len, sizeof(buf) - len,
608 "%20s : %10u\n", "Buffers completed",
609 priv->debug.tx_stats.buf_completed);
610 len += snprintf(buf + len, sizeof(buf) - len,
611 "%20s : %10u\n", "SKBs queued",
612 priv->debug.tx_stats.skb_queued);
613 len += snprintf(buf + len, sizeof(buf) - len,
614 "%20s : %10u\n", "SKBs completed",
615 priv->debug.tx_stats.skb_completed);
616 len += snprintf(buf + len, sizeof(buf) - len,
617 "%20s : %10u\n", "SKBs dropped",
618 priv->debug.tx_stats.skb_dropped);
619
620 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
621}
622
/* debugfs file ops for the read-only "xmit" entry. */
static const struct file_operations fops_xmit = {
	.read = read_file_xmit,
	.open = ath9k_debugfs_open,
	.owner = THIS_MODULE
};
628
629static ssize_t read_file_recv(struct file *file, char __user *user_buf,
630 size_t count, loff_t *ppos)
631{
632 struct ath9k_htc_priv *priv =
633 (struct ath9k_htc_priv *) file->private_data;
634 char buf[512];
635 unsigned int len = 0;
636
637 len += snprintf(buf + len, sizeof(buf) - len,
638 "%20s : %10u\n", "SKBs allocated",
639 priv->debug.rx_stats.skb_allocated);
640 len += snprintf(buf + len, sizeof(buf) - len,
641 "%20s : %10u\n", "SKBs completed",
642 priv->debug.rx_stats.skb_completed);
643 len += snprintf(buf + len, sizeof(buf) - len,
644 "%20s : %10u\n", "SKBs Dropped",
645 priv->debug.rx_stats.skb_dropped);
646
647 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
648}
649
/* debugfs file ops for the read-only "recv" entry. */
static const struct file_operations fops_recv = {
	.read = read_file_recv,
	.open = ath9k_debugfs_open,
	.owner = THIS_MODULE
};
655
656int ath9k_htc_init_debug(struct ath_hw *ah)
657{
658 struct ath_common *common = ath9k_hw_common(ah);
659 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
660
661 if (!ath9k_debugfs_root)
662 return -ENOENT;
663
664 priv->debug.debugfs_phy = debugfs_create_dir(wiphy_name(priv->hw->wiphy),
665 ath9k_debugfs_root);
666 if (!priv->debug.debugfs_phy)
667 goto err;
668
669 priv->debug.debugfs_tgt_stats = debugfs_create_file("tgt_stats", S_IRUSR,
670 priv->debug.debugfs_phy,
671 priv, &fops_tgt_stats);
672 if (!priv->debug.debugfs_tgt_stats)
673 goto err;
674
675
676 priv->debug.debugfs_xmit = debugfs_create_file("xmit", S_IRUSR,
677 priv->debug.debugfs_phy,
678 priv, &fops_xmit);
679 if (!priv->debug.debugfs_xmit)
680 goto err;
681
682 priv->debug.debugfs_recv = debugfs_create_file("recv", S_IRUSR,
683 priv->debug.debugfs_phy,
684 priv, &fops_recv);
685 if (!priv->debug.debugfs_recv)
686 goto err;
687
688 return 0;
689
690err:
691 ath9k_htc_exit_debug(ah);
692 return -ENOMEM;
693}
694
/*
 * Remove this device's debugfs files and directory. debugfs_remove()
 * ignores NULL dentries, so this is also safe from the partial-failure
 * path of ath9k_htc_init_debug().
 */
void ath9k_htc_exit_debug(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;

	debugfs_remove(priv->debug.debugfs_recv);
	debugfs_remove(priv->debug.debugfs_xmit);
	debugfs_remove(priv->debug.debugfs_tgt_stats);
	debugfs_remove(priv->debug.debugfs_phy);
}
705
706int ath9k_htc_debug_create_root(void)
707{
708 ath9k_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
709 if (!ath9k_debugfs_root)
710 return -ENOENT;
711
712 return 0;
713}
714
/* Remove the module-wide debugfs root and clear the cached dentry. */
void ath9k_htc_debug_remove_root(void)
{
	debugfs_remove(ath9k_debugfs_root);
	ath9k_debugfs_root = NULL;
}
720
721#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
722
723/*******/
724/* ANI */
725/*******/
726
727static void ath_start_ani(struct ath9k_htc_priv *priv)
728{
729 struct ath_common *common = ath9k_hw_common(priv->ah);
730 unsigned long timestamp = jiffies_to_msecs(jiffies);
731
732 common->ani.longcal_timer = timestamp;
733 common->ani.shortcal_timer = timestamp;
734 common->ani.checkani_timer = timestamp;
735
736 ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
737 msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
738}
739
/*
 * Periodic calibration / ANI worker.
 *
 * Decides, based on millisecond deadlines kept in common->ani, whether
 * long calibration, short calibration and/or an ANI check are due,
 * performs them with the chip held awake, and re-arms itself with the
 * shortest interval still required.
 */
void ath9k_ani_work(struct work_struct *work)
{
	struct ath9k_htc_priv *priv =
		container_of(work, struct ath9k_htc_priv,
			     ath9k_ani_work.work);
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	bool longcal = false;
	bool shortcal = false;
	bool aniflag = false;
	unsigned int timestamp = jiffies_to_msecs(jiffies);
	u32 cal_interval, short_cal_interval;

	short_cal_interval = ATH_STA_SHORT_CALINTERVAL;

	/* Only calibrate if awake */
	if (ah->power_mode != ATH9K_PM_AWAKE)
		goto set_timer;

	/* Long calibration runs independently of short calibration. */
	if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
		longcal = true;
		ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
		common->ani.longcal_timer = timestamp;
	}

	/* Short calibration applies only while caldone is false */
	if (!common->ani.caldone) {
		if ((timestamp - common->ani.shortcal_timer) >=
		    short_cal_interval) {
			shortcal = true;
			ath_print(common, ATH_DBG_ANI,
				  "shortcal @%lu\n", jiffies);
			common->ani.shortcal_timer = timestamp;
			common->ani.resetcal_timer = timestamp;
		}
	} else {
		/* Calibration is done: periodically re-validate it and
		 * restart the cycle if it went stale. */
		if ((timestamp - common->ani.resetcal_timer) >=
		    ATH_RESTART_CALINTERVAL) {
			common->ani.caldone = ath9k_hw_reset_calvalid(ah);
			if (common->ani.caldone)
				common->ani.resetcal_timer = timestamp;
		}
	}

	/* Verify whether we must check ANI */
	if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
		aniflag = true;
		common->ani.checkani_timer = timestamp;
	}

	/* Skip all processing if there's nothing to do. */
	if (longcal || shortcal || aniflag) {

		/* Keep the chip awake while touching the hardware. */
		ath9k_htc_ps_wakeup(priv);

		/* Call ANI routine if necessary */
		if (aniflag)
			ath9k_hw_ani_monitor(ah, ah->curchan);

		/* Perform calibration if necessary */
		if (longcal || shortcal) {
			common->ani.caldone =
				ath9k_hw_calibrate(ah, ah->curchan,
						   common->rx_chainmask,
						   longcal);

			/* Noise floor is only refreshed on long cal. */
			if (longcal)
				common->ani.noise_floor =
					ath9k_hw_getchan_noise(ah, ah->curchan);

			ath_print(common, ATH_DBG_ANI,
				  " calibrate chan %u/%x nf: %d\n",
				  ah->curchan->channel,
				  ah->curchan->channelFlags,
				  common->ani.noise_floor);
		}

		ath9k_htc_ps_restore(priv);
	}

set_timer:
	/*
	 * Set timer interval based on previous results.
	 * The interval must be the shortest necessary to satisfy ANI,
	 * short calibration and long calibration.
	 */
	cal_interval = ATH_LONG_CALINTERVAL;
	if (priv->ah->config.enable_ani)
		cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
	if (!common->ani.caldone)
		cal_interval = min(cal_interval, (u32)short_cal_interval);

	ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
				     msecs_to_jiffies(cal_interval));
}
836
837/*******/
838/* LED */
839/*******/
840
/*
 * Blink worker for the association LED.
 *
 * Drives the (active-low) LED pin for the current blink phase,
 * re-queues itself for the next phase, then recomputes the on/off
 * durations from the activity counters accumulated since the last run
 * and flips the phase flag.
 */
static void ath9k_led_blink_work(struct work_struct *work)
{
	struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
						   ath9k_led_blink_work.work);

	/* Stop blinking once the assoc LED is no longer active. */
	if (!(priv->op_flags & OP_LED_ASSOCIATED))
		return;

	/* At idle durations drive the pin low (LED on, active low). */
	if ((priv->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
	    (priv->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
		ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
	else
		ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
				  (priv->op_flags & OP_LED_ON) ? 1 : 0);

	/* Re-arm with the duration of the phase we just entered. */
	ieee80211_queue_delayed_work(priv->hw,
				     &priv->ath9k_led_blink_work,
				     (priv->op_flags & OP_LED_ON) ?
					msecs_to_jiffies(priv->led_off_duration) :
					msecs_to_jiffies(priv->led_on_duration));

	/* Busier link -> shorter periods, floored at 25/10 ms. */
	priv->led_on_duration = priv->led_on_cnt ?
		max((ATH_LED_ON_DURATION_IDLE - priv->led_on_cnt), 25) :
		ATH_LED_ON_DURATION_IDLE;
	priv->led_off_duration = priv->led_off_cnt ?
		max((ATH_LED_OFF_DURATION_IDLE - priv->led_off_cnt), 10) :
		ATH_LED_OFF_DURATION_IDLE;
	priv->led_on_cnt = priv->led_off_cnt = 0;

	/* Flip the blink phase for the next run. */
	if (priv->op_flags & OP_LED_ON)
		priv->op_flags &= ~OP_LED_ON;
	else
		priv->op_flags |= OP_LED_ON;
}
875
876static void ath9k_led_brightness_work(struct work_struct *work)
877{
878 struct ath_led *led = container_of(work, struct ath_led,
879 brightness_work.work);
880 struct ath9k_htc_priv *priv = led->priv;
881
882 switch (led->brightness) {
883 case LED_OFF:
884 if (led->led_type == ATH_LED_ASSOC ||
885 led->led_type == ATH_LED_RADIO) {
886 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
887 (led->led_type == ATH_LED_RADIO));
888 priv->op_flags &= ~OP_LED_ASSOCIATED;
889 if (led->led_type == ATH_LED_RADIO)
890 priv->op_flags &= ~OP_LED_ON;
891 } else {
892 priv->led_off_cnt++;
893 }
894 break;
895 case LED_FULL:
896 if (led->led_type == ATH_LED_ASSOC) {
897 priv->op_flags |= OP_LED_ASSOCIATED;
898 ieee80211_queue_delayed_work(priv->hw,
899 &priv->ath9k_led_blink_work, 0);
900 } else if (led->led_type == ATH_LED_RADIO) {
901 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
902 priv->op_flags |= OP_LED_ON;
903 } else {
904 priv->led_on_cnt++;
905 }
906 break;
907 default:
908 break;
909 }
910}
911
912static void ath9k_led_brightness(struct led_classdev *led_cdev,
913 enum led_brightness brightness)
914{
915 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
916 struct ath9k_htc_priv *priv = led->priv;
917
918 led->brightness = brightness;
919 if (!(priv->op_flags & OP_LED_DEINIT))
920 ieee80211_queue_delayed_work(priv->hw,
921 &led->brightness_work, 0);
922}
923
924static void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv)
925{
926 cancel_delayed_work_sync(&priv->radio_led.brightness_work);
927 cancel_delayed_work_sync(&priv->assoc_led.brightness_work);
928 cancel_delayed_work_sync(&priv->tx_led.brightness_work);
929 cancel_delayed_work_sync(&priv->rx_led.brightness_work);
930}
931
932static int ath9k_register_led(struct ath9k_htc_priv *priv, struct ath_led *led,
933 char *trigger)
934{
935 int ret;
936
937 led->priv = priv;
938 led->led_cdev.name = led->name;
939 led->led_cdev.default_trigger = trigger;
940 led->led_cdev.brightness_set = ath9k_led_brightness;
941
942 ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_cdev);
943 if (ret)
944 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
945 "Failed to register led:%s", led->name);
946 else
947 led->registered = 1;
948
949 INIT_DELAYED_WORK(&led->brightness_work, ath9k_led_brightness_work);
950
951 return ret;
952}
953
954static void ath9k_unregister_led(struct ath_led *led)
955{
956 if (led->registered) {
957 led_classdev_unregister(&led->led_cdev);
958 led->registered = 0;
959 }
960}
961
/*
 * Unregister all LEDs. OP_LED_DEINIT is set first so that
 * ath9k_led_brightness() stops queueing brightness work while the
 * classdevs are being torn down.
 */
void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
{
	priv->op_flags |= OP_LED_DEINIT;
	ath9k_unregister_led(&priv->assoc_led);
	priv->op_flags &= ~OP_LED_ASSOCIATED;
	ath9k_unregister_led(&priv->tx_led);
	ath9k_unregister_led(&priv->rx_led);
	ath9k_unregister_led(&priv->radio_led);
}
971
/*
 * Set up the LED GPIO pin for this chip revision and register the
 * radio/assoc/tx/rx LEDs with their mac80211 default triggers.
 * On any registration failure, everything registered so far is torn
 * down again via ath9k_deinit_leds().
 */
void ath9k_init_leds(struct ath9k_htc_priv *priv)
{
	char *trigger;
	int ret;

	/* Pick the LED pin for this chip revision. */
	if (AR_SREV_9287(priv->ah))
		priv->ah->led_pin = ATH_LED_PIN_9287;
	else if (AR_SREV_9271(priv->ah))
		priv->ah->led_pin = ATH_LED_PIN_9271;
	else
		priv->ah->led_pin = ATH_LED_PIN_DEF;

	/* Configure gpio 1 for output */
	ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
			    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
	/* LED off, active low */
	ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);

	INIT_DELAYED_WORK(&priv->ath9k_led_blink_work, ath9k_led_blink_work);

	trigger = ieee80211_get_radio_led_name(priv->hw);
	snprintf(priv->radio_led.name, sizeof(priv->radio_led.name),
		 "ath9k-%s::radio", wiphy_name(priv->hw->wiphy));
	ret = ath9k_register_led(priv, &priv->radio_led, trigger);
	priv->radio_led.led_type = ATH_LED_RADIO;
	if (ret)
		goto fail;

	trigger = ieee80211_get_assoc_led_name(priv->hw);
	snprintf(priv->assoc_led.name, sizeof(priv->assoc_led.name),
		 "ath9k-%s::assoc", wiphy_name(priv->hw->wiphy));
	ret = ath9k_register_led(priv, &priv->assoc_led, trigger);
	priv->assoc_led.led_type = ATH_LED_ASSOC;
	if (ret)
		goto fail;

	trigger = ieee80211_get_tx_led_name(priv->hw);
	snprintf(priv->tx_led.name, sizeof(priv->tx_led.name),
		 "ath9k-%s::tx", wiphy_name(priv->hw->wiphy));
	ret = ath9k_register_led(priv, &priv->tx_led, trigger);
	priv->tx_led.led_type = ATH_LED_TX;
	if (ret)
		goto fail;

	trigger = ieee80211_get_rx_led_name(priv->hw);
	snprintf(priv->rx_led.name, sizeof(priv->rx_led.name),
		 "ath9k-%s::rx", wiphy_name(priv->hw->wiphy));
	ret = ath9k_register_led(priv, &priv->rx_led, trigger);
	priv->rx_led.led_type = ATH_LED_RX;
	if (ret)
		goto fail;

	/* Allow brightness work to be queued from now on. */
	priv->op_flags &= ~OP_LED_DEINIT;

	return;

fail:
	cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
	ath9k_deinit_leds(priv);
}
1032
1033/*******************/
1034/* Rfkill */
1035/*******************/
1036
1037static bool ath_is_rfkill_set(struct ath9k_htc_priv *priv)
1038{
1039 return ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
1040 priv->ah->rfkill_polarity;
1041}
1042
1043static void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw)
1044{
1045 struct ath9k_htc_priv *priv = hw->priv;
1046 bool blocked = !!ath_is_rfkill_set(priv);
1047
1048 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
1049}
1050
1051void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
1052{
1053 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1054 wiphy_rfkill_start_polling(priv->hw->wiphy);
1055}
1056
1057/**********************/
1058/* mac80211 Callbacks */
1059/**********************/
1060
/*
 * mac80211 tx handler.
 *
 * Pads the frame so the payload starts 4-byte aligned after the
 * 802.11 header, then hands it to the HTC TX path. When TX start
 * fails the skb is freed here and 0 is returned; -ENOMEM from the
 * TX path additionally throttles mac80211 until completions drain.
 */
static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	struct ath9k_htc_priv *priv = hw->priv;
	int padpos, padsize, ret;

	hdr = (struct ieee80211_hdr *) skb->data;

	/* Add the padding after the header if this is not already done */
	padpos = ath9k_cmn_padpos(hdr->frame_control);
	padsize = padpos & 3;
	if (padsize && skb->len > padpos) {
		/* NOTE(review): the skb is not freed on this early-return
		 * path — confirm against the mac80211 tx contract. */
		if (skb_headroom(skb) < padsize)
			return -1;
		skb_push(skb, padsize);
		/* Slide the header down so the pad sits right after it. */
		memmove(skb->data, skb->data + padsize, padpos);
	}

	ret = ath9k_htc_tx_start(priv, skb);
	if (ret != 0) {
		if (ret == -ENOMEM) {
			/* Out of TX buffers: stop mac80211's queues until
			 * TX completions free some up. */
			ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
				  "Stopping TX queues\n");
			ieee80211_stop_queues(hw);
			spin_lock_bh(&priv->tx_lock);
			priv->tx_queues_stop = true;
			spin_unlock_bh(&priv->tx_lock);
		} else {
			ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
				  "Tx failed");
		}
		goto fail_tx;
	}

	return 0;

fail_tx:
	/* Consume the skb; mac80211 considers it handed off. */
	dev_kfree_skb_any(skb);
	return 0;
}
1101
/*
 * Bring the radio up: reset the chip on the current channel, program
 * the target (mode, init, RX start) over WMI, start HTC and re-open
 * the TX queues. @led additionally drives the LED pin on.
 *
 * `ret`, `cmd_rsp` and `htc_mode` are consumed by the WMI_CMD*()
 * macros. Called with priv->mutex held.
 */
static int ath9k_htc_radio_enable(struct ieee80211_hw *hw, bool led)
{
	struct ath9k_htc_priv *priv = hw->priv;
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_channel *curchan = hw->conf.channel;
	struct ath9k_channel *init_channel;
	int ret = 0;
	enum htc_phymode mode;
	__be16 htc_mode;
	u8 cmd_rsp;

	ath_print(common, ATH_DBG_CONFIG,
		  "Starting driver with initial channel: %d MHz\n",
		  curchan->center_freq);

	/* setup initial channel */
	init_channel = ath9k_cmn_get_curchannel(hw, ah);

	/* Reset SERDES registers */
	ath9k_hw_configpcipowersave(ah, 0, 0);

	ath9k_hw_htc_resetinit(ah);
	ret = ath9k_hw_reset(ah, init_channel, false);
	if (ret) {
		ath_print(common, ATH_DBG_FATAL,
			  "Unable to reset hardware; reset status %d "
			  "(freq %u MHz)\n", ret, curchan->center_freq);
		return ret;
	}

	ath_update_txpow(priv);

	/* Tell the target which PHY mode the channel uses. */
	mode = ath9k_htc_get_curmode(priv, init_channel);
	htc_mode = cpu_to_be16(mode);
	WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
	WMI_CMD(WMI_ATH_INIT_CMDID);
	WMI_CMD(WMI_START_RECV_CMDID);

	ath9k_host_rx_init(priv);

	priv->op_flags &= ~OP_INVALID;
	htc_start(priv->htc);

	/* Re-open the driver-side TX gate. */
	spin_lock_bh(&priv->tx_lock);
	priv->tx_queues_stop = false;
	spin_unlock_bh(&priv->tx_lock);

	if (led) {
		/* Enable LED */
		ath9k_hw_cfg_output(ah, ah->led_pin,
				    AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
		ath9k_hw_set_gpio(ah, ah->led_pin, 0);
	}

	ieee80211_wake_queues(hw);

	return ret;
}
1161
/* mac80211 start callback: power the radio up under priv->mutex,
 * leaving the LED pin untouched (led = false). */
static int ath9k_htc_start(struct ieee80211_hw *hw)
{
	struct ath9k_htc_priv *priv = hw->priv;
	int ret = 0;

	mutex_lock(&priv->mutex);
	ret = ath9k_htc_radio_enable(hw, false);
	mutex_unlock(&priv->mutex);

	return ret;
}
1173
/*
 * Bring the radio down: cancel all deferred work, stop HTC and the
 * target's TX/RX over WMI, disable the hardware and put it to full
 * sleep. @led additionally turns the LED pin off. A monitor interface,
 * if active, is removed here as well.
 *
 * `ret` and `cmd_rsp` are consumed by the WMI_CMD() macro. Called
 * with priv->mutex held.
 */
static void ath9k_htc_radio_disable(struct ieee80211_hw *hw, bool led)
{
	struct ath9k_htc_priv *priv = hw->priv;
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	int ret = 0;
	u8 cmd_rsp;

	if (priv->op_flags & OP_INVALID) {
		ath_print(common, ATH_DBG_ANY, "Device not present\n");
		return;
	}

	if (led) {
		/* Disable LED */
		ath9k_hw_set_gpio(ah, ah->led_pin, 1);
		ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
	}

	/* Cancel all the running timers/work .. */
	cancel_work_sync(&priv->ps_work);
	cancel_delayed_work_sync(&priv->ath9k_ani_work);
	cancel_delayed_work_sync(&priv->ath9k_aggr_work);
	cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
	ath9k_led_stop_brightness(priv);

	/* Keep the chip awake through the teardown sequence. */
	ath9k_htc_ps_wakeup(priv);
	htc_stop(priv->htc);
	WMI_CMD(WMI_DISABLE_INTR_CMDID);
	WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
	WMI_CMD(WMI_STOP_RECV_CMDID);
	ath9k_hw_phy_disable(ah);
	ath9k_hw_disable(ah);
	ath9k_hw_configpcipowersave(ah, 1, 1);
	ath9k_htc_ps_restore(priv);
	ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);

	/* Drop any frames still waiting in the driver queue. */
	skb_queue_purge(&priv->tx_queue);

	/* Remove monitor interface here */
	if (ah->opmode == NL80211_IFTYPE_MONITOR) {
		if (ath9k_htc_remove_monitor_interface(priv))
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to remove monitor interface\n");
		else
			ath_print(common, ATH_DBG_CONFIG,
				  "Monitor interface removed\n");
	}

	priv->op_flags |= OP_INVALID;

	ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
}
1227
/* mac80211 stop callback: power the radio down under priv->mutex,
 * leaving the LED pin untouched (led = false). */
static void ath9k_htc_stop(struct ieee80211_hw *hw)
{
	struct ath9k_htc_priv *priv = hw->priv;

	mutex_lock(&priv->mutex);
	ath9k_htc_radio_disable(hw, false);
	mutex_unlock(&priv->mutex);
}
1236
1237
/*
 * mac80211 add_interface callback.
 *
 * Creates a VAP on the target (only one interface, STA or IBSS, is
 * supported), then creates the VIF self-station needed to transmit
 * management frames before association and pushes the capability set.
 *
 * `ret` and `cmd_rsp` are consumed by the WMI_CMD*() macros.
 */
static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct ath9k_htc_priv *priv = hw->priv;
	struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ath9k_htc_target_vif hvif;
	int ret = 0;
	u8 cmd_rsp;

	mutex_lock(&priv->mutex);

	/* Only one interface for now */
	if (priv->nvifs > 0) {
		ret = -ENOBUFS;
		goto out;
	}

	ath9k_htc_ps_wakeup(priv);
	memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
	memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		hvif.opmode = cpu_to_be32(HTC_M_STA);
		break;
	case NL80211_IFTYPE_ADHOC:
		hvif.opmode = cpu_to_be32(HTC_M_IBSS);
		break;
	default:
		ath_print(common, ATH_DBG_FATAL,
			  "Interface type %d not yet supported\n", vif->type);
		ret = -EOPNOTSUPP;
		goto out;
	}

	ath_print(common, ATH_DBG_CONFIG,
		  "Attach a VIF of type: %d\n", vif->type);

	priv->ah->opmode = vif->type;

	/* Index starts from zero on the target */
	avp->index = hvif.index = priv->nvifs;
	hvif.rtsthreshold = cpu_to_be16(2304);
	WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
	if (ret)
		goto out;

	priv->nvifs++;

	/*
	 * We need a node in target to tx mgmt frames
	 * before association.
	 */
	ret = ath9k_htc_add_station(priv, vif, NULL);
	if (ret)
		/* NOTE(review): on this path the VAP created above is not
		 * removed and nvifs stays incremented — confirm whether a
		 * WMI_VAP_REMOVE_CMDID rollback is needed here. */
		goto out;

	ret = ath9k_htc_update_cap_target(priv);
	if (ret)
		ath_print(common, ATH_DBG_CONFIG, "Failed to update"
			  " capability in target \n");

	priv->vif = vif;
out:
	ath9k_htc_ps_restore(priv);
	mutex_unlock(&priv->mutex);
	return ret;
}
1307
/*
 * mac80211 remove_interface callback: tear down the target VAP and
 * the VIF self-station created in ath9k_htc_add_interface().
 *
 * `ret` and `cmd_rsp` are consumed by the WMI_CMD_BUF() macro.
 */
static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct ath9k_htc_priv *priv = hw->priv;
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
	struct ath9k_htc_target_vif hvif;
	int ret = 0;
	u8 cmd_rsp;

	ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");

	mutex_lock(&priv->mutex);

	memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
	memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
	hvif.index = avp->index;
	WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
	priv->nvifs--;

	/* NULL sta removes the VIF self-station (index 0). */
	ath9k_htc_remove_station(priv, vif, NULL);
	priv->vif = NULL;

	mutex_unlock(&priv->mutex);
}
1333
/*
 * mac80211 config callback: handles idle transitions (radio on/off),
 * channel changes (including HT rate-control refresh), power-save
 * mode and monitor-mode setup.
 */
static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
{
	struct ath9k_htc_priv *priv = hw->priv;
	struct ath_common *common = ath9k_hw_common(priv->ah);
	struct ieee80211_conf *conf = &hw->conf;

	mutex_lock(&priv->mutex);

	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
		bool enable_radio = false;
		bool idle = !!(conf->flags & IEEE80211_CONF_IDLE);

		/* Leaving idle: the radio was powered down below and
		 * must be brought back up. */
		if (!idle && priv->ps_idle)
			enable_radio = true;

		priv->ps_idle = idle;

		if (enable_radio) {
			ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
			ath9k_htc_radio_enable(hw, true);
			ath_print(common, ATH_DBG_CONFIG,
				  "not-idle: enabling radio\n");
		}
	}

	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		struct ieee80211_channel *curchan = hw->conf.channel;
		int pos = curchan->hw_value;
		bool is_cw40 = false;

		ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
			  curchan->center_freq);

		/* Refresh the target's rate control if the HT width
		 * changed with this channel switch. */
		if (check_rc_update(hw, &is_cw40))
			ath9k_htc_rc_update(priv, is_cw40);

		ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]);

		if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
			ath_print(common, ATH_DBG_FATAL,
				  "Unable to set channel\n");
			mutex_unlock(&priv->mutex);
			return -EINVAL;
		}

	}
	if (changed & IEEE80211_CONF_CHANGE_PS) {
		if (conf->flags & IEEE80211_CONF_PS) {
			ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
			priv->ps_enabled = true;
		} else {
			priv->ps_enabled = false;
			cancel_work_sync(&priv->ps_work);
			ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
		}
	}

	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
		if (conf->flags & IEEE80211_CONF_MONITOR) {
			if (ath9k_htc_add_monitor_interface(priv))
				ath_print(common, ATH_DBG_FATAL,
					  "Failed to set monitor mode\n");
			else
				ath_print(common, ATH_DBG_CONFIG,
					  "HW opmode set to Monitor mode\n");
		}
	}

	/* Finally, power the radio down whenever we are idle. */
	if (priv->ps_idle) {
		ath_print(common, ATH_DBG_CONFIG,
			  "idle: disabling radio\n");
		ath9k_htc_radio_disable(hw, true);
	}

	mutex_unlock(&priv->mutex);

	return 0;
}
1412
/* mac80211 RX filter flags this driver can honour. */
#define SUPPORTED_FILTERS			\
	(FIF_PROMISC_IN_BSS |			\
	FIF_ALLMULTI |				\
	FIF_CONTROL |				\
	FIF_PSPOLL |				\
	FIF_OTHER_BSS |				\
	FIF_BCN_PRBRESP_PROMISC |		\
	FIF_FCSFAIL)
1421
/*
 * mac80211 configure_filter callback: clamp the requested filter flags to
 * SUPPORTED_FILTERS, recompute the hardware RX filter word and program it.
 * The chip is woken (ps_wakeup/ps_restore) around the register write.
 */
1422static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
1423 unsigned int changed_flags,
1424 unsigned int *total_flags,
1425 u64 multicast)
1426{
1427 struct ath9k_htc_priv *priv = hw->priv;
1428 u32 rfilt;
1429
1430 mutex_lock(&priv->mutex);
1431
1432 ath9k_htc_ps_wakeup(priv);
 /* *total_flags is an in/out parameter: report back what we honour. */
1433 changed_flags &= SUPPORTED_FILTERS;
1434 *total_flags &= SUPPORTED_FILTERS;
1435
1436 priv->rxfilter = *total_flags;
1437 rfilt = ath9k_htc_calcrxfilter(priv);
1438 ath9k_hw_setrxfilter(priv->ah, rfilt);
1439
1440 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
1441 "Set HW RX filter: 0x%x\n", rfilt);
1442
1443 ath9k_htc_ps_restore(priv);
1444 mutex_unlock(&priv->mutex);
1445}
1446
1447static void ath9k_htc_sta_notify(struct ieee80211_hw *hw,
1448 struct ieee80211_vif *vif,
1449 enum sta_notify_cmd cmd,
1450 struct ieee80211_sta *sta)
1451{
1452 struct ath9k_htc_priv *priv = hw->priv;
1453 int ret;
1454
1455 mutex_lock(&priv->mutex);
1456
1457 switch (cmd) {
1458 case STA_NOTIFY_ADD:
1459 ret = ath9k_htc_add_station(priv, vif, sta);
1460 if (!ret)
1461 ath9k_htc_init_rate(priv, vif, sta);
1462 break;
1463 case STA_NOTIFY_REMOVE:
1464 ath9k_htc_remove_station(priv, vif, sta);
1465 break;
1466 default:
1467 break;
1468 }
1469
1470 mutex_unlock(&priv->mutex);
1471}
1472
/*
 * mac80211 conf_tx callback: apply WMM TX queue parameters (AIFS, CW min/max,
 * TXOP) for one access category to the matching hardware queue.
 * Returns 0 on success or the error from ath_htc_txq_update().
 */
1473static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
1474 const struct ieee80211_tx_queue_params *params)
1475{
1476 struct ath9k_htc_priv *priv = hw->priv;
1477 struct ath_common *common = ath9k_hw_common(priv->ah);
1478 struct ath9k_tx_queue_info qi;
1479 int ret = 0, qnum;
1480
 /* Only the four WMM data ACs are configurable; silently accept others. */
1481 if (queue >= WME_NUM_AC)
1482 return 0;
1483
1484 mutex_lock(&priv->mutex);
1485
1486 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
1487
1488 qi.tqi_aifs = params->aifs;
1489 qi.tqi_cwmin = params->cw_min;
1490 qi.tqi_cwmax = params->cw_max;
1491 qi.tqi_burstTime = params->txop;
1492
 /* Map the mac80211 queue index onto the hardware queue number. */
1493 qnum = get_hw_qnum(queue, priv->hwq_map);
1494
1495 ath_print(common, ATH_DBG_CONFIG,
1496 "Configure tx [queue/hwq] [%d/%d], "
1497 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
1498 queue, qnum, params->aifs, params->cw_min,
1499 params->cw_max, params->txop);
1500
1501 ret = ath_htc_txq_update(priv, qnum, &qi);
1502 if (ret)
1503 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
1504
1505 mutex_unlock(&priv->mutex);
1506
1507 return ret;
1508}
1509
/*
 * mac80211 set_key callback: install (SET_KEY) or remove (DISABLE_KEY) a
 * hardware crypto key.  Returns -ENOSPC when HW crypto is disabled via the
 * module parameter, which makes mac80211 fall back to software crypto.
 */
1510static int ath9k_htc_set_key(struct ieee80211_hw *hw,
1511 enum set_key_cmd cmd,
1512 struct ieee80211_vif *vif,
1513 struct ieee80211_sta *sta,
1514 struct ieee80211_key_conf *key)
1515{
1516 struct ath9k_htc_priv *priv = hw->priv;
1517 struct ath_common *common = ath9k_hw_common(priv->ah);
1518 int ret = 0;
1519
1520 if (htc_modparam_nohwcrypt)
1521 return -ENOSPC;
1522
1523 mutex_lock(&priv->mutex);
1524 ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
1525 ath9k_htc_ps_wakeup(priv);
1526
1527 switch (cmd) {
1528 case SET_KEY:
 /* ath9k_cmn_key_config() returns the allocated key index (>= 0)
 * on success, a negative error otherwise. */
1529 ret = ath9k_cmn_key_config(common, vif, sta, key);
1530 if (ret >= 0) {
1531 key->hw_key_idx = ret;
1532 /* push IV and Michael MIC generation to stack */
1533 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1534 if (key->alg == ALG_TKIP)
1535 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1536 if (priv->ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
1537 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
1538 ret = 0;
1539 }
1540 break;
1541 case DISABLE_KEY:
1542 ath9k_cmn_key_delete(common, key);
1543 break;
1544 default:
1545 ret = -EINVAL;
1546 }
1547
1548 ath9k_htc_ps_restore(priv);
1549 mutex_unlock(&priv->mutex);
1550
1551 return ret;
1552}
1553
/*
 * mac80211 bss_info_changed callback: propagate BSS state changes
 * (association, BSSID, beacon, ERP parameters, slot time) to the hardware.
 */
1554static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1555 struct ieee80211_vif *vif,
1556 struct ieee80211_bss_conf *bss_conf,
1557 u32 changed)
1558{
1559 struct ath9k_htc_priv *priv = hw->priv;
1560 struct ath_hw *ah = priv->ah;
1561 struct ath_common *common = ath9k_hw_common(ah);
1562
1563 mutex_lock(&priv->mutex);
1564 ath9k_htc_ps_wakeup(priv);
1565
1566 if (changed & BSS_CHANGED_ASSOC) {
1567 common->curaid = bss_conf->assoc ?
1568 bss_conf->aid : 0;
1569 ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
1570 bss_conf->assoc);
1571
 /* ANI calibration runs only while associated; stop the PS and
 * ANI workers on disassociation. */
1572 if (bss_conf->assoc) {
1573 priv->op_flags |= OP_ASSOCIATED;
1574 ath_start_ani(priv);
1575 } else {
1576 priv->op_flags &= ~OP_ASSOCIATED;
1577 cancel_work_sync(&priv->ps_work);
1578 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1579 }
1580 }
1581
1582 if (changed & BSS_CHANGED_BSSID) {
1583 /* Set BSSID */
1584 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1585 ath9k_hw_write_associd(ah);
1586
1587 ath_print(common, ATH_DBG_CONFIG,
1588 "BSSID: %pM aid: 0x%x\n",
1589 common->curbssid, common->curaid);
1590 }
1591
 /* (Re)configure beaconing when the interval/template changes or
 * beaconing is switched on. */
1592 if ((changed & BSS_CHANGED_BEACON_INT) ||
1593 (changed & BSS_CHANGED_BEACON) ||
1594 ((changed & BSS_CHANGED_BEACON_ENABLED) &&
1595 bss_conf->enable_beacon)) {
1596 priv->op_flags |= OP_ENABLE_BEACON;
1597 ath9k_htc_beacon_config(priv, vif);
1598 }
1599
1600 if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
1601 !bss_conf->enable_beacon) {
1602 priv->op_flags &= ~OP_ENABLE_BEACON;
1603 ath9k_htc_beacon_config(priv, vif);
1604 }
1605
1606 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
1607 ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
1608 bss_conf->use_short_preamble);
1609 if (bss_conf->use_short_preamble)
1610 priv->op_flags |= OP_PREAMBLE_SHORT;
1611 else
1612 priv->op_flags &= ~OP_PREAMBLE_SHORT;
1613 }
1614
1615 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
1616 ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
1617 bss_conf->use_cts_prot);
 /* CTS protection is an ERP (2.4 GHz) mechanism only. */
1618 if (bss_conf->use_cts_prot &&
1619 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
1620 priv->op_flags |= OP_PROTECT_ENABLE;
1621 else
1622 priv->op_flags &= ~OP_PROTECT_ENABLE;
1623 }
1624
1625 if (changed & BSS_CHANGED_ERP_SLOT) {
 /* Short slot = 9 us, long slot = 20 us (802.11 ERP). */
1626 if (bss_conf->use_short_slot)
1627 ah->slottime = 9;
1628 else
1629 ah->slottime = 20;
1630
1631 ath9k_hw_init_global_settings(ah);
1632 }
1633
1634 ath9k_htc_ps_restore(priv);
1635 mutex_unlock(&priv->mutex);
1636}
1637
1638static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw)
1639{
1640 struct ath9k_htc_priv *priv = hw->priv;
1641 u64 tsf;
1642
1643 mutex_lock(&priv->mutex);
1644 tsf = ath9k_hw_gettsf64(priv->ah);
1645 mutex_unlock(&priv->mutex);
1646
1647 return tsf;
1648}
1649
/* mac80211 set_tsf callback: program the 64-bit hardware TSF counter. */
1650static void ath9k_htc_set_tsf(struct ieee80211_hw *hw, u64 tsf)
1651{
1652 struct ath9k_htc_priv *priv = hw->priv;
1653
1654 mutex_lock(&priv->mutex);
1655 ath9k_hw_settsf64(priv->ah, tsf);
1656 mutex_unlock(&priv->mutex);
1657}
1658
/* mac80211 reset_tsf callback: zero the hardware TSF counter.  The chip is
 * woken before taking the mutex, unlike the get/set variants above. */
1659static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw)
1660{
1661 struct ath9k_htc_priv *priv = hw->priv;
1662
1663 ath9k_htc_ps_wakeup(priv);
1664 mutex_lock(&priv->mutex);
1665 ath9k_hw_reset_tsf(priv->ah);
1666 mutex_unlock(&priv->mutex);
1667 ath9k_htc_ps_restore(priv);
1668}
1669
/*
 * mac80211 ampdu_action callback.  RX aggregation needs no driver action.
 * TX start/stop requests are queued to the deferred aggregation worker
 * (the target must be notified outside this context); TX_OPERATIONAL just
 * flips the per-TID state.  Returns -ENOTSUPP if TX aggregation is off.
 */
1670static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1671 struct ieee80211_vif *vif,
1672 enum ieee80211_ampdu_mlme_action action,
1673 struct ieee80211_sta *sta,
1674 u16 tid, u16 *ssn)
1675{
1676 struct ath9k_htc_priv *priv = hw->priv;
1677 struct ath9k_htc_aggr_work *work = &priv->aggr_work;
1678 struct ath9k_htc_sta *ista;
1679
1680 switch (action) {
1681 case IEEE80211_AMPDU_RX_START:
1682 break;
1683 case IEEE80211_AMPDU_RX_STOP:
1684 break;
1685 case IEEE80211_AMPDU_TX_START:
1686 case IEEE80211_AMPDU_TX_STOP:
1687 if (!(priv->op_flags & OP_TXAGGR))
1688 return -ENOTSUPP;
 /* Stash the request in priv->aggr_work; note a second request
 * before the worker runs would overwrite this one. */
1689 memcpy(work->sta_addr, sta->addr, ETH_ALEN);
1690 work->hw = hw;
1691 work->vif = vif;
1692 work->action = action;
1693 work->tid = tid;
1694 ieee80211_queue_delayed_work(hw, &priv->ath9k_aggr_work, 0);
1695 break;
1696 case IEEE80211_AMPDU_TX_OPERATIONAL:
1697 ista = (struct ath9k_htc_sta *) sta->drv_priv;
1698 ista->tid_state[tid] = AGGR_OPERATIONAL;
1699 break;
1700 default:
1701 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
1702 "Unknown AMPDU action\n");
1703 }
1704
1705 return 0;
1706}
1707
/*
 * mac80211 sw_scan_start callback: mark the scan in progress (under the
 * beacon lock, since the beacon tasklet checks OP_SCANNING) and stop the
 * PS and ANI workers for the duration of the scan.
 */
1708static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
1709{
1710 struct ath9k_htc_priv *priv = hw->priv;
1711
1712 mutex_lock(&priv->mutex);
1713 spin_lock_bh(&priv->beacon_lock);
1714 priv->op_flags |= OP_SCANNING;
1715 spin_unlock_bh(&priv->beacon_lock);
1716 cancel_work_sync(&priv->ps_work);
1717 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1718 mutex_unlock(&priv->mutex);
1719}
1720
/*
 * mac80211 sw_scan_complete callback: clear the scanning flag, request a
 * full chip reset on the next channel change, re-enable beaconing if we
 * are associated, and restart ANI.
 */
1721static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1722{
1723 struct ath9k_htc_priv *priv = hw->priv;
1724
1725 ath9k_htc_ps_wakeup(priv);
1726 mutex_lock(&priv->mutex);
1727 spin_lock_bh(&priv->beacon_lock);
1728 priv->op_flags &= ~OP_SCANNING;
1729 spin_unlock_bh(&priv->beacon_lock);
 /* Force a full reset so calibration state is rebuilt post-scan. */
1730 priv->op_flags |= OP_FULL_RESET;
1731 if (priv->op_flags & OP_ASSOCIATED)
1732 ath9k_htc_beacon_config(priv, priv->vif);
1733 ath_start_ani(priv);
1734 mutex_unlock(&priv->mutex);
1735 ath9k_htc_ps_restore(priv);
1736}
1737
/* mac80211 set_rts_threshold callback.  Nothing to program here: the RTS
 * decision is made per-frame in the TX path against wiphy->rts_threshold. */
1738static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1739{
1740 return 0;
1741}
1742
/* mac80211 set_coverage_class callback: store the new coverage class and
 * re-run the global timing setup, which derives ACK/CTS timeouts from it. */
1743static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw,
1744 u8 coverage_class)
1745{
1746 struct ath9k_htc_priv *priv = hw->priv;
1747
1748 mutex_lock(&priv->mutex);
1749 priv->ah->coverage_class = coverage_class;
1750 ath9k_hw_init_global_settings(priv->ah);
1751 mutex_unlock(&priv->mutex);
1752}
1753
/* mac80211 driver callbacks for the ath9k_htc (USB target) driver. */
1754struct ieee80211_ops ath9k_htc_ops = {
1755 .tx = ath9k_htc_tx,
1756 .start = ath9k_htc_start,
1757 .stop = ath9k_htc_stop,
1758 .add_interface = ath9k_htc_add_interface,
1759 .remove_interface = ath9k_htc_remove_interface,
1760 .config = ath9k_htc_config,
1761 .configure_filter = ath9k_htc_configure_filter,
1762 .sta_notify = ath9k_htc_sta_notify,
1763 .conf_tx = ath9k_htc_conf_tx,
1764 .bss_info_changed = ath9k_htc_bss_info_changed,
1765 .set_key = ath9k_htc_set_key,
1766 .get_tsf = ath9k_htc_get_tsf,
1767 .set_tsf = ath9k_htc_set_tsf,
1768 .reset_tsf = ath9k_htc_reset_tsf,
1769 .ampdu_action = ath9k_htc_ampdu_action,
1770 .sw_scan_start = ath9k_htc_sw_scan_start,
1771 .sw_scan_complete = ath9k_htc_sw_scan_complete,
1772 .set_rts_threshold = ath9k_htc_set_rts_threshold,
1773 .rfkill_poll = ath9k_htc_rfkill_poll_state,
1774 .set_coverage_class = ath9k_htc_set_coverage_class,
1775};
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
new file mode 100644
index 000000000000..2571b443ac82
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -0,0 +1,707 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19/******/
20/* TX */
21/******/
22
23int get_hw_qnum(u16 queue, int *hwq_map)
24{
25 switch (queue) {
26 case 0:
27 return hwq_map[ATH9K_WME_AC_VO];
28 case 1:
29 return hwq_map[ATH9K_WME_AC_VI];
30 case 2:
31 return hwq_map[ATH9K_WME_AC_BE];
32 case 3:
33 return hwq_map[ATH9K_WME_AC_BK];
34 default:
35 return hwq_map[ATH9K_WME_AC_BE];
36 }
37}
38
/*
 * Apply WMM parameters from @qinfo to hardware TX queue @qnum, preserving
 * the queue properties not covered by @qinfo.  Returns 0 on success or
 * -EIO if the hardware rejects the new properties.
 */
39int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
40 struct ath9k_tx_queue_info *qinfo)
41{
42 struct ath_hw *ah = priv->ah;
43 int error = 0;
44 struct ath9k_tx_queue_info qi;
45
 /* Read-modify-write: start from the queue's current properties. */
46 ath9k_hw_get_txq_props(ah, qnum, &qi);
47
48 qi.tqi_aifs = qinfo->tqi_aifs;
49 qi.tqi_cwmin = qinfo->tqi_cwmin / 2; /* XXX */
50 qi.tqi_cwmax = qinfo->tqi_cwmax;
51 qi.tqi_burstTime = qinfo->tqi_burstTime;
52 qi.tqi_readyTime = qinfo->tqi_readyTime;
53
54 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
55 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
56 "Unable to update hardware queue %u!\n", qnum);
57 error = -EIO;
58 } else {
 /* New properties take effect only after a queue reset. */
59 ath9k_hw_resettxqueue(ah, qnum);
60 }
61
62 return error;
63}
64
65int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
66{
67 struct ieee80211_hdr *hdr;
68 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
69 struct ieee80211_sta *sta = tx_info->control.sta;
70 struct ath9k_htc_sta *ista;
71 struct ath9k_htc_vif *avp;
72 struct ath9k_htc_tx_ctl tx_ctl;
73 enum htc_endpoint_id epid;
74 u16 qnum, hw_qnum;
75 __le16 fc;
76 u8 *tx_fhdr;
77 u8 sta_idx;
78
79 hdr = (struct ieee80211_hdr *) skb->data;
80 fc = hdr->frame_control;
81
82 avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv;
83 if (sta) {
84 ista = (struct ath9k_htc_sta *) sta->drv_priv;
85 sta_idx = ista->index;
86 } else {
87 sta_idx = 0;
88 }
89
90 memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
91
92 if (ieee80211_is_data(fc)) {
93 struct tx_frame_hdr tx_hdr;
94 u8 *qc;
95
96 memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
97
98 tx_hdr.node_idx = sta_idx;
99 tx_hdr.vif_idx = avp->index;
100
101 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
102 tx_ctl.type = ATH9K_HTC_AMPDU;
103 tx_hdr.data_type = ATH9K_HTC_AMPDU;
104 } else {
105 tx_ctl.type = ATH9K_HTC_NORMAL;
106 tx_hdr.data_type = ATH9K_HTC_NORMAL;
107 }
108
109 if (ieee80211_is_data(fc)) {
110 qc = ieee80211_get_qos_ctl(hdr);
111 tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
112 }
113
114 /* Check for RTS protection */
115 if (priv->hw->wiphy->rts_threshold != (u32) -1)
116 if (skb->len > priv->hw->wiphy->rts_threshold)
117 tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS;
118
119 /* CTS-to-self */
120 if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) &&
121 (priv->op_flags & OP_PROTECT_ENABLE))
122 tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY;
123
124 tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
125 if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
126 tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
127 else
128 tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
129
130 tx_fhdr = skb_push(skb, sizeof(tx_hdr));
131 memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
132
133 qnum = skb_get_queue_mapping(skb);
134 hw_qnum = get_hw_qnum(qnum, priv->hwq_map);
135
136 switch (hw_qnum) {
137 case 0:
138 epid = priv->data_be_ep;
139 break;
140 case 2:
141 epid = priv->data_vi_ep;
142 break;
143 case 3:
144 epid = priv->data_vo_ep;
145 break;
146 case 1:
147 default:
148 epid = priv->data_bk_ep;
149 break;
150 }
151 } else {
152 struct tx_mgmt_hdr mgmt_hdr;
153
154 memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr));
155
156 tx_ctl.type = ATH9K_HTC_NORMAL;
157
158 mgmt_hdr.node_idx = sta_idx;
159 mgmt_hdr.vif_idx = avp->index;
160 mgmt_hdr.tidno = 0;
161 mgmt_hdr.flags = 0;
162
163 mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
164 if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
165 mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
166 else
167 mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
168
169 tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
170 memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr));
171 epid = priv->mgmt_ep;
172 }
173
174 return htc_send(priv->htc, skb, epid, &tx_ctl);
175}
176
177void ath9k_tx_tasklet(unsigned long data)
178{
179 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
180 struct ieee80211_sta *sta;
181 struct ieee80211_hdr *hdr;
182 struct ieee80211_tx_info *tx_info;
183 struct sk_buff *skb = NULL;
184 __le16 fc;
185
186 while ((skb = skb_dequeue(&priv->tx_queue)) != NULL) {
187
188 hdr = (struct ieee80211_hdr *) skb->data;
189 fc = hdr->frame_control;
190 tx_info = IEEE80211_SKB_CB(skb);
191
192 memset(&tx_info->status, 0, sizeof(tx_info->status));
193
194 rcu_read_lock();
195
196 sta = ieee80211_find_sta(priv->vif, hdr->addr1);
197 if (!sta) {
198 rcu_read_unlock();
199 ieee80211_tx_status(priv->hw, skb);
200 continue;
201 }
202
203 /* Check if we need to start aggregation */
204
205 if (sta && conf_is_ht(&priv->hw->conf) &&
206 (priv->op_flags & OP_TXAGGR)
207 && !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
208 if (ieee80211_is_data_qos(fc)) {
209 u8 *qc, tid;
210 struct ath9k_htc_sta *ista;
211
212 qc = ieee80211_get_qos_ctl(hdr);
213 tid = qc[0] & 0xf;
214 ista = (struct ath9k_htc_sta *)sta->drv_priv;
215
216 if ((tid < ATH9K_HTC_MAX_TID) &&
217 ista->tid_state[tid] == AGGR_STOP) {
218 ieee80211_start_tx_ba_session(sta, tid);
219 ista->tid_state[tid] = AGGR_PROGRESS;
220 }
221 }
222 }
223
224 rcu_read_unlock();
225
226 /* Send status to mac80211 */
227 ieee80211_tx_status(priv->hw, skb);
228 }
229
230 /* Wake TX queues if needed */
231 spin_lock_bh(&priv->tx_lock);
232 if (priv->tx_queues_stop) {
233 priv->tx_queues_stop = false;
234 spin_unlock_bh(&priv->tx_lock);
235 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
236 "Waking up TX queues\n");
237 ieee80211_wake_queues(priv->hw);
238 return;
239 }
240 spin_unlock_bh(&priv->tx_lock);
241}
242
/*
 * HTC TX-complete callback: strip the target's TX descriptor that
 * ath9k_htc_tx_start() prepended, record ACK status, and queue the frame
 * for the TX tasklet to report to mac80211.
 */
243void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
244 enum htc_endpoint_id ep_id, bool txok)
245{
246 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
247 struct ath_common *common = ath9k_hw_common(priv->ah);
248 struct ieee80211_tx_info *tx_info;
249
250 if (!skb)
251 return;
252
 /* Header size depends on which endpoint (mgmt vs. data) sent it. */
253 if (ep_id == priv->mgmt_ep) {
254 skb_pull(skb, sizeof(struct tx_mgmt_hdr));
255 } else if ((ep_id == priv->data_bk_ep) ||
256 (ep_id == priv->data_be_ep) ||
257 (ep_id == priv->data_vi_ep) ||
258 (ep_id == priv->data_vo_ep)) {
259 skb_pull(skb, sizeof(struct tx_frame_hdr));
260 } else {
261 ath_print(common, ATH_DBG_FATAL,
262 "Unsupported TX EPID: %d\n", ep_id);
263 dev_kfree_skb_any(skb);
264 return;
265 }
266
267 tx_info = IEEE80211_SKB_CB(skb);
268
269 if (txok)
270 tx_info->flags |= IEEE80211_TX_STAT_ACK;
271
 /* Defer the mac80211 status report to ath9k_tx_tasklet(). */
272 skb_queue_tail(&priv->tx_queue, skb);
273 tasklet_schedule(&priv->tx_tasklet);
274}
275
/* Initialize the TX-completion queue.  Always succeeds (returns 0). */
276int ath9k_tx_init(struct ath9k_htc_priv *priv)
277{
278 skb_queue_head_init(&priv->tx_queue);
279 return 0;
280}
281
/* TX teardown counterpart of ath9k_tx_init(); nothing to free currently. */
282void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
283{
284
285}
286
/*
 * Allocate a hardware data TX queue for WMM subtype @subtype and record it
 * in priv->hwq_map.  Returns true on success, false if the hardware has no
 * free queue or returns an index outside the map.
 */
287bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
288 enum ath9k_tx_queue_subtype subtype)
289{
290 struct ath_hw *ah = priv->ah;
291 struct ath_common *common = ath9k_hw_common(ah);
292 struct ath9k_tx_queue_info qi;
293 int qnum;
294
295 memset(&qi, 0, sizeof(qi));
296
297 qi.tqi_subtype = subtype;
298 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
299 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
300 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
301 qi.tqi_physCompBuf = 0;
302 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE;
303
304 qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
305 if (qnum == -1)
306 return false;
307
 /* Defensive: release the queue if the index can't fit in hwq_map. */
308 if (qnum >= ARRAY_SIZE(priv->hwq_map)) {
309 ath_print(common, ATH_DBG_FATAL,
310 "qnum %u out of range, max %u!\n",
311 qnum, (unsigned int)ARRAY_SIZE(priv->hwq_map));
312 ath9k_hw_releasetxqueue(ah, qnum);
313 return false;
314 }
315
316 priv->hwq_map[subtype] = qnum;
317 return true;
318}
319
320/******/
321/* RX */
322/******/
323
324/*
325 * Calculate the RX filter to be set in the HW.
326 */
327u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
328{
329#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
330
331 struct ath_hw *ah = priv->ah;
332 u32 rfilt;
333
 /* Keep the PHY error/radar bits currently set in hardware; always
 * accept unicast, broadcast and multicast. */
334 rfilt = (ath9k_hw_getrxfilter(ah) & RX_FILTER_PRESERVE)
335 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
336 | ATH9K_RX_FILTER_MCAST;
337
338 /* If not a STA, enable processing of Probe Requests */
339 if (ah->opmode != NL80211_IFTYPE_STATION)
340 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
341
342 /*
343 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
344 * mode interface or when in monitor mode. AP mode does not need this
345 * since it receives all in-BSS frames anyway.
346 */
347 if (((ah->opmode != NL80211_IFTYPE_AP) &&
348 (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
349 (ah->opmode == NL80211_IFTYPE_MONITOR))
350 rfilt |= ATH9K_RX_FILTER_PROM;
351
352 if (priv->rxfilter & FIF_CONTROL)
353 rfilt |= ATH9K_RX_FILTER_CONTROL;
354
 /* STA not in beacon-promisc mode only needs its own BSS's beacons. */
355 if ((ah->opmode == NL80211_IFTYPE_STATION) &&
356 !(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC))
357 rfilt |= ATH9K_RX_FILTER_MYBEACON;
358 else
359 rfilt |= ATH9K_RX_FILTER_BEACON;
360
 /* In HT, compressed BlockAck Requests must reach the stack. */
361 if (conf_is_ht(&priv->hw->conf))
362 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
363
364 return rfilt;
365
366#undef RX_FILTER_PRESERVE
367}
368
369/*
370 * Recv initialization for opmode change.
371 */
372static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
373{
374 struct ath_hw *ah = priv->ah;
375 struct ath_common *common = ath9k_hw_common(ah);
376
377 u32 rfilt, mfilt[2];
378
379 /* configure rx filter */
380 rfilt = ath9k_htc_calcrxfilter(priv);
381 ath9k_hw_setrxfilter(ah, rfilt);
382
383 /* configure bssid mask */
384 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
385 ath_hw_setbssidmask(common);
386
387 /* configure operational mode */
388 ath9k_hw_setopmode(ah);
389
390 /* Handle any link-level address change. */
391 ath9k_hw_setmac(ah, common->macaddr);
392
393 /* calculate and install multicast filter */
 /* All-ones mask: accept every multicast group. */
394 mfilt[0] = mfilt[1] = ~0;
395 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
396}
397
/* Enable hardware RX: start the DMA engine, program filters/opmode for the
 * current interface, start the PCU, and reset the RSSI averaging state. */
398void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
399{
400 ath9k_hw_rxena(priv->ah);
401 ath9k_htc_opmode_init(priv);
402 ath9k_hw_startpcureceive(priv->ah);
403 priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
404}
405
406static void ath9k_process_rate(struct ieee80211_hw *hw,
407 struct ieee80211_rx_status *rxs,
408 u8 rx_rate, u8 rs_flags)
409{
410 struct ieee80211_supported_band *sband;
411 enum ieee80211_band band;
412 unsigned int i = 0;
413
414 if (rx_rate & 0x80) {
415 /* HT rate */
416 rxs->flag |= RX_FLAG_HT;
417 if (rs_flags & ATH9K_RX_2040)
418 rxs->flag |= RX_FLAG_40MHZ;
419 if (rs_flags & ATH9K_RX_GI)
420 rxs->flag |= RX_FLAG_SHORT_GI;
421 rxs->rate_idx = rx_rate & 0x7f;
422 return;
423 }
424
425 band = hw->conf.channel->band;
426 sband = hw->wiphy->bands[band];
427
428 for (i = 0; i < sband->n_bitrates; i++) {
429 if (sband->bitrates[i].hw_value == rx_rate) {
430 rxs->rate_idx = i;
431 return;
432 }
433 if (sband->bitrates[i].hw_value_short == rx_rate) {
434 rxs->rate_idx = i;
435 rxs->flag |= RX_FLAG_SHORTPRE;
436 return;
437 }
438 }
439
440}
441
/*
 * Validate one received frame and fill @rx_status for mac80211.
 *
 * Strips the target's HTC RX header (saved into rxbuf->rxstatus), removes
 * 802.11 header padding, filters error frames, fixes up crypto flags and
 * smooths the RSSI.  Returns true if the frame should be handed to
 * mac80211, false if it must be dropped.
 */
442static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
443 struct ath9k_htc_rxbuf *rxbuf,
444 struct ieee80211_rx_status *rx_status)
445
446{
447 struct ieee80211_hdr *hdr;
448 struct ieee80211_hw *hw = priv->hw;
449 struct sk_buff *skb = rxbuf->skb;
450 struct ath_common *common = ath9k_hw_common(priv->ah);
451 struct ath_htc_rx_status *rxstatus;
452 int hdrlen, padpos, padsize;
453 int last_rssi = ATH_RSSI_DUMMY_MARKER;
454 __le16 fc;
455
456 if (skb->len <= HTC_RX_FRAME_HEADER_SIZE) {
457 ath_print(common, ATH_DBG_FATAL,
458 "Corrupted RX frame, dropping\n");
459 goto rx_next;
460 }
461
462 rxstatus = (struct ath_htc_rx_status *)skb->data;
463
 /* The target's declared payload length must match the skb exactly. */
464 if (be16_to_cpu(rxstatus->rs_datalen) -
465 (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
466 ath_print(common, ATH_DBG_FATAL,
467 "Corrupted RX data len, dropping "
468 "(dlen: %d, skblen: %d)\n",
469 rxstatus->rs_datalen, skb->len);
470 goto rx_next;
471 }
472
473 /* Get the RX status information */
474 memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
475 skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
476
477 hdr = (struct ieee80211_hdr *)skb->data;
478 fc = hdr->frame_control;
479 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
480
 /* The hardware pads the 802.11 header to a 4-byte boundary; slide
 * the header over the pad bytes and drop them. */
481 padpos = ath9k_cmn_padpos(fc);
482
483 padsize = padpos & 3;
484 if (padsize && skb->len >= padpos+padsize+FCS_LEN) {
485 memmove(skb->data + padsize, skb->data, padpos);
486 skb_pull(skb, padsize);
487 }
488
489 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
490
490 if (rxbuf->rxstatus.rs_status != 0) {
492 if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
493 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
494 if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
495 goto rx_next;
496
497 if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
498 /* FIXME */
499 } else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
500 if (ieee80211_is_ctl(fc))
501 /*
502 * Sometimes, we get invalid
503 * MIC failures on valid control frames.
504 * Remove these mic errors.
505 */
506 rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
507 else
508 rx_status->flag |= RX_FLAG_MMIC_ERROR;
509 }
510
511 /*
512 * Reject error frames with the exception of
513 * decryption and MIC failures. For monitor mode,
514 * we also ignore the CRC error.
515 */
516 if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
517 if (rxbuf->rxstatus.rs_status &
518 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
519 ATH9K_RXERR_CRC))
520 goto rx_next;
521 } else {
522 if (rxbuf->rxstatus.rs_status &
523 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
524 goto rx_next;
525 }
526 }
527 }
528
 /* Mark frames the hardware decrypted, either via the reported key
 * index or by matching the key ID byte against our key map. */
529 if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
530 u8 keyix;
531 keyix = rxbuf->rxstatus.rs_keyix;
532 if (keyix != ATH9K_RXKEYIX_INVALID) {
533 rx_status->flag |= RX_FLAG_DECRYPTED;
534 } else if (ieee80211_has_protected(fc) &&
535 skb->len >= hdrlen + 4) {
536 keyix = skb->data[hdrlen + 3] >> 6;
537 if (test_bit(keyix, common->keymap))
538 rx_status->flag |= RX_FLAG_DECRYPTED;
539 }
540 }
541
542 ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
543 rxbuf->rxstatus.rs_flags);
544
 /* While associated, low-pass filter the per-frame RSSI so the
 * reported signal level is smoothed. */
545 if (priv->op_flags & OP_ASSOCIATED) {
546 if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
547 !rxbuf->rxstatus.rs_moreaggr)
548 ATH_RSSI_LPF(priv->rx.last_rssi,
549 rxbuf->rxstatus.rs_rssi);
550
551 last_rssi = priv->rx.last_rssi;
552
553 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
554 rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
555 ATH_RSSI_EP_MULTIPLIER);
556
557 if (rxbuf->rxstatus.rs_rssi < 0)
558 rxbuf->rxstatus.rs_rssi = 0;
559
560 if (ieee80211_is_beacon(fc))
561 priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
562 }
563
564 rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
565 rx_status->band = hw->conf.channel->band;
566 rx_status->freq = hw->conf.channel->center_freq;
567 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
568 rx_status->antenna = rxbuf->rxstatus.rs_antenna;
569 rx_status->flag |= RX_FLAG_TSFT;
570
571 return true;
572
573rx_next:
574 return false;
575}
576
577/*
578 * FIXME: Handle FLUSH later on.
579 */
/*
 * RX tasklet: repeatedly pick one in-process buffer from priv->rx.rxbuf,
 * prepare it via ath9k_rx_prepare(), hand the skb to mac80211 and recycle
 * the buffer slot.  Loops until no in-process buffer remains.
 * (FIXME in original source: FLUSH handling is not implemented.)
 */
580void ath9k_rx_tasklet(unsigned long data)
581{
582 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
583 struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
584 struct ieee80211_rx_status rx_status;
585 struct sk_buff *skb;
586 unsigned long flags;
587 struct ieee80211_hdr *hdr;
588
589 do {
590 spin_lock_irqsave(&priv->rx.rxbuflock, flags);
591 list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
592 if (tmp_buf->in_process) {
593 rxbuf = tmp_buf;
594 break;
595 }
596 }
597
598 if (rxbuf == NULL) {
599 spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
600 break;
601 }
602
603 if (!rxbuf->skb)
604 goto requeue;
605
606 if (!ath9k_rx_prepare(priv, rxbuf, &rx_status)) {
607 dev_kfree_skb_any(rxbuf->skb);
608 goto requeue;
609 }
610
611 memcpy(IEEE80211_SKB_RXCB(rxbuf->skb), &rx_status,
612 sizeof(struct ieee80211_rx_status));
613 skb = rxbuf->skb;
614 hdr = (struct ieee80211_hdr *) skb->data;
615
 /* A beacon while PS is enabled may let us doze again. */
616 if (ieee80211_is_beacon(hdr->frame_control) && priv->ps_enabled)
617 ieee80211_queue_work(priv->hw, &priv->ps_work);
618
 /* Drop the lock across ieee80211_rx(); the buffer stays marked
 * in_process so no one else can claim it meanwhile. */
619 spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
620
621 ieee80211_rx(priv->hw, skb);
622
623 spin_lock_irqsave(&priv->rx.rxbuflock, flags);
624requeue:
625 rxbuf->in_process = false;
626 rxbuf->skb = NULL;
627 list_move_tail(&rxbuf->list, &priv->rx.rxbuf);
628 rxbuf = NULL;
629 spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
630 } while (1);
631
632}
633
/*
 * HTC RX callback: attach the incoming skb to a free buffer slot and kick
 * the RX tasklet.  The skb is freed if no slot is available.
 *
 * NOTE(review): the free slot is found under the lock, but the lock is
 * dropped and re-taken before the slot is claimed (in_process = true);
 * if this callback can run concurrently with itself, two invocations could
 * pick the same slot — confirm the calling context is serialized.
 */
634void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
635 enum htc_endpoint_id ep_id)
636{
637 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)drv_priv;
638 struct ath_hw *ah = priv->ah;
639 struct ath_common *common = ath9k_hw_common(ah);
640 struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
641
642 spin_lock(&priv->rx.rxbuflock);
643 list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
644 if (!tmp_buf->in_process) {
645 rxbuf = tmp_buf;
646 break;
647 }
648 }
649 spin_unlock(&priv->rx.rxbuflock);
650
651 if (rxbuf == NULL) {
652 ath_print(common, ATH_DBG_ANY,
653 "No free RX buffer\n");
654 goto err;
655 }
656
657 spin_lock(&priv->rx.rxbuflock);
658 rxbuf->skb = skb;
659 rxbuf->in_process = true;
660 spin_unlock(&priv->rx.rxbuflock);
661
662 tasklet_schedule(&priv->rx_tasklet);
663 return;
664err:
665 dev_kfree_skb_any(skb);
666}
667
668/* FIXME: Locking for cleanup/init */
669
/* Free every RX buffer slot and any skb still attached to it. */
670void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
671{
672 struct ath9k_htc_rxbuf *rxbuf, *tbuf;
673
674 list_for_each_entry_safe(rxbuf, tbuf, &priv->rx.rxbuf, list) {
675 list_del(&rxbuf->list);
676 if (rxbuf->skb)
677 dev_kfree_skb_any(rxbuf->skb);
678 kfree(rxbuf);
679 }
680}
681
/*
 * Allocate ATH9K_HTC_RXBUF buffer slots for the RX path.  On any allocation
 * failure the slots allocated so far are released and -ENOMEM is returned;
 * returns 0 on success.
 */
682int ath9k_rx_init(struct ath9k_htc_priv *priv)
683{
684 struct ath_hw *ah = priv->ah;
685 struct ath_common *common = ath9k_hw_common(ah);
686 struct ath9k_htc_rxbuf *rxbuf;
687 int i = 0;
688
689 INIT_LIST_HEAD(&priv->rx.rxbuf);
690 spin_lock_init(&priv->rx.rxbuflock);
691
692 for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
693 rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
694 if (rxbuf == NULL) {
695 ath_print(common, ATH_DBG_FATAL,
696 "Unable to allocate RX buffers\n");
697 goto err;
698 }
699 list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
700 }
701
702 return 0;
703
704err:
 /* Unwind: free the partially-built list. */
705 ath9k_rx_cleanup(priv);
706 return -ENOMEM;
707}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
new file mode 100644
index 000000000000..064397fd738e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -0,0 +1,480 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
20 u16 len, u8 flags, u8 epid,
21 struct ath9k_htc_tx_ctl *tx_ctl)
22{
23 struct htc_frame_hdr *hdr;
24 struct htc_endpoint *endpoint = &target->endpoint[epid];
25 int status;
26
27 hdr = (struct htc_frame_hdr *)
28 skb_push(skb, sizeof(struct htc_frame_hdr));
29 hdr->endpoint_id = epid;
30 hdr->flags = flags;
31 hdr->payload_len = cpu_to_be16(len);
32
33 status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb,
34 tx_ctl);
35 return status;
36}
37
38static struct htc_endpoint *get_next_avail_ep(struct htc_endpoint *endpoint)
39{
40 enum htc_endpoint_id avail_epid;
41
42 for (avail_epid = (ENDPOINT_MAX - 1); avail_epid > ENDPOINT0; avail_epid--)
43 if (endpoint[avail_epid].service_id == 0)
44 return &endpoint[avail_epid];
45 return NULL;
46}
47
48static u8 service_to_ulpipe(u16 service_id)
49{
50 switch (service_id) {
51 case WMI_CONTROL_SVC:
52 return 4;
53 case WMI_BEACON_SVC:
54 case WMI_CAB_SVC:
55 case WMI_UAPSD_SVC:
56 case WMI_MGMT_SVC:
57 case WMI_DATA_VO_SVC:
58 case WMI_DATA_VI_SVC:
59 case WMI_DATA_BE_SVC:
60 case WMI_DATA_BK_SVC:
61 return 1;
62 default:
63 return 0;
64 }
65}
66
67static u8 service_to_dlpipe(u16 service_id)
68{
69 switch (service_id) {
70 case WMI_CONTROL_SVC:
71 return 3;
72 case WMI_BEACON_SVC:
73 case WMI_CAB_SVC:
74 case WMI_UAPSD_SVC:
75 case WMI_MGMT_SVC:
76 case WMI_DATA_VO_SVC:
77 case WMI_DATA_VI_SVC:
78 case WMI_DATA_BE_SVC:
79 case WMI_DATA_BK_SVC:
80 return 2;
81 default:
82 return 0;
83 }
84}
85
86static void htc_process_target_rdy(struct htc_target *target,
87 void *buf)
88{
89 struct htc_endpoint *endpoint;
90 struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf;
91
92 target->credits = be16_to_cpu(htc_ready_msg->credits);
93 target->credit_size = be16_to_cpu(htc_ready_msg->credit_size);
94
95 endpoint = &target->endpoint[ENDPOINT0];
96 endpoint->service_id = HTC_CTRL_RSVD_SVC;
97 endpoint->max_msglen = HTC_MAX_CONTROL_MESSAGE_LENGTH;
98 atomic_inc(&target->tgt_ready);
99 complete(&target->target_wait);
100}
101
102static void htc_process_conn_rsp(struct htc_target *target,
103 struct htc_frame_hdr *htc_hdr)
104{
105 struct htc_conn_svc_rspmsg *svc_rspmsg;
106 struct htc_endpoint *endpoint, *tmp_endpoint = NULL;
107 u16 service_id;
108 u16 max_msglen;
109 enum htc_endpoint_id epid, tepid;
110
111 svc_rspmsg = (struct htc_conn_svc_rspmsg *)
112 ((void *) htc_hdr + sizeof(struct htc_frame_hdr));
113
114 if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
115 epid = svc_rspmsg->endpoint_id;
116 service_id = be16_to_cpu(svc_rspmsg->service_id);
117 max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
118 endpoint = &target->endpoint[epid];
119
120 for (tepid = (ENDPOINT_MAX - 1); tepid > ENDPOINT0; tepid--) {
121 tmp_endpoint = &target->endpoint[tepid];
122 if (tmp_endpoint->service_id == service_id) {
123 tmp_endpoint->service_id = 0;
124 break;
125 }
126 }
127
128 if (tepid == ENDPOINT0)
129 return;
130
131 endpoint->service_id = service_id;
132 endpoint->max_txqdepth = tmp_endpoint->max_txqdepth;
133 endpoint->ep_callbacks = tmp_endpoint->ep_callbacks;
134 endpoint->ul_pipeid = tmp_endpoint->ul_pipeid;
135 endpoint->dl_pipeid = tmp_endpoint->dl_pipeid;
136 endpoint->max_msglen = max_msglen;
137 target->conn_rsp_epid = epid;
138 complete(&target->cmd_wait);
139 } else {
140 target->conn_rsp_epid = ENDPOINT_UNUSED;
141 }
142}
143
144static int htc_config_pipe_credits(struct htc_target *target)
145{
146 struct sk_buff *skb;
147 struct htc_config_pipe_msg *cp_msg;
148 int ret, time_left;
149
150 skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
151 if (!skb) {
152 dev_err(target->dev, "failed to allocate send buffer\n");
153 return -ENOMEM;
154 }
155 skb_reserve(skb, sizeof(struct htc_frame_hdr));
156
157 cp_msg = (struct htc_config_pipe_msg *)
158 skb_put(skb, sizeof(struct htc_config_pipe_msg));
159
160 cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID);
161 cp_msg->pipe_id = USB_WLAN_TX_PIPE;
162 cp_msg->credits = 28;
163
164 target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
165
166 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
167 if (ret)
168 goto err;
169
170 time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
171 if (!time_left) {
172 dev_err(target->dev, "HTC credit config timeout\n");
173 return -ETIMEDOUT;
174 }
175
176 return 0;
177err:
178 kfree_skb(skb);
179 return -EINVAL;
180}
181
182static int htc_setup_complete(struct htc_target *target)
183{
184 struct sk_buff *skb;
185 struct htc_comp_msg *comp_msg;
186 int ret = 0, time_left;
187
188 skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
189 if (!skb) {
190 dev_err(target->dev, "failed to allocate send buffer\n");
191 return -ENOMEM;
192 }
193 skb_reserve(skb, sizeof(struct htc_frame_hdr));
194
195 comp_msg = (struct htc_comp_msg *)
196 skb_put(skb, sizeof(struct htc_comp_msg));
197 comp_msg->msg_id = cpu_to_be16(HTC_MSG_SETUP_COMPLETE_ID);
198
199 target->htc_flags |= HTC_OP_START_WAIT;
200
201 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
202 if (ret)
203 goto err;
204
205 time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
206 if (!time_left) {
207 dev_err(target->dev, "HTC start timeout\n");
208 return -ETIMEDOUT;
209 }
210
211 return 0;
212
213err:
214 kfree_skb(skb);
215 return -EINVAL;
216}
217
218/* HTC APIs */
219
/*
 * Bring up the HTC layer: configure pipe credits first, then signal
 * setup-complete to the target. Returns 0 on success or the first
 * failing step's error code.
 */
int htc_init(struct htc_target *target)
{
	int ret;

	ret = htc_config_pipe_credits(target);
	return ret ? ret : htc_setup_complete(target);
}
230
231int htc_connect_service(struct htc_target *target,
232 struct htc_service_connreq *service_connreq,
233 enum htc_endpoint_id *conn_rsp_epid)
234{
235 struct sk_buff *skb;
236 struct htc_endpoint *endpoint;
237 struct htc_conn_svc_msg *conn_msg;
238 int ret, time_left;
239
240 /* Find an available endpoint */
241 endpoint = get_next_avail_ep(target->endpoint);
242 if (!endpoint) {
243 dev_err(target->dev, "Endpoint is not available for"
244 "service %d\n", service_connreq->service_id);
245 return -EINVAL;
246 }
247
248 endpoint->service_id = service_connreq->service_id;
249 endpoint->max_txqdepth = service_connreq->max_send_qdepth;
250 endpoint->ul_pipeid = service_to_ulpipe(service_connreq->service_id);
251 endpoint->dl_pipeid = service_to_dlpipe(service_connreq->service_id);
252 endpoint->ep_callbacks = service_connreq->ep_callbacks;
253
254 skb = alloc_skb(sizeof(struct htc_conn_svc_msg) +
255 sizeof(struct htc_frame_hdr), GFP_ATOMIC);
256 if (!skb) {
257 dev_err(target->dev, "Failed to allocate buf to send"
258 "service connect req\n");
259 return -ENOMEM;
260 }
261
262 skb_reserve(skb, sizeof(struct htc_frame_hdr));
263
264 conn_msg = (struct htc_conn_svc_msg *)
265 skb_put(skb, sizeof(struct htc_conn_svc_msg));
266 conn_msg->service_id = cpu_to_be16(service_connreq->service_id);
267 conn_msg->msg_id = cpu_to_be16(HTC_MSG_CONNECT_SERVICE_ID);
268 conn_msg->con_flags = cpu_to_be16(service_connreq->con_flags);
269 conn_msg->dl_pipeid = endpoint->dl_pipeid;
270 conn_msg->ul_pipeid = endpoint->ul_pipeid;
271
272 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
273 if (ret)
274 goto err;
275
276 time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
277 if (!time_left) {
278 dev_err(target->dev, "Service connection timeout for: %d\n",
279 service_connreq->service_id);
280 return -ETIMEDOUT;
281 }
282
283 *conn_rsp_epid = target->conn_rsp_epid;
284 return 0;
285err:
286 kfree_skb(skb);
287 return ret;
288}
289
290int htc_send(struct htc_target *target, struct sk_buff *skb,
291 enum htc_endpoint_id epid, struct ath9k_htc_tx_ctl *tx_ctl)
292{
293 return htc_issue_send(target, skb, skb->len, 0, epid, tx_ctl);
294}
295
296void htc_stop(struct htc_target *target)
297{
298 enum htc_endpoint_id epid;
299 struct htc_endpoint *endpoint;
300
301 for (epid = ENDPOINT0; epid < ENDPOINT_MAX; epid++) {
302 endpoint = &target->endpoint[epid];
303 if (endpoint->service_id != 0)
304 target->hif->stop(target->hif_dev, endpoint->ul_pipeid);
305 }
306}
307
308void htc_start(struct htc_target *target)
309{
310 enum htc_endpoint_id epid;
311 struct htc_endpoint *endpoint;
312
313 for (epid = ENDPOINT0; epid < ENDPOINT_MAX; epid++) {
314 endpoint = &target->endpoint[epid];
315 if (endpoint->service_id != 0)
316 target->hif->start(target->hif_dev,
317 endpoint->ul_pipeid);
318 }
319}
320
321void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
322 struct sk_buff *skb, bool txok)
323{
324 struct htc_endpoint *endpoint;
325 struct htc_frame_hdr *htc_hdr = NULL;
326
327 if (htc_handle->htc_flags & HTC_OP_CONFIG_PIPE_CREDITS) {
328 complete(&htc_handle->cmd_wait);
329 htc_handle->htc_flags &= ~HTC_OP_CONFIG_PIPE_CREDITS;
330 goto ret;
331 }
332
333 if (htc_handle->htc_flags & HTC_OP_START_WAIT) {
334 complete(&htc_handle->cmd_wait);
335 htc_handle->htc_flags &= ~HTC_OP_START_WAIT;
336 goto ret;
337 }
338
339 if (skb) {
340 htc_hdr = (struct htc_frame_hdr *) skb->data;
341 endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
342 skb_pull(skb, sizeof(struct htc_frame_hdr));
343
344 if (endpoint->ep_callbacks.tx) {
345 endpoint->ep_callbacks.tx(endpoint->ep_callbacks.priv,
346 skb, htc_hdr->endpoint_id,
347 txok);
348 }
349 }
350
351 return;
352ret:
353 /* HTC-generated packets are freed here. */
354 if (htc_hdr && htc_hdr->endpoint_id != ENDPOINT0)
355 dev_kfree_skb_any(skb);
356 else
357 kfree_skb(skb);
358}
359
360/*
361 * HTC Messages are handled directly here and the obtained SKB
362 * is freed.
363 *
364 * Service messages (Data, WMI) are passed to the corresponding
365 * endpoint RX handlers, which have to free the SKB.
366 */
367void ath9k_htc_rx_msg(struct htc_target *htc_handle,
368 struct sk_buff *skb, u32 len, u8 pipe_id)
369{
370 struct htc_frame_hdr *htc_hdr;
371 enum htc_endpoint_id epid;
372 struct htc_endpoint *endpoint;
373 __be16 *msg_id;
374
375 if (!htc_handle || !skb)
376 return;
377
378 htc_hdr = (struct htc_frame_hdr *) skb->data;
379 epid = htc_hdr->endpoint_id;
380
381 if (epid >= ENDPOINT_MAX) {
382 if (pipe_id != USB_REG_IN_PIPE)
383 dev_kfree_skb_any(skb);
384 else
385 kfree_skb(skb);
386 return;
387 }
388
389 if (epid == ENDPOINT0) {
390
391 /* Handle trailer */
392 if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
393 if (be32_to_cpu(*(__be32 *) skb->data) == 0x00C60000)
394 /* Move past the Watchdog pattern */
395 htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
396 }
397
398 /* Get the message ID */
399 msg_id = (__be16 *) ((void *) htc_hdr +
400 sizeof(struct htc_frame_hdr));
401
402 /* Now process HTC messages */
403 switch (be16_to_cpu(*msg_id)) {
404 case HTC_MSG_READY_ID:
405 htc_process_target_rdy(htc_handle, htc_hdr);
406 break;
407 case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
408 htc_process_conn_rsp(htc_handle, htc_hdr);
409 break;
410 default:
411 break;
412 }
413
414 kfree_skb(skb);
415
416 } else {
417 if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER)
418 skb_trim(skb, len - htc_hdr->control[0]);
419
420 skb_pull(skb, sizeof(struct htc_frame_hdr));
421
422 endpoint = &htc_handle->endpoint[epid];
423 if (endpoint->ep_callbacks.rx)
424 endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
425 skb, epid);
426 }
427}
428
429struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
430 struct ath9k_htc_hif *hif,
431 struct device *dev)
432{
433 struct htc_endpoint *endpoint;
434 struct htc_target *target;
435
436 target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
437 if (!target) {
438 printk(KERN_ERR "Unable to allocate memory for"
439 "target device\n");
440 return NULL;
441 }
442
443 init_completion(&target->target_wait);
444 init_completion(&target->cmd_wait);
445
446 target->hif = hif;
447 target->hif_dev = hif_handle;
448 target->dev = dev;
449
450 /* Assign control endpoint pipe IDs */
451 endpoint = &target->endpoint[ENDPOINT0];
452 endpoint->ul_pipeid = hif->control_ul_pipe;
453 endpoint->dl_pipeid = hif->control_dl_pipe;
454
455 atomic_set(&target->tgt_ready, 0);
456
457 return target;
458}
459
/* Release a target allocated by ath9k_htc_hw_alloc(). NULL is a no-op. */
void ath9k_htc_hw_free(struct htc_target *htc)
{
	kfree(htc);
}
464
465int ath9k_htc_hw_init(struct htc_target *target,
466 struct device *dev, u16 devid)
467{
468 if (ath9k_htc_probe_device(target, dev, devid)) {
469 printk(KERN_ERR "Failed to initialize the device\n");
470 return -ENODEV;
471 }
472
473 return 0;
474}
475
476void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug)
477{
478 if (target)
479 ath9k_htc_disconnect_device(target, hot_unplug);
480}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
new file mode 100644
index 000000000000..faba6790328b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -0,0 +1,245 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HTC_HST_H
18#define HTC_HST_H
19
20struct ath9k_htc_priv;
21struct htc_target;
22struct ath9k_htc_tx_ctl;
23
24enum ath9k_hif_transports {
25 ATH9K_HIF_USB,
26};
27
28struct ath9k_htc_hif {
29 struct list_head list;
30 const enum ath9k_hif_transports transport;
31 const char *name;
32
33 u8 control_dl_pipe;
34 u8 control_ul_pipe;
35
36 void (*start) (void *hif_handle, u8 pipe);
37 void (*stop) (void *hif_handle, u8 pipe);
38 int (*send) (void *hif_handle, u8 pipe, struct sk_buff *buf,
39 struct ath9k_htc_tx_ctl *tx_ctl);
40};
41
42enum htc_endpoint_id {
43 ENDPOINT_UNUSED = -1,
44 ENDPOINT0 = 0,
45 ENDPOINT1 = 1,
46 ENDPOINT2 = 2,
47 ENDPOINT3 = 3,
48 ENDPOINT4 = 4,
49 ENDPOINT5 = 5,
50 ENDPOINT6 = 6,
51 ENDPOINT7 = 7,
52 ENDPOINT8 = 8,
53 ENDPOINT_MAX = 22
54};
55
56/* Htc frame hdr flags */
57#define HTC_FLAGS_RECV_TRAILER (1 << 1)
58
59struct htc_frame_hdr {
60 u8 endpoint_id;
61 u8 flags;
62 __be16 payload_len;
63 u8 control[4];
64} __packed;
65
66struct htc_ready_msg {
67 __be16 message_id;
68 __be16 credits;
69 __be16 credit_size;
70 u8 max_endpoints;
71 u8 pad;
72} __packed;
73
74struct htc_config_pipe_msg {
75 __be16 message_id;
76 u8 pipe_id;
77 u8 credits;
78} __packed;
79
80struct htc_packet {
81 void *pktcontext;
82 u8 *buf;
83 u8 *buf_payload;
84 u32 buflen;
85 u32 payload_len;
86
87 int endpoint;
88 int status;
89
90 void *context;
91 u32 reserved;
92};
93
94struct htc_ep_callbacks {
95 void *priv;
96 void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
97 void (*rx) (void *, struct sk_buff *, enum htc_endpoint_id);
98};
99
100#define HTC_TX_QUEUE_SIZE 256
101
102struct htc_txq {
103 struct sk_buff *buf[HTC_TX_QUEUE_SIZE];
104 u32 txqdepth;
105 u16 txbuf_cnt;
106 u16 txq_head;
107 u16 txq_tail;
108};
109
110struct htc_endpoint {
111 u16 service_id;
112
113 struct htc_ep_callbacks ep_callbacks;
114 struct htc_txq htc_txq;
115 u32 max_txqdepth;
116 int max_msglen;
117
118 u8 ul_pipeid;
119 u8 dl_pipeid;
120};
121
122#define HTC_MAX_CONTROL_MESSAGE_LENGTH 255
123#define HTC_CONTROL_BUFFER_SIZE \
124 (HTC_MAX_CONTROL_MESSAGE_LENGTH + sizeof(struct htc_frame_hdr))
125
126struct htc_control_buf {
127 struct htc_packet htc_pkt;
128 u8 buf[HTC_CONTROL_BUFFER_SIZE];
129};
130
131#define HTC_OP_START_WAIT BIT(0)
132#define HTC_OP_CONFIG_PIPE_CREDITS BIT(1)
133
134struct htc_target {
135 void *hif_dev;
136 struct ath9k_htc_priv *drv_priv;
137 struct device *dev;
138 struct ath9k_htc_hif *hif;
139 struct htc_endpoint endpoint[ENDPOINT_MAX];
140 struct completion target_wait;
141 struct completion cmd_wait;
142 struct list_head list;
143 enum htc_endpoint_id conn_rsp_epid;
144 u16 credits;
145 u16 credit_size;
146 u8 htc_flags;
147 atomic_t tgt_ready;
148};
149
150enum htc_msg_id {
151 HTC_MSG_READY_ID = 1,
152 HTC_MSG_CONNECT_SERVICE_ID,
153 HTC_MSG_CONNECT_SERVICE_RESPONSE_ID,
154 HTC_MSG_SETUP_COMPLETE_ID,
155 HTC_MSG_CONFIG_PIPE_ID,
156 HTC_MSG_CONFIG_PIPE_RESPONSE_ID,
157};
158
159struct htc_service_connreq {
160 u16 service_id;
161 u16 con_flags;
162 u32 max_send_qdepth;
163 struct htc_ep_callbacks ep_callbacks;
164};
165
166/* Current service IDs */
167
168enum htc_service_group_ids{
169 RSVD_SERVICE_GROUP = 0,
170 WMI_SERVICE_GROUP = 1,
171
172 HTC_SERVICE_GROUP_LAST = 255
173};
174
175#define MAKE_SERVICE_ID(group, index) \
176 (int)(((int)group << 8) | (int)(index))
177
178/* NOTE: service ID of 0x0000 is reserved and should never be used */
179#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
180#define HTC_LOOPBACK_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 2)
181
182#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
183#define WMI_BEACON_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
184#define WMI_CAB_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
185#define WMI_UAPSD_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
186#define WMI_MGMT_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
187#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 5)
188#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 6)
189#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 7)
190#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 8)
191
192struct htc_conn_svc_msg {
193 __be16 msg_id;
194 __be16 service_id;
195 __be16 con_flags;
196 u8 dl_pipeid;
197 u8 ul_pipeid;
198 u8 svc_meta_len;
199 u8 pad;
200} __packed;
201
202/* connect response status codes */
203#define HTC_SERVICE_SUCCESS 0
204#define HTC_SERVICE_NOT_FOUND 1
205#define HTC_SERVICE_FAILED 2
206#define HTC_SERVICE_NO_RESOURCES 3
207#define HTC_SERVICE_NO_MORE_EP 4
208
209struct htc_conn_svc_rspmsg {
210 __be16 msg_id;
211 __be16 service_id;
212 u8 status;
213 u8 endpoint_id;
214 __be16 max_msg_len;
215 u8 svc_meta_len;
216 u8 pad;
217} __packed;
218
219struct htc_comp_msg {
220 __be16 msg_id;
221} __packed;
222
223int htc_init(struct htc_target *target);
224int htc_connect_service(struct htc_target *target,
225 struct htc_service_connreq *service_connreq,
226 enum htc_endpoint_id *conn_rsp_eid);
227int htc_send(struct htc_target *target, struct sk_buff *skb,
228 enum htc_endpoint_id eid, struct ath9k_htc_tx_ctl *tx_ctl);
229void htc_stop(struct htc_target *target);
230void htc_start(struct htc_target *target);
231
232void ath9k_htc_rx_msg(struct htc_target *htc_handle,
233 struct sk_buff *skb, u32 len, u8 pipe_id);
234void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
235 struct sk_buff *skb, bool txok);
236
237struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
238 struct ath9k_htc_hif *hif,
239 struct device *dev);
240void ath9k_htc_hw_free(struct htc_target *htc);
241int ath9k_htc_hw_init(struct htc_target *target,
242 struct device *dev, u16 devid);
243void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
244
245#endif /* HTC_HST_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
new file mode 100644
index 000000000000..624422a8169e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -0,0 +1,280 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef ATH9K_HW_OPS_H
18#define ATH9K_HW_OPS_H
19
20#include "hw.h"
21
22/* Hardware core and driver accessible callbacks */
23
24static inline void ath9k_hw_configpcipowersave(struct ath_hw *ah,
25 int restore,
26 int power_off)
27{
28 ath9k_hw_ops(ah)->config_pci_powersave(ah, restore, power_off);
29}
30
31static inline void ath9k_hw_rxena(struct ath_hw *ah)
32{
33 ath9k_hw_ops(ah)->rx_enable(ah);
34}
35
36static inline void ath9k_hw_set_desc_link(struct ath_hw *ah, void *ds,
37 u32 link)
38{
39 ath9k_hw_ops(ah)->set_desc_link(ds, link);
40}
41
42static inline void ath9k_hw_get_desc_link(struct ath_hw *ah, void *ds,
43 u32 **link)
44{
45 ath9k_hw_ops(ah)->get_desc_link(ds, link);
46}
47static inline bool ath9k_hw_calibrate(struct ath_hw *ah,
48 struct ath9k_channel *chan,
49 u8 rxchainmask,
50 bool longcal)
51{
52 return ath9k_hw_ops(ah)->calibrate(ah, chan, rxchainmask, longcal);
53}
54
55static inline bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
56{
57 return ath9k_hw_ops(ah)->get_isr(ah, masked);
58}
59
60static inline void ath9k_hw_filltxdesc(struct ath_hw *ah, void *ds, u32 seglen,
61 bool is_firstseg, bool is_lastseg,
62 const void *ds0, dma_addr_t buf_addr,
63 unsigned int qcu)
64{
65 ath9k_hw_ops(ah)->fill_txdesc(ah, ds, seglen, is_firstseg, is_lastseg,
66 ds0, buf_addr, qcu);
67}
68
69static inline int ath9k_hw_txprocdesc(struct ath_hw *ah, void *ds,
70 struct ath_tx_status *ts)
71{
72 return ath9k_hw_ops(ah)->proc_txdesc(ah, ds, ts);
73}
74
75static inline void ath9k_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
76 u32 pktLen, enum ath9k_pkt_type type,
77 u32 txPower, u32 keyIx,
78 enum ath9k_key_type keyType,
79 u32 flags)
80{
81 ath9k_hw_ops(ah)->set11n_txdesc(ah, ds, pktLen, type, txPower, keyIx,
82 keyType, flags);
83}
84
85static inline void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
86 void *lastds,
87 u32 durUpdateEn, u32 rtsctsRate,
88 u32 rtsctsDuration,
89 struct ath9k_11n_rate_series series[],
90 u32 nseries, u32 flags)
91{
92 ath9k_hw_ops(ah)->set11n_ratescenario(ah, ds, lastds, durUpdateEn,
93 rtsctsRate, rtsctsDuration, series,
94 nseries, flags);
95}
96
97static inline void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
98 u32 aggrLen)
99{
100 ath9k_hw_ops(ah)->set11n_aggr_first(ah, ds, aggrLen);
101}
102
103static inline void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
104 u32 numDelims)
105{
106 ath9k_hw_ops(ah)->set11n_aggr_middle(ah, ds, numDelims);
107}
108
109static inline void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, void *ds)
110{
111 ath9k_hw_ops(ah)->set11n_aggr_last(ah, ds);
112}
113
114static inline void ath9k_hw_clr11n_aggr(struct ath_hw *ah, void *ds)
115{
116 ath9k_hw_ops(ah)->clr11n_aggr(ah, ds);
117}
118
119static inline void ath9k_hw_set11n_burstduration(struct ath_hw *ah, void *ds,
120 u32 burstDuration)
121{
122 ath9k_hw_ops(ah)->set11n_burstduration(ah, ds, burstDuration);
123}
124
125static inline void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, void *ds,
126 u32 vmf)
127{
128 ath9k_hw_ops(ah)->set11n_virtualmorefrag(ah, ds, vmf);
129}
130
131/* Private hardware call ops */
132
133/* PHY ops */
134
135static inline int ath9k_hw_rf_set_freq(struct ath_hw *ah,
136 struct ath9k_channel *chan)
137{
138 return ath9k_hw_private_ops(ah)->rf_set_freq(ah, chan);
139}
140
141static inline void ath9k_hw_spur_mitigate_freq(struct ath_hw *ah,
142 struct ath9k_channel *chan)
143{
144 ath9k_hw_private_ops(ah)->spur_mitigate_freq(ah, chan);
145}
146
147static inline int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
148{
149 if (!ath9k_hw_private_ops(ah)->rf_alloc_ext_banks)
150 return 0;
151
152 return ath9k_hw_private_ops(ah)->rf_alloc_ext_banks(ah);
153}
154
155static inline void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
156{
157 if (!ath9k_hw_private_ops(ah)->rf_free_ext_banks)
158 return;
159
160 ath9k_hw_private_ops(ah)->rf_free_ext_banks(ah);
161}
162
163static inline bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
164 struct ath9k_channel *chan,
165 u16 modesIndex)
166{
167 if (!ath9k_hw_private_ops(ah)->set_rf_regs)
168 return true;
169
170 return ath9k_hw_private_ops(ah)->set_rf_regs(ah, chan, modesIndex);
171}
172
173static inline void ath9k_hw_init_bb(struct ath_hw *ah,
174 struct ath9k_channel *chan)
175{
176 return ath9k_hw_private_ops(ah)->init_bb(ah, chan);
177}
178
179static inline void ath9k_hw_set_channel_regs(struct ath_hw *ah,
180 struct ath9k_channel *chan)
181{
182 return ath9k_hw_private_ops(ah)->set_channel_regs(ah, chan);
183}
184
185static inline int ath9k_hw_process_ini(struct ath_hw *ah,
186 struct ath9k_channel *chan)
187{
188 return ath9k_hw_private_ops(ah)->process_ini(ah, chan);
189}
190
191static inline void ath9k_olc_init(struct ath_hw *ah)
192{
193 if (!ath9k_hw_private_ops(ah)->olc_init)
194 return;
195
196 return ath9k_hw_private_ops(ah)->olc_init(ah);
197}
198
199static inline void ath9k_hw_set_rfmode(struct ath_hw *ah,
200 struct ath9k_channel *chan)
201{
202 return ath9k_hw_private_ops(ah)->set_rfmode(ah, chan);
203}
204
205static inline void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
206{
207 return ath9k_hw_private_ops(ah)->mark_phy_inactive(ah);
208}
209
210static inline void ath9k_hw_set_delta_slope(struct ath_hw *ah,
211 struct ath9k_channel *chan)
212{
213 return ath9k_hw_private_ops(ah)->set_delta_slope(ah, chan);
214}
215
216static inline bool ath9k_hw_rfbus_req(struct ath_hw *ah)
217{
218 return ath9k_hw_private_ops(ah)->rfbus_req(ah);
219}
220
221static inline void ath9k_hw_rfbus_done(struct ath_hw *ah)
222{
223 return ath9k_hw_private_ops(ah)->rfbus_done(ah);
224}
225
226static inline void ath9k_enable_rfkill(struct ath_hw *ah)
227{
228 return ath9k_hw_private_ops(ah)->enable_rfkill(ah);
229}
230
231static inline void ath9k_hw_restore_chainmask(struct ath_hw *ah)
232{
233 if (!ath9k_hw_private_ops(ah)->restore_chainmask)
234 return;
235
236 return ath9k_hw_private_ops(ah)->restore_chainmask(ah);
237}
238
239static inline void ath9k_hw_set_diversity(struct ath_hw *ah, bool value)
240{
241 return ath9k_hw_private_ops(ah)->set_diversity(ah, value);
242}
243
244static inline bool ath9k_hw_ani_control(struct ath_hw *ah,
245 enum ath9k_ani_cmd cmd, int param)
246{
247 return ath9k_hw_private_ops(ah)->ani_control(ah, cmd, param);
248}
249
250static inline void ath9k_hw_do_getnf(struct ath_hw *ah,
251 int16_t nfarray[NUM_NF_READINGS])
252{
253 ath9k_hw_private_ops(ah)->do_getnf(ah, nfarray);
254}
255
256static inline void ath9k_hw_loadnf(struct ath_hw *ah,
257 struct ath9k_channel *chan)
258{
259 ath9k_hw_private_ops(ah)->loadnf(ah, chan);
260}
261
262static inline bool ath9k_hw_init_cal(struct ath_hw *ah,
263 struct ath9k_channel *chan)
264{
265 return ath9k_hw_private_ops(ah)->init_cal(ah, chan);
266}
267
268static inline void ath9k_hw_setup_calibration(struct ath_hw *ah,
269 struct ath9k_cal_list *currCal)
270{
271 ath9k_hw_private_ops(ah)->setup_calibration(ah, currCal);
272}
273
274static inline bool ath9k_hw_iscal_supported(struct ath_hw *ah,
275 enum ath9k_cal_types calType)
276{
277 return ath9k_hw_private_ops(ah)->iscal_supported(ah, calType);
278}
279
280#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 78b571129c92..c33f17dbe6f1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -19,18 +19,16 @@
19#include <asm/unaligned.h> 19#include <asm/unaligned.h>
20 20
21#include "hw.h" 21#include "hw.h"
22#include "hw-ops.h"
22#include "rc.h" 23#include "rc.h"
23#include "initvals.h" 24#include "ar9003_mac.h"
24 25
25#define ATH9K_CLOCK_RATE_CCK 22 26#define ATH9K_CLOCK_RATE_CCK 22
26#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40 27#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
27#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 28#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
29#define ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM 44
28 30
29static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); 31static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
30static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan);
31static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
32 struct ar5416_eeprom_def *pEepData,
33 u32 reg, u32 value);
34 32
35MODULE_AUTHOR("Atheros Communications"); 33MODULE_AUTHOR("Atheros Communications");
36MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); 34MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
@@ -49,6 +47,39 @@ static void __exit ath9k_exit(void)
49} 47}
50module_exit(ath9k_exit); 48module_exit(ath9k_exit);
51 49
50/* Private hardware callbacks */
51
52static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
53{
54 ath9k_hw_private_ops(ah)->init_cal_settings(ah);
55}
56
57static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
58{
59 ath9k_hw_private_ops(ah)->init_mode_regs(ah);
60}
61
62static bool ath9k_hw_macversion_supported(struct ath_hw *ah)
63{
64 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
65
66 return priv_ops->macversion_supported(ah->hw_version.macVersion);
67}
68
69static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
70 struct ath9k_channel *chan)
71{
72 return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
73}
74
75static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
76{
77 if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
78 return;
79
80 ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
81}
82
52/********************/ 83/********************/
53/* Helper Functions */ 84/* Helper Functions */
54/********************/ 85/********************/
@@ -61,7 +92,11 @@ static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
61 return usecs *ATH9K_CLOCK_RATE_CCK; 92 return usecs *ATH9K_CLOCK_RATE_CCK;
62 if (conf->channel->band == IEEE80211_BAND_2GHZ) 93 if (conf->channel->band == IEEE80211_BAND_2GHZ)
63 return usecs *ATH9K_CLOCK_RATE_2GHZ_OFDM; 94 return usecs *ATH9K_CLOCK_RATE_2GHZ_OFDM;
64 return usecs *ATH9K_CLOCK_RATE_5GHZ_OFDM; 95
96 if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
97 return usecs * ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
98 else
99 return usecs * ATH9K_CLOCK_RATE_5GHZ_OFDM;
65} 100}
66 101
67static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs) 102static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
@@ -236,21 +271,6 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
236 } 271 }
237} 272}
238 273
239static int ath9k_hw_get_radiorev(struct ath_hw *ah)
240{
241 u32 val;
242 int i;
243
244 REG_WRITE(ah, AR_PHY(0x36), 0x00007058);
245
246 for (i = 0; i < 8; i++)
247 REG_WRITE(ah, AR_PHY(0x20), 0x00010000);
248 val = (REG_READ(ah, AR_PHY(256)) >> 24) & 0xff;
249 val = ((val & 0xf0) >> 4) | ((val & 0x0f) << 4);
250
251 return ath9k_hw_reverse_bits(val, 8);
252}
253
254/************************************/ 274/************************************/
255/* HW Attach, Detach, Init Routines */ 275/* HW Attach, Detach, Init Routines */
256/************************************/ 276/************************************/
@@ -260,6 +280,8 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
260 if (AR_SREV_9100(ah)) 280 if (AR_SREV_9100(ah))
261 return; 281 return;
262 282
283 ENABLE_REGWRITE_BUFFER(ah);
284
263 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); 285 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
264 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); 286 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
265 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029); 287 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
@@ -271,20 +293,30 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
271 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007); 293 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
272 294
273 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 295 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
296
297 REGWRITE_BUFFER_FLUSH(ah);
298 DISABLE_REGWRITE_BUFFER(ah);
274} 299}
275 300
301/* This should work for all families including legacy */
276static bool ath9k_hw_chip_test(struct ath_hw *ah) 302static bool ath9k_hw_chip_test(struct ath_hw *ah)
277{ 303{
278 struct ath_common *common = ath9k_hw_common(ah); 304 struct ath_common *common = ath9k_hw_common(ah);
279 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) }; 305 u32 regAddr[2] = { AR_STA_ID0 };
280 u32 regHold[2]; 306 u32 regHold[2];
281 u32 patternData[4] = { 0x55555555, 307 u32 patternData[4] = { 0x55555555,
282 0xaaaaaaaa, 308 0xaaaaaaaa,
283 0x66666666, 309 0x66666666,
284 0x99999999 }; 310 0x99999999 };
285 int i, j; 311 int i, j, loop_max;
312
313 if (!AR_SREV_9300_20_OR_LATER(ah)) {
314 loop_max = 2;
315 regAddr[1] = AR_PHY_BASE + (8 << 2);
316 } else
317 loop_max = 1;
286 318
287 for (i = 0; i < 2; i++) { 319 for (i = 0; i < loop_max; i++) {
288 u32 addr = regAddr[i]; 320 u32 addr = regAddr[i];
289 u32 wrData, rdData; 321 u32 wrData, rdData;
290 322
@@ -339,7 +371,13 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
339 ah->config.ofdm_trig_high = 500; 371 ah->config.ofdm_trig_high = 500;
340 ah->config.cck_trig_high = 200; 372 ah->config.cck_trig_high = 200;
341 ah->config.cck_trig_low = 100; 373 ah->config.cck_trig_low = 100;
342 ah->config.enable_ani = 1; 374
375 /*
376 * For now ANI is disabled for AR9003, it is still
377 * being tested.
378 */
379 if (!AR_SREV_9300_20_OR_LATER(ah))
380 ah->config.enable_ani = 1;
343 381
344 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 382 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
345 ah->config.spurchans[i][0] = AR_NO_SPUR; 383 ah->config.spurchans[i][0] = AR_NO_SPUR;
@@ -354,6 +392,12 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
354 ah->config.rx_intr_mitigation = true; 392 ah->config.rx_intr_mitigation = true;
355 393
356 /* 394 /*
395 * Tx IQ Calibration (ah->config.tx_iq_calibration) is only
396 * used by AR9003, but it is showing reliability issues.
397 * It will take a while to fix so this is currently disabled.
398 */
399
400 /*
357 * We need this for PCI devices only (Cardbus, PCI, miniPCI) 401 * We need this for PCI devices only (Cardbus, PCI, miniPCI)
358 * _and_ if on non-uniprocessor systems (Multiprocessor/HT). 402 * _and_ if on non-uniprocessor systems (Multiprocessor/HT).
359 * This means we use it for all AR5416 devices, and the few 403 * This means we use it for all AR5416 devices, and the few
@@ -372,7 +416,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
372 if (num_possible_cpus() > 1) 416 if (num_possible_cpus() > 1)
373 ah->config.serialize_regmode = SER_REG_MODE_AUTO; 417 ah->config.serialize_regmode = SER_REG_MODE_AUTO;
374} 418}
375EXPORT_SYMBOL(ath9k_hw_init);
376 419
377static void ath9k_hw_init_defaults(struct ath_hw *ah) 420static void ath9k_hw_init_defaults(struct ath_hw *ah)
378{ 421{
@@ -386,8 +429,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
386 ah->hw_version.subvendorid = 0; 429 ah->hw_version.subvendorid = 0;
387 430
388 ah->ah_flags = 0; 431 ah->ah_flags = 0;
389 if (ah->hw_version.devid == AR5416_AR9100_DEVID)
390 ah->hw_version.macVersion = AR_SREV_VERSION_9100;
391 if (!AR_SREV_9100(ah)) 432 if (!AR_SREV_9100(ah))
392 ah->ah_flags = AH_USE_EEPROM; 433 ah->ah_flags = AH_USE_EEPROM;
393 434
@@ -400,44 +441,17 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
400 ah->power_mode = ATH9K_PM_UNDEFINED; 441 ah->power_mode = ATH9K_PM_UNDEFINED;
401} 442}
402 443
403static int ath9k_hw_rf_claim(struct ath_hw *ah)
404{
405 u32 val;
406
407 REG_WRITE(ah, AR_PHY(0), 0x00000007);
408
409 val = ath9k_hw_get_radiorev(ah);
410 switch (val & AR_RADIO_SREV_MAJOR) {
411 case 0:
412 val = AR_RAD5133_SREV_MAJOR;
413 break;
414 case AR_RAD5133_SREV_MAJOR:
415 case AR_RAD5122_SREV_MAJOR:
416 case AR_RAD2133_SREV_MAJOR:
417 case AR_RAD2122_SREV_MAJOR:
418 break;
419 default:
420 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
421 "Radio Chip Rev 0x%02X not supported\n",
422 val & AR_RADIO_SREV_MAJOR);
423 return -EOPNOTSUPP;
424 }
425
426 ah->hw_version.analog5GhzRev = val;
427
428 return 0;
429}
430
431static int ath9k_hw_init_macaddr(struct ath_hw *ah) 444static int ath9k_hw_init_macaddr(struct ath_hw *ah)
432{ 445{
433 struct ath_common *common = ath9k_hw_common(ah); 446 struct ath_common *common = ath9k_hw_common(ah);
434 u32 sum; 447 u32 sum;
435 int i; 448 int i;
436 u16 eeval; 449 u16 eeval;
450 u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
437 451
438 sum = 0; 452 sum = 0;
439 for (i = 0; i < 3; i++) { 453 for (i = 0; i < 3; i++) {
440 eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i)); 454 eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]);
441 sum += eeval; 455 sum += eeval;
442 common->macaddr[2 * i] = eeval >> 8; 456 common->macaddr[2 * i] = eeval >> 8;
443 common->macaddr[2 * i + 1] = eeval & 0xff; 457 common->macaddr[2 * i + 1] = eeval & 0xff;
@@ -448,64 +462,20 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
448 return 0; 462 return 0;
449} 463}
450 464
451static void ath9k_hw_init_rxgain_ini(struct ath_hw *ah)
452{
453 u32 rxgain_type;
454
455 if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_17) {
456 rxgain_type = ah->eep_ops->get_eeprom(ah, EEP_RXGAIN_TYPE);
457
458 if (rxgain_type == AR5416_EEP_RXGAIN_13DB_BACKOFF)
459 INIT_INI_ARRAY(&ah->iniModesRxGain,
460 ar9280Modes_backoff_13db_rxgain_9280_2,
461 ARRAY_SIZE(ar9280Modes_backoff_13db_rxgain_9280_2), 6);
462 else if (rxgain_type == AR5416_EEP_RXGAIN_23DB_BACKOFF)
463 INIT_INI_ARRAY(&ah->iniModesRxGain,
464 ar9280Modes_backoff_23db_rxgain_9280_2,
465 ARRAY_SIZE(ar9280Modes_backoff_23db_rxgain_9280_2), 6);
466 else
467 INIT_INI_ARRAY(&ah->iniModesRxGain,
468 ar9280Modes_original_rxgain_9280_2,
469 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
470 } else {
471 INIT_INI_ARRAY(&ah->iniModesRxGain,
472 ar9280Modes_original_rxgain_9280_2,
473 ARRAY_SIZE(ar9280Modes_original_rxgain_9280_2), 6);
474 }
475}
476
477static void ath9k_hw_init_txgain_ini(struct ath_hw *ah)
478{
479 u32 txgain_type;
480
481 if (ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) >= AR5416_EEP_MINOR_VER_19) {
482 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
483
484 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER)
485 INIT_INI_ARRAY(&ah->iniModesTxGain,
486 ar9280Modes_high_power_tx_gain_9280_2,
487 ARRAY_SIZE(ar9280Modes_high_power_tx_gain_9280_2), 6);
488 else
489 INIT_INI_ARRAY(&ah->iniModesTxGain,
490 ar9280Modes_original_tx_gain_9280_2,
491 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
492 } else {
493 INIT_INI_ARRAY(&ah->iniModesTxGain,
494 ar9280Modes_original_tx_gain_9280_2,
495 ARRAY_SIZE(ar9280Modes_original_tx_gain_9280_2), 6);
496 }
497}
498
499static int ath9k_hw_post_init(struct ath_hw *ah) 465static int ath9k_hw_post_init(struct ath_hw *ah)
500{ 466{
501 int ecode; 467 int ecode;
502 468
503 if (!ath9k_hw_chip_test(ah)) 469 if (!AR_SREV_9271(ah)) {
504 return -ENODEV; 470 if (!ath9k_hw_chip_test(ah))
471 return -ENODEV;
472 }
505 473
506 ecode = ath9k_hw_rf_claim(ah); 474 if (!AR_SREV_9300_20_OR_LATER(ah)) {
507 if (ecode != 0) 475 ecode = ar9002_hw_rf_claim(ah);
508 return ecode; 476 if (ecode != 0)
477 return ecode;
478 }
509 479
510 ecode = ath9k_hw_eeprom_init(ah); 480 ecode = ath9k_hw_eeprom_init(ah);
511 if (ecode != 0) 481 if (ecode != 0)
@@ -516,14 +486,12 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
516 ah->eep_ops->get_eeprom_ver(ah), 486 ah->eep_ops->get_eeprom_ver(ah),
517 ah->eep_ops->get_eeprom_rev(ah)); 487 ah->eep_ops->get_eeprom_rev(ah));
518 488
519 if (!AR_SREV_9280_10_OR_LATER(ah)) { 489 ecode = ath9k_hw_rf_alloc_ext_banks(ah);
520 ecode = ath9k_hw_rf_alloc_ext_banks(ah); 490 if (ecode) {
521 if (ecode) { 491 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
522 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 492 "Failed allocating banks for "
523 "Failed allocating banks for " 493 "external radio\n");
524 "external radio\n"); 494 return ecode;
525 return ecode;
526 }
527 } 495 }
528 496
529 if (!AR_SREV_9100(ah)) { 497 if (!AR_SREV_9100(ah)) {
@@ -534,321 +502,22 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
534 return 0; 502 return 0;
535} 503}
536 504
537static bool ath9k_hw_devid_supported(u16 devid) 505static void ath9k_hw_attach_ops(struct ath_hw *ah)
538{ 506{
539 switch (devid) { 507 if (AR_SREV_9300_20_OR_LATER(ah))
540 case AR5416_DEVID_PCI: 508 ar9003_hw_attach_ops(ah);
541 case AR5416_DEVID_PCIE: 509 else
542 case AR5416_AR9100_DEVID: 510 ar9002_hw_attach_ops(ah);
543 case AR9160_DEVID_PCI:
544 case AR9280_DEVID_PCI:
545 case AR9280_DEVID_PCIE:
546 case AR9285_DEVID_PCIE:
547 case AR5416_DEVID_AR9287_PCI:
548 case AR5416_DEVID_AR9287_PCIE:
549 case AR9271_USB:
550 case AR2427_DEVID_PCIE:
551 return true;
552 default:
553 break;
554 }
555 return false;
556}
557
558static bool ath9k_hw_macversion_supported(u32 macversion)
559{
560 switch (macversion) {
561 case AR_SREV_VERSION_5416_PCI:
562 case AR_SREV_VERSION_5416_PCIE:
563 case AR_SREV_VERSION_9160:
564 case AR_SREV_VERSION_9100:
565 case AR_SREV_VERSION_9280:
566 case AR_SREV_VERSION_9285:
567 case AR_SREV_VERSION_9287:
568 case AR_SREV_VERSION_9271:
569 return true;
570 default:
571 break;
572 }
573 return false;
574}
575
576static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
577{
578 if (AR_SREV_9160_10_OR_LATER(ah)) {
579 if (AR_SREV_9280_10_OR_LATER(ah)) {
580 ah->iq_caldata.calData = &iq_cal_single_sample;
581 ah->adcgain_caldata.calData =
582 &adc_gain_cal_single_sample;
583 ah->adcdc_caldata.calData =
584 &adc_dc_cal_single_sample;
585 ah->adcdc_calinitdata.calData =
586 &adc_init_dc_cal;
587 } else {
588 ah->iq_caldata.calData = &iq_cal_multi_sample;
589 ah->adcgain_caldata.calData =
590 &adc_gain_cal_multi_sample;
591 ah->adcdc_caldata.calData =
592 &adc_dc_cal_multi_sample;
593 ah->adcdc_calinitdata.calData =
594 &adc_init_dc_cal;
595 }
596 ah->supp_cals = ADC_GAIN_CAL | ADC_DC_CAL | IQ_MISMATCH_CAL;
597 }
598}
599
600static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
601{
602 if (AR_SREV_9271(ah)) {
603 INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
604 ARRAY_SIZE(ar9271Modes_9271), 6);
605 INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
606 ARRAY_SIZE(ar9271Common_9271), 2);
607 INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
608 ar9271Modes_9271_1_0_only,
609 ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
610 return;
611 }
612
613 if (AR_SREV_9287_11_OR_LATER(ah)) {
614 INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_1,
615 ARRAY_SIZE(ar9287Modes_9287_1_1), 6);
616 INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_1,
617 ARRAY_SIZE(ar9287Common_9287_1_1), 2);
618 if (ah->config.pcie_clock_req)
619 INIT_INI_ARRAY(&ah->iniPcieSerdes,
620 ar9287PciePhy_clkreq_off_L1_9287_1_1,
621 ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_1), 2);
622 else
623 INIT_INI_ARRAY(&ah->iniPcieSerdes,
624 ar9287PciePhy_clkreq_always_on_L1_9287_1_1,
625 ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_1),
626 2);
627 } else if (AR_SREV_9287_10_OR_LATER(ah)) {
628 INIT_INI_ARRAY(&ah->iniModes, ar9287Modes_9287_1_0,
629 ARRAY_SIZE(ar9287Modes_9287_1_0), 6);
630 INIT_INI_ARRAY(&ah->iniCommon, ar9287Common_9287_1_0,
631 ARRAY_SIZE(ar9287Common_9287_1_0), 2);
632
633 if (ah->config.pcie_clock_req)
634 INIT_INI_ARRAY(&ah->iniPcieSerdes,
635 ar9287PciePhy_clkreq_off_L1_9287_1_0,
636 ARRAY_SIZE(ar9287PciePhy_clkreq_off_L1_9287_1_0), 2);
637 else
638 INIT_INI_ARRAY(&ah->iniPcieSerdes,
639 ar9287PciePhy_clkreq_always_on_L1_9287_1_0,
640 ARRAY_SIZE(ar9287PciePhy_clkreq_always_on_L1_9287_1_0),
641 2);
642 } else if (AR_SREV_9285_12_OR_LATER(ah)) {
643
644
645 INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285_1_2,
646 ARRAY_SIZE(ar9285Modes_9285_1_2), 6);
647 INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285_1_2,
648 ARRAY_SIZE(ar9285Common_9285_1_2), 2);
649
650 if (ah->config.pcie_clock_req) {
651 INIT_INI_ARRAY(&ah->iniPcieSerdes,
652 ar9285PciePhy_clkreq_off_L1_9285_1_2,
653 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285_1_2), 2);
654 } else {
655 INIT_INI_ARRAY(&ah->iniPcieSerdes,
656 ar9285PciePhy_clkreq_always_on_L1_9285_1_2,
657 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285_1_2),
658 2);
659 }
660 } else if (AR_SREV_9285_10_OR_LATER(ah)) {
661 INIT_INI_ARRAY(&ah->iniModes, ar9285Modes_9285,
662 ARRAY_SIZE(ar9285Modes_9285), 6);
663 INIT_INI_ARRAY(&ah->iniCommon, ar9285Common_9285,
664 ARRAY_SIZE(ar9285Common_9285), 2);
665
666 if (ah->config.pcie_clock_req) {
667 INIT_INI_ARRAY(&ah->iniPcieSerdes,
668 ar9285PciePhy_clkreq_off_L1_9285,
669 ARRAY_SIZE(ar9285PciePhy_clkreq_off_L1_9285), 2);
670 } else {
671 INIT_INI_ARRAY(&ah->iniPcieSerdes,
672 ar9285PciePhy_clkreq_always_on_L1_9285,
673 ARRAY_SIZE(ar9285PciePhy_clkreq_always_on_L1_9285), 2);
674 }
675 } else if (AR_SREV_9280_20_OR_LATER(ah)) {
676 INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280_2,
677 ARRAY_SIZE(ar9280Modes_9280_2), 6);
678 INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280_2,
679 ARRAY_SIZE(ar9280Common_9280_2), 2);
680
681 if (ah->config.pcie_clock_req) {
682 INIT_INI_ARRAY(&ah->iniPcieSerdes,
683 ar9280PciePhy_clkreq_off_L1_9280,
684 ARRAY_SIZE(ar9280PciePhy_clkreq_off_L1_9280),2);
685 } else {
686 INIT_INI_ARRAY(&ah->iniPcieSerdes,
687 ar9280PciePhy_clkreq_always_on_L1_9280,
688 ARRAY_SIZE(ar9280PciePhy_clkreq_always_on_L1_9280), 2);
689 }
690 INIT_INI_ARRAY(&ah->iniModesAdditional,
691 ar9280Modes_fast_clock_9280_2,
692 ARRAY_SIZE(ar9280Modes_fast_clock_9280_2), 3);
693 } else if (AR_SREV_9280_10_OR_LATER(ah)) {
694 INIT_INI_ARRAY(&ah->iniModes, ar9280Modes_9280,
695 ARRAY_SIZE(ar9280Modes_9280), 6);
696 INIT_INI_ARRAY(&ah->iniCommon, ar9280Common_9280,
697 ARRAY_SIZE(ar9280Common_9280), 2);
698 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
699 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9160,
700 ARRAY_SIZE(ar5416Modes_9160), 6);
701 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9160,
702 ARRAY_SIZE(ar5416Common_9160), 2);
703 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9160,
704 ARRAY_SIZE(ar5416Bank0_9160), 2);
705 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9160,
706 ARRAY_SIZE(ar5416BB_RfGain_9160), 3);
707 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9160,
708 ARRAY_SIZE(ar5416Bank1_9160), 2);
709 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9160,
710 ARRAY_SIZE(ar5416Bank2_9160), 2);
711 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9160,
712 ARRAY_SIZE(ar5416Bank3_9160), 3);
713 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9160,
714 ARRAY_SIZE(ar5416Bank6_9160), 3);
715 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9160,
716 ARRAY_SIZE(ar5416Bank6TPC_9160), 3);
717 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9160,
718 ARRAY_SIZE(ar5416Bank7_9160), 2);
719 if (AR_SREV_9160_11(ah)) {
720 INIT_INI_ARRAY(&ah->iniAddac,
721 ar5416Addac_91601_1,
722 ARRAY_SIZE(ar5416Addac_91601_1), 2);
723 } else {
724 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9160,
725 ARRAY_SIZE(ar5416Addac_9160), 2);
726 }
727 } else if (AR_SREV_9100_OR_LATER(ah)) {
728 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes_9100,
729 ARRAY_SIZE(ar5416Modes_9100), 6);
730 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common_9100,
731 ARRAY_SIZE(ar5416Common_9100), 2);
732 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0_9100,
733 ARRAY_SIZE(ar5416Bank0_9100), 2);
734 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain_9100,
735 ARRAY_SIZE(ar5416BB_RfGain_9100), 3);
736 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1_9100,
737 ARRAY_SIZE(ar5416Bank1_9100), 2);
738 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2_9100,
739 ARRAY_SIZE(ar5416Bank2_9100), 2);
740 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3_9100,
741 ARRAY_SIZE(ar5416Bank3_9100), 3);
742 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6_9100,
743 ARRAY_SIZE(ar5416Bank6_9100), 3);
744 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC_9100,
745 ARRAY_SIZE(ar5416Bank6TPC_9100), 3);
746 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7_9100,
747 ARRAY_SIZE(ar5416Bank7_9100), 2);
748 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac_9100,
749 ARRAY_SIZE(ar5416Addac_9100), 2);
750 } else {
751 INIT_INI_ARRAY(&ah->iniModes, ar5416Modes,
752 ARRAY_SIZE(ar5416Modes), 6);
753 INIT_INI_ARRAY(&ah->iniCommon, ar5416Common,
754 ARRAY_SIZE(ar5416Common), 2);
755 INIT_INI_ARRAY(&ah->iniBank0, ar5416Bank0,
756 ARRAY_SIZE(ar5416Bank0), 2);
757 INIT_INI_ARRAY(&ah->iniBB_RfGain, ar5416BB_RfGain,
758 ARRAY_SIZE(ar5416BB_RfGain), 3);
759 INIT_INI_ARRAY(&ah->iniBank1, ar5416Bank1,
760 ARRAY_SIZE(ar5416Bank1), 2);
761 INIT_INI_ARRAY(&ah->iniBank2, ar5416Bank2,
762 ARRAY_SIZE(ar5416Bank2), 2);
763 INIT_INI_ARRAY(&ah->iniBank3, ar5416Bank3,
764 ARRAY_SIZE(ar5416Bank3), 3);
765 INIT_INI_ARRAY(&ah->iniBank6, ar5416Bank6,
766 ARRAY_SIZE(ar5416Bank6), 3);
767 INIT_INI_ARRAY(&ah->iniBank6TPC, ar5416Bank6TPC,
768 ARRAY_SIZE(ar5416Bank6TPC), 3);
769 INIT_INI_ARRAY(&ah->iniBank7, ar5416Bank7,
770 ARRAY_SIZE(ar5416Bank7), 2);
771 INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
772 ARRAY_SIZE(ar5416Addac), 2);
773 }
774}
775
776static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
777{
778 if (AR_SREV_9287_11_OR_LATER(ah))
779 INIT_INI_ARRAY(&ah->iniModesRxGain,
780 ar9287Modes_rx_gain_9287_1_1,
781 ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_1), 6);
782 else if (AR_SREV_9287_10(ah))
783 INIT_INI_ARRAY(&ah->iniModesRxGain,
784 ar9287Modes_rx_gain_9287_1_0,
785 ARRAY_SIZE(ar9287Modes_rx_gain_9287_1_0), 6);
786 else if (AR_SREV_9280_20(ah))
787 ath9k_hw_init_rxgain_ini(ah);
788
789 if (AR_SREV_9287_11_OR_LATER(ah)) {
790 INIT_INI_ARRAY(&ah->iniModesTxGain,
791 ar9287Modes_tx_gain_9287_1_1,
792 ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_1), 6);
793 } else if (AR_SREV_9287_10(ah)) {
794 INIT_INI_ARRAY(&ah->iniModesTxGain,
795 ar9287Modes_tx_gain_9287_1_0,
796 ARRAY_SIZE(ar9287Modes_tx_gain_9287_1_0), 6);
797 } else if (AR_SREV_9280_20(ah)) {
798 ath9k_hw_init_txgain_ini(ah);
799 } else if (AR_SREV_9285_12_OR_LATER(ah)) {
800 u32 txgain_type = ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE);
801
802 /* txgain table */
803 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
804 INIT_INI_ARRAY(&ah->iniModesTxGain,
805 ar9285Modes_high_power_tx_gain_9285_1_2,
806 ARRAY_SIZE(ar9285Modes_high_power_tx_gain_9285_1_2), 6);
807 } else {
808 INIT_INI_ARRAY(&ah->iniModesTxGain,
809 ar9285Modes_original_tx_gain_9285_1_2,
810 ARRAY_SIZE(ar9285Modes_original_tx_gain_9285_1_2), 6);
811 }
812
813 }
814}
815
816static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
817{
818 u32 i, j;
819
820 if (ah->hw_version.devid == AR9280_DEVID_PCI) {
821
822 /* EEPROM Fixup */
823 for (i = 0; i < ah->iniModes.ia_rows; i++) {
824 u32 reg = INI_RA(&ah->iniModes, i, 0);
825
826 for (j = 1; j < ah->iniModes.ia_columns; j++) {
827 u32 val = INI_RA(&ah->iniModes, i, j);
828
829 INI_RA(&ah->iniModes, i, j) =
830 ath9k_hw_ini_fixup(ah,
831 &ah->eeprom.def,
832 reg, val);
833 }
834 }
835 }
836} 511}
837 512
838int ath9k_hw_init(struct ath_hw *ah) 513/* Called for all hardware families */
514static int __ath9k_hw_init(struct ath_hw *ah)
839{ 515{
840 struct ath_common *common = ath9k_hw_common(ah); 516 struct ath_common *common = ath9k_hw_common(ah);
841 int r = 0; 517 int r = 0;
842 518
843 if (!ath9k_hw_devid_supported(ah->hw_version.devid)) { 519 if (ah->hw_version.devid == AR5416_AR9100_DEVID)
844 ath_print(common, ATH_DBG_FATAL, 520 ah->hw_version.macVersion = AR_SREV_VERSION_9100;
845 "Unsupported device ID: 0x%0x\n",
846 ah->hw_version.devid);
847 return -EOPNOTSUPP;
848 }
849
850 ath9k_hw_init_defaults(ah);
851 ath9k_hw_init_config(ah);
852 521
853 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 522 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
854 ath_print(common, ATH_DBG_FATAL, 523 ath_print(common, ATH_DBG_FATAL,
@@ -856,6 +525,11 @@ int ath9k_hw_init(struct ath_hw *ah)
856 return -EIO; 525 return -EIO;
857 } 526 }
858 527
528 ath9k_hw_init_defaults(ah);
529 ath9k_hw_init_config(ah);
530
531 ath9k_hw_attach_ops(ah);
532
859 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { 533 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
860 ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n"); 534 ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
861 return -EIO; 535 return -EIO;
@@ -880,7 +554,7 @@ int ath9k_hw_init(struct ath_hw *ah)
880 else 554 else
881 ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD; 555 ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
882 556
883 if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) { 557 if (!ath9k_hw_macversion_supported(ah)) {
884 ath_print(common, ATH_DBG_FATAL, 558 ath_print(common, ATH_DBG_FATAL,
885 "Mac Chip Rev 0x%02x.%x is not supported by " 559 "Mac Chip Rev 0x%02x.%x is not supported by "
886 "this driver\n", ah->hw_version.macVersion, 560 "this driver\n", ah->hw_version.macVersion,
@@ -888,45 +562,45 @@ int ath9k_hw_init(struct ath_hw *ah)
888 return -EOPNOTSUPP; 562 return -EOPNOTSUPP;
889 } 563 }
890 564
891 if (AR_SREV_9100(ah)) { 565 if (AR_SREV_9271(ah) || AR_SREV_9100(ah))
892 ah->iq_caldata.calData = &iq_cal_multi_sample;
893 ah->supp_cals = IQ_MISMATCH_CAL;
894 ah->is_pciexpress = false;
895 }
896
897 if (AR_SREV_9271(ah))
898 ah->is_pciexpress = false; 566 ah->is_pciexpress = false;
899 567
900 ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID); 568 ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
901
902 ath9k_hw_init_cal_settings(ah); 569 ath9k_hw_init_cal_settings(ah);
903 570
904 ah->ani_function = ATH9K_ANI_ALL; 571 ah->ani_function = ATH9K_ANI_ALL;
905 if (AR_SREV_9280_10_OR_LATER(ah)) { 572 if (AR_SREV_9280_10_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
906 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; 573 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
907 ah->ath9k_hw_rf_set_freq = &ath9k_hw_ar9280_set_channel;
908 ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_9280_spur_mitigate;
909 } else {
910 ah->ath9k_hw_rf_set_freq = &ath9k_hw_set_channel;
911 ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_spur_mitigate;
912 }
913 574
914 ath9k_hw_init_mode_regs(ah); 575 ath9k_hw_init_mode_regs(ah);
915 576
577 /*
578 * Configire PCIE after Ini init. SERDES values now come from ini file
579 * This enables PCIe low power mode.
580 */
581 if (AR_SREV_9300_20_OR_LATER(ah)) {
582 u32 regval;
583 unsigned int i;
584
585 /* Set Bits 16 and 17 in the AR_WA register. */
586 regval = REG_READ(ah, AR_WA);
587 regval |= 0x00030000;
588 REG_WRITE(ah, AR_WA, regval);
589
590 for (i = 0; i < ah->iniPcieSerdesLowPower.ia_rows; i++) {
591 REG_WRITE(ah,
592 INI_RA(&ah->iniPcieSerdesLowPower, i, 0),
593 INI_RA(&ah->iniPcieSerdesLowPower, i, 1));
594 }
595 }
596
916 if (ah->is_pciexpress) 597 if (ah->is_pciexpress)
917 ath9k_hw_configpcipowersave(ah, 0, 0); 598 ath9k_hw_configpcipowersave(ah, 0, 0);
918 else 599 else
919 ath9k_hw_disablepcie(ah); 600 ath9k_hw_disablepcie(ah);
920 601
921 /* Support for Japan ch.14 (2484) spread */ 602 if (!AR_SREV_9300_20_OR_LATER(ah))
922 if (AR_SREV_9287_11_OR_LATER(ah)) { 603 ar9002_hw_cck_chan14_spread(ah);
923 INIT_INI_ARRAY(&ah->iniCckfirNormal,
924 ar9287Common_normal_cck_fir_coeff_92871_1,
925 ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 2);
926 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
927 ar9287Common_japan_2484_cck_fir_coeff_92871_1,
928 ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 2);
929 }
930 604
931 r = ath9k_hw_post_init(ah); 605 r = ath9k_hw_post_init(ah);
932 if (r) 606 if (r)
@@ -937,8 +611,6 @@ int ath9k_hw_init(struct ath_hw *ah)
937 if (r) 611 if (r)
938 return r; 612 return r;
939 613
940 ath9k_hw_init_eeprom_fix(ah);
941
942 r = ath9k_hw_init_macaddr(ah); 614 r = ath9k_hw_init_macaddr(ah);
943 if (r) { 615 if (r) {
944 ath_print(common, ATH_DBG_FATAL, 616 ath_print(common, ATH_DBG_FATAL,
@@ -951,6 +623,9 @@ int ath9k_hw_init(struct ath_hw *ah)
951 else 623 else
952 ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S); 624 ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
953 625
626 if (AR_SREV_9300_20_OR_LATER(ah))
627 ar9003_hw_set_nf_limits(ah);
628
954 ath9k_init_nfcal_hist_buffer(ah); 629 ath9k_init_nfcal_hist_buffer(ah);
955 630
956 common->state = ATH_HW_INITIALIZED; 631 common->state = ATH_HW_INITIALIZED;
@@ -958,24 +633,50 @@ int ath9k_hw_init(struct ath_hw *ah)
958 return 0; 633 return 0;
959} 634}
960 635
961static void ath9k_hw_init_bb(struct ath_hw *ah, 636int ath9k_hw_init(struct ath_hw *ah)
962 struct ath9k_channel *chan)
963{ 637{
964 u32 synthDelay; 638 int ret;
639 struct ath_common *common = ath9k_hw_common(ah);
965 640
966 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; 641 /* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */
967 if (IS_CHAN_B(chan)) 642 switch (ah->hw_version.devid) {
968 synthDelay = (4 * synthDelay) / 22; 643 case AR5416_DEVID_PCI:
969 else 644 case AR5416_DEVID_PCIE:
970 synthDelay /= 10; 645 case AR5416_AR9100_DEVID:
646 case AR9160_DEVID_PCI:
647 case AR9280_DEVID_PCI:
648 case AR9280_DEVID_PCIE:
649 case AR9285_DEVID_PCIE:
650 case AR9287_DEVID_PCI:
651 case AR9287_DEVID_PCIE:
652 case AR2427_DEVID_PCIE:
653 case AR9300_DEVID_PCIE:
654 break;
655 default:
656 if (common->bus_ops->ath_bus_type == ATH_USB)
657 break;
658 ath_print(common, ATH_DBG_FATAL,
659 "Hardware device ID 0x%04x not supported\n",
660 ah->hw_version.devid);
661 return -EOPNOTSUPP;
662 }
971 663
972 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); 664 ret = __ath9k_hw_init(ah);
665 if (ret) {
666 ath_print(common, ATH_DBG_FATAL,
667 "Unable to initialize hardware; "
668 "initialization status: %d\n", ret);
669 return ret;
670 }
973 671
974 udelay(synthDelay + BASE_ACTIVATE_DELAY); 672 return 0;
975} 673}
674EXPORT_SYMBOL(ath9k_hw_init);
976 675
977static void ath9k_hw_init_qos(struct ath_hw *ah) 676static void ath9k_hw_init_qos(struct ath_hw *ah)
978{ 677{
678 ENABLE_REGWRITE_BUFFER(ah);
679
979 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa); 680 REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
980 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210); 681 REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
981 682
@@ -989,105 +690,22 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
989 REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF); 690 REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
990 REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF); 691 REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
991 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF); 692 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
992}
993
994static void ath9k_hw_change_target_baud(struct ath_hw *ah, u32 freq, u32 baud)
995{
996 u32 lcr;
997 u32 baud_divider = freq * 1000 * 1000 / 16 / baud;
998
999 lcr = REG_READ(ah , 0x5100c);
1000 lcr |= 0x80;
1001 693
1002 REG_WRITE(ah, 0x5100c, lcr); 694 REGWRITE_BUFFER_FLUSH(ah);
1003 REG_WRITE(ah, 0x51004, (baud_divider >> 8)); 695 DISABLE_REGWRITE_BUFFER(ah);
1004 REG_WRITE(ah, 0x51000, (baud_divider & 0xff));
1005
1006 lcr &= ~0x80;
1007 REG_WRITE(ah, 0x5100c, lcr);
1008} 696}
1009 697
1010static void ath9k_hw_init_pll(struct ath_hw *ah, 698static void ath9k_hw_init_pll(struct ath_hw *ah,
1011 struct ath9k_channel *chan) 699 struct ath9k_channel *chan)
1012{ 700{
1013 u32 pll; 701 u32 pll = ath9k_hw_compute_pll_control(ah, chan);
1014
1015 if (AR_SREV_9100(ah)) {
1016 if (chan && IS_CHAN_5GHZ(chan))
1017 pll = 0x1450;
1018 else
1019 pll = 0x1458;
1020 } else {
1021 if (AR_SREV_9280_10_OR_LATER(ah)) {
1022 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
1023
1024 if (chan && IS_CHAN_HALF_RATE(chan))
1025 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
1026 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1027 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
1028
1029 if (chan && IS_CHAN_5GHZ(chan)) {
1030 pll |= SM(0x28, AR_RTC_9160_PLL_DIV);
1031
1032
1033 if (AR_SREV_9280_20(ah)) {
1034 if (((chan->channel % 20) == 0)
1035 || ((chan->channel % 10) == 0))
1036 pll = 0x2850;
1037 else
1038 pll = 0x142c;
1039 }
1040 } else {
1041 pll |= SM(0x2c, AR_RTC_9160_PLL_DIV);
1042 }
1043
1044 } else if (AR_SREV_9160_10_OR_LATER(ah)) {
1045
1046 pll = SM(0x5, AR_RTC_9160_PLL_REFDIV);
1047
1048 if (chan && IS_CHAN_HALF_RATE(chan))
1049 pll |= SM(0x1, AR_RTC_9160_PLL_CLKSEL);
1050 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1051 pll |= SM(0x2, AR_RTC_9160_PLL_CLKSEL);
1052
1053 if (chan && IS_CHAN_5GHZ(chan))
1054 pll |= SM(0x50, AR_RTC_9160_PLL_DIV);
1055 else
1056 pll |= SM(0x58, AR_RTC_9160_PLL_DIV);
1057 } else {
1058 pll = AR_RTC_PLL_REFDIV_5 | AR_RTC_PLL_DIV2;
1059
1060 if (chan && IS_CHAN_HALF_RATE(chan))
1061 pll |= SM(0x1, AR_RTC_PLL_CLKSEL);
1062 else if (chan && IS_CHAN_QUARTER_RATE(chan))
1063 pll |= SM(0x2, AR_RTC_PLL_CLKSEL);
1064 702
1065 if (chan && IS_CHAN_5GHZ(chan))
1066 pll |= SM(0xa, AR_RTC_PLL_DIV);
1067 else
1068 pll |= SM(0xb, AR_RTC_PLL_DIV);
1069 }
1070 }
1071 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); 703 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
1072 704
1073 /* Switch the core clock for ar9271 to 117Mhz */ 705 /* Switch the core clock for ar9271 to 117Mhz */
1074 if (AR_SREV_9271(ah)) { 706 if (AR_SREV_9271(ah)) {
1075 if ((pll == 0x142c) || (pll == 0x2850) ) { 707 udelay(500);
1076 udelay(500); 708 REG_WRITE(ah, 0x50040, 0x304);
1077 /* set CLKOBS to output AHB clock */
1078 REG_WRITE(ah, 0x7020, 0xe);
1079 /*
1080 * 0x304: 117Mhz, ahb_ratio: 1x1
1081 * 0x306: 40Mhz, ahb_ratio: 1x1
1082 */
1083 REG_WRITE(ah, 0x50040, 0x304);
1084 /*
1085 * makes adjustments for the baud dividor to keep the
1086 * targetted baud rate based on the used core clock.
1087 */
1088 ath9k_hw_change_target_baud(ah, AR9271_CORE_CLOCK,
1089 AR9271_TARGET_BAUD_RATE);
1090 }
1091 } 709 }
1092 710
1093 udelay(RTC_PLL_SETTLE_DELAY); 711 udelay(RTC_PLL_SETTLE_DELAY);
@@ -1095,70 +713,58 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
1095 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); 713 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
1096} 714}
1097 715
1098static void ath9k_hw_init_chain_masks(struct ath_hw *ah)
1099{
1100 int rx_chainmask, tx_chainmask;
1101
1102 rx_chainmask = ah->rxchainmask;
1103 tx_chainmask = ah->txchainmask;
1104
1105 switch (rx_chainmask) {
1106 case 0x5:
1107 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
1108 AR_PHY_SWAP_ALT_CHAIN);
1109 case 0x3:
1110 if (ah->hw_version.macVersion == AR_SREV_REVISION_5416_10) {
1111 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, 0x7);
1112 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7);
1113 break;
1114 }
1115 case 0x1:
1116 case 0x2:
1117 case 0x7:
1118 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
1119 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
1120 break;
1121 default:
1122 break;
1123 }
1124
1125 REG_WRITE(ah, AR_SELFGEN_MASK, tx_chainmask);
1126 if (tx_chainmask == 0x5) {
1127 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
1128 AR_PHY_SWAP_ALT_CHAIN);
1129 }
1130 if (AR_SREV_9100(ah))
1131 REG_WRITE(ah, AR_PHY_ANALOG_SWAP,
1132 REG_READ(ah, AR_PHY_ANALOG_SWAP) | 0x00000001);
1133}
1134
1135static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, 716static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1136 enum nl80211_iftype opmode) 717 enum nl80211_iftype opmode)
1137{ 718{
1138 ah->mask_reg = AR_IMR_TXERR | 719 u32 imr_reg = AR_IMR_TXERR |
1139 AR_IMR_TXURN | 720 AR_IMR_TXURN |
1140 AR_IMR_RXERR | 721 AR_IMR_RXERR |
1141 AR_IMR_RXORN | 722 AR_IMR_RXORN |
1142 AR_IMR_BCNMISC; 723 AR_IMR_BCNMISC;
1143 724
1144 if (ah->config.rx_intr_mitigation) 725 if (AR_SREV_9300_20_OR_LATER(ah)) {
1145 ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; 726 imr_reg |= AR_IMR_RXOK_HP;
1146 else 727 if (ah->config.rx_intr_mitigation)
1147 ah->mask_reg |= AR_IMR_RXOK; 728 imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
729 else
730 imr_reg |= AR_IMR_RXOK_LP;
1148 731
1149 ah->mask_reg |= AR_IMR_TXOK; 732 } else {
733 if (ah->config.rx_intr_mitigation)
734 imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
735 else
736 imr_reg |= AR_IMR_RXOK;
737 }
738
739 if (ah->config.tx_intr_mitigation)
740 imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR;
741 else
742 imr_reg |= AR_IMR_TXOK;
1150 743
1151 if (opmode == NL80211_IFTYPE_AP) 744 if (opmode == NL80211_IFTYPE_AP)
1152 ah->mask_reg |= AR_IMR_MIB; 745 imr_reg |= AR_IMR_MIB;
746
747 ENABLE_REGWRITE_BUFFER(ah);
1153 748
1154 REG_WRITE(ah, AR_IMR, ah->mask_reg); 749 REG_WRITE(ah, AR_IMR, imr_reg);
1155 REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT); 750 ah->imrs2_reg |= AR_IMR_S2_GTT;
751 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
1156 752
1157 if (!AR_SREV_9100(ah)) { 753 if (!AR_SREV_9100(ah)) {
1158 REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF); 754 REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
1159 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT); 755 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, AR_INTR_SYNC_DEFAULT);
1160 REG_WRITE(ah, AR_INTR_SYNC_MASK, 0); 756 REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
1161 } 757 }
758
759 REGWRITE_BUFFER_FLUSH(ah);
760 DISABLE_REGWRITE_BUFFER(ah);
761
762 if (AR_SREV_9300_20_OR_LATER(ah)) {
763 REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
764 REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0);
765 REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0);
766 REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0);
767 }
1162} 768}
1163 769
1164static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us) 770static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
@@ -1241,19 +847,13 @@ void ath9k_hw_deinit(struct ath_hw *ah)
1241{ 847{
1242 struct ath_common *common = ath9k_hw_common(ah); 848 struct ath_common *common = ath9k_hw_common(ah);
1243 849
1244 if (common->state <= ATH_HW_INITIALIZED) 850 if (common->state < ATH_HW_INITIALIZED)
1245 goto free_hw; 851 goto free_hw;
1246 852
1247 if (!AR_SREV_9100(ah))
1248 ath9k_hw_ani_disable(ah);
1249
1250 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); 853 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1251 854
1252free_hw: 855free_hw:
1253 if (!AR_SREV_9280_10_OR_LATER(ah)) 856 ath9k_hw_rf_free_ext_banks(ah);
1254 ath9k_hw_rf_free_ext_banks(ah);
1255 kfree(ah);
1256 ah = NULL;
1257} 857}
1258EXPORT_SYMBOL(ath9k_hw_deinit); 858EXPORT_SYMBOL(ath9k_hw_deinit);
1259 859
@@ -1261,136 +861,7 @@ EXPORT_SYMBOL(ath9k_hw_deinit);
1261/* INI */ 861/* INI */
1262/*******/ 862/*******/
1263 863
1264static void ath9k_hw_override_ini(struct ath_hw *ah, 864u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
1265 struct ath9k_channel *chan)
1266{
1267 u32 val;
1268
1269 if (AR_SREV_9271(ah)) {
1270 /*
1271 * Enable spectral scan to solution for issues with stuck
1272 * beacons on AR9271 1.0. The beacon stuck issue is not seeon on
1273 * AR9271 1.1
1274 */
1275 if (AR_SREV_9271_10(ah)) {
1276 val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) |
1277 AR_PHY_SPECTRAL_SCAN_ENABLE;
1278 REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val);
1279 }
1280 else if (AR_SREV_9271_11(ah))
1281 /*
1282 * change AR_PHY_RF_CTL3 setting to fix MAC issue
1283 * present on AR9271 1.1
1284 */
1285 REG_WRITE(ah, AR_PHY_RF_CTL3, 0x3a020001);
1286 return;
1287 }
1288
1289 /*
1290 * Set the RX_ABORT and RX_DIS and clear if off only after
1291 * RXE is set for MAC. This prevents frames with corrupted
1292 * descriptor status.
1293 */
1294 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
1295
1296 if (AR_SREV_9280_10_OR_LATER(ah)) {
1297 val = REG_READ(ah, AR_PCU_MISC_MODE2) &
1298 (~AR_PCU_MISC_MODE2_HWWAR1);
1299
1300 if (AR_SREV_9287_10_OR_LATER(ah))
1301 val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
1302
1303 REG_WRITE(ah, AR_PCU_MISC_MODE2, val);
1304 }
1305
1306 if (!AR_SREV_5416_20_OR_LATER(ah) ||
1307 AR_SREV_9280_10_OR_LATER(ah))
1308 return;
1309 /*
1310 * Disable BB clock gating
1311 * Necessary to avoid issues on AR5416 2.0
1312 */
1313 REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
1314
1315 /*
1316 * Disable RIFS search on some chips to avoid baseband
1317 * hang issues.
1318 */
1319 if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
1320 val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
1321 val &= ~AR_PHY_RIFS_INIT_DELAY;
1322 REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
1323 }
1324}
1325
1326static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
1327 struct ar5416_eeprom_def *pEepData,
1328 u32 reg, u32 value)
1329{
1330 struct base_eep_header *pBase = &(pEepData->baseEepHeader);
1331 struct ath_common *common = ath9k_hw_common(ah);
1332
1333 switch (ah->hw_version.devid) {
1334 case AR9280_DEVID_PCI:
1335 if (reg == 0x7894) {
1336 ath_print(common, ATH_DBG_EEPROM,
1337 "ini VAL: %x EEPROM: %x\n", value,
1338 (pBase->version & 0xff));
1339
1340 if ((pBase->version & 0xff) > 0x0a) {
1341 ath_print(common, ATH_DBG_EEPROM,
1342 "PWDCLKIND: %d\n",
1343 pBase->pwdclkind);
1344 value &= ~AR_AN_TOP2_PWDCLKIND;
1345 value |= AR_AN_TOP2_PWDCLKIND &
1346 (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
1347 } else {
1348 ath_print(common, ATH_DBG_EEPROM,
1349 "PWDCLKIND Earlier Rev\n");
1350 }
1351
1352 ath_print(common, ATH_DBG_EEPROM,
1353 "final ini VAL: %x\n", value);
1354 }
1355 break;
1356 }
1357
1358 return value;
1359}
1360
1361static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
1362 struct ar5416_eeprom_def *pEepData,
1363 u32 reg, u32 value)
1364{
1365 if (ah->eep_map == EEP_MAP_4KBITS)
1366 return value;
1367 else
1368 return ath9k_hw_def_ini_fixup(ah, pEepData, reg, value);
1369}
1370
1371static void ath9k_olc_init(struct ath_hw *ah)
1372{
1373 u32 i;
1374
1375 if (OLC_FOR_AR9287_10_LATER) {
1376 REG_SET_BIT(ah, AR_PHY_TX_PWRCTRL9,
1377 AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL);
1378 ath9k_hw_analog_shift_rmw(ah, AR9287_AN_TXPC0,
1379 AR9287_AN_TXPC0_TXPCMODE,
1380 AR9287_AN_TXPC0_TXPCMODE_S,
1381 AR9287_AN_TXPC0_TXPCMODE_TEMPSENSE);
1382 udelay(100);
1383 } else {
1384 for (i = 0; i < AR9280_TX_GAIN_TABLE_SIZE; i++)
1385 ah->originalGain[i] =
1386 MS(REG_READ(ah, AR_PHY_TX_GAIN_TBL1 + i * 4),
1387 AR_PHY_TX_GAIN);
1388 ah->PDADCdelta = 0;
1389 }
1390}
1391
1392static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
1393 struct ath9k_channel *chan)
1394{ 865{
1395 u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band); 866 u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
1396 867
@@ -1404,173 +875,24 @@ static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
1404 return ctl; 875 return ctl;
1405} 876}
1406 877
1407static int ath9k_hw_process_ini(struct ath_hw *ah,
1408 struct ath9k_channel *chan)
1409{
1410 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
1411 int i, regWrites = 0;
1412 struct ieee80211_channel *channel = chan->chan;
1413 u32 modesIndex, freqIndex;
1414
1415 switch (chan->chanmode) {
1416 case CHANNEL_A:
1417 case CHANNEL_A_HT20:
1418 modesIndex = 1;
1419 freqIndex = 1;
1420 break;
1421 case CHANNEL_A_HT40PLUS:
1422 case CHANNEL_A_HT40MINUS:
1423 modesIndex = 2;
1424 freqIndex = 1;
1425 break;
1426 case CHANNEL_G:
1427 case CHANNEL_G_HT20:
1428 case CHANNEL_B:
1429 modesIndex = 4;
1430 freqIndex = 2;
1431 break;
1432 case CHANNEL_G_HT40PLUS:
1433 case CHANNEL_G_HT40MINUS:
1434 modesIndex = 3;
1435 freqIndex = 2;
1436 break;
1437
1438 default:
1439 return -EINVAL;
1440 }
1441
1442 REG_WRITE(ah, AR_PHY(0), 0x00000007);
1443 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
1444 ah->eep_ops->set_addac(ah, chan);
1445
1446 if (AR_SREV_5416_22_OR_LATER(ah)) {
1447 REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
1448 } else {
1449 struct ar5416IniArray temp;
1450 u32 addacSize =
1451 sizeof(u32) * ah->iniAddac.ia_rows *
1452 ah->iniAddac.ia_columns;
1453
1454 memcpy(ah->addac5416_21,
1455 ah->iniAddac.ia_array, addacSize);
1456
1457 (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
1458
1459 temp.ia_array = ah->addac5416_21;
1460 temp.ia_columns = ah->iniAddac.ia_columns;
1461 temp.ia_rows = ah->iniAddac.ia_rows;
1462 REG_WRITE_ARRAY(&temp, 1, regWrites);
1463 }
1464
1465 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
1466
1467 for (i = 0; i < ah->iniModes.ia_rows; i++) {
1468 u32 reg = INI_RA(&ah->iniModes, i, 0);
1469 u32 val = INI_RA(&ah->iniModes, i, modesIndex);
1470
1471 REG_WRITE(ah, reg, val);
1472
1473 if (reg >= 0x7800 && reg < 0x78a0
1474 && ah->config.analog_shiftreg) {
1475 udelay(100);
1476 }
1477
1478 DO_DELAY(regWrites);
1479 }
1480
1481 if (AR_SREV_9280(ah) || AR_SREV_9287_10_OR_LATER(ah))
1482 REG_WRITE_ARRAY(&ah->iniModesRxGain, modesIndex, regWrites);
1483
1484 if (AR_SREV_9280(ah) || AR_SREV_9285_12_OR_LATER(ah) ||
1485 AR_SREV_9287_10_OR_LATER(ah))
1486 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
1487
1488 for (i = 0; i < ah->iniCommon.ia_rows; i++) {
1489 u32 reg = INI_RA(&ah->iniCommon, i, 0);
1490 u32 val = INI_RA(&ah->iniCommon, i, 1);
1491
1492 REG_WRITE(ah, reg, val);
1493
1494 if (reg >= 0x7800 && reg < 0x78a0
1495 && ah->config.analog_shiftreg) {
1496 udelay(100);
1497 }
1498
1499 DO_DELAY(regWrites);
1500 }
1501
1502 ath9k_hw_write_regs(ah, freqIndex, regWrites);
1503
1504 if (AR_SREV_9271_10(ah))
1505 REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
1506 modesIndex, regWrites);
1507
1508 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
1509 REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
1510 regWrites);
1511 }
1512
1513 ath9k_hw_override_ini(ah, chan);
1514 ath9k_hw_set_regs(ah, chan);
1515 ath9k_hw_init_chain_masks(ah);
1516
1517 if (OLC_FOR_AR9280_20_LATER)
1518 ath9k_olc_init(ah);
1519
1520 ah->eep_ops->set_txpower(ah, chan,
1521 ath9k_regd_get_ctl(regulatory, chan),
1522 channel->max_antenna_gain * 2,
1523 channel->max_power * 2,
1524 min((u32) MAX_RATE_POWER,
1525 (u32) regulatory->power_limit));
1526
1527 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
1528 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
1529 "ar5416SetRfRegs failed\n");
1530 return -EIO;
1531 }
1532
1533 return 0;
1534}
1535
1536/****************************************/ 878/****************************************/
1537/* Reset and Channel Switching Routines */ 879/* Reset and Channel Switching Routines */
1538/****************************************/ 880/****************************************/
1539 881
1540static void ath9k_hw_set_rfmode(struct ath_hw *ah, struct ath9k_channel *chan)
1541{
1542 u32 rfMode = 0;
1543
1544 if (chan == NULL)
1545 return;
1546
1547 rfMode |= (IS_CHAN_B(chan) || IS_CHAN_G(chan))
1548 ? AR_PHY_MODE_DYNAMIC : AR_PHY_MODE_OFDM;
1549
1550 if (!AR_SREV_9280_10_OR_LATER(ah))
1551 rfMode |= (IS_CHAN_5GHZ(chan)) ?
1552 AR_PHY_MODE_RF5GHZ : AR_PHY_MODE_RF2GHZ;
1553
1554 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan))
1555 rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
1556
1557 REG_WRITE(ah, AR_PHY_MODE, rfMode);
1558}
1559
1560static void ath9k_hw_mark_phy_inactive(struct ath_hw *ah)
1561{
1562 REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
1563}
1564
1565static inline void ath9k_hw_set_dma(struct ath_hw *ah) 882static inline void ath9k_hw_set_dma(struct ath_hw *ah)
1566{ 883{
884 struct ath_common *common = ath9k_hw_common(ah);
1567 u32 regval; 885 u32 regval;
1568 886
887 ENABLE_REGWRITE_BUFFER(ah);
888
1569 /* 889 /*
1570 * set AHB_MODE not to do cacheline prefetches 890 * set AHB_MODE not to do cacheline prefetches
1571 */ 891 */
1572 regval = REG_READ(ah, AR_AHB_MODE); 892 if (!AR_SREV_9300_20_OR_LATER(ah)) {
1573 REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN); 893 regval = REG_READ(ah, AR_AHB_MODE);
894 REG_WRITE(ah, AR_AHB_MODE, regval | AR_AHB_PREFETCH_RD_EN);
895 }
1574 896
1575 /* 897 /*
1576 * let mac dma reads be in 128 byte chunks 898 * let mac dma reads be in 128 byte chunks
@@ -1578,12 +900,18 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
1578 regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK; 900 regval = REG_READ(ah, AR_TXCFG) & ~AR_TXCFG_DMASZ_MASK;
1579 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B); 901 REG_WRITE(ah, AR_TXCFG, regval | AR_TXCFG_DMASZ_128B);
1580 902
903 REGWRITE_BUFFER_FLUSH(ah);
904 DISABLE_REGWRITE_BUFFER(ah);
905
1581 /* 906 /*
1582 * Restore TX Trigger Level to its pre-reset value. 907 * Restore TX Trigger Level to its pre-reset value.
1583 * The initial value depends on whether aggregation is enabled, and is 908 * The initial value depends on whether aggregation is enabled, and is
1584 * adjusted whenever underruns are detected. 909 * adjusted whenever underruns are detected.
1585 */ 910 */
1586 REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level); 911 if (!AR_SREV_9300_20_OR_LATER(ah))
912 REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
913
914 ENABLE_REGWRITE_BUFFER(ah);
1587 915
1588 /* 916 /*
1589 * let mac dma writes be in 128 byte chunks 917 * let mac dma writes be in 128 byte chunks
@@ -1596,6 +924,14 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
1596 */ 924 */
1597 REG_WRITE(ah, AR_RXFIFO_CFG, 0x200); 925 REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
1598 926
927 if (AR_SREV_9300_20_OR_LATER(ah)) {
928 REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_HP, 0x1);
929 REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_LP, 0x1);
930
931 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
932 ah->caps.rx_status_len);
933 }
934
1599 /* 935 /*
1600 * reduce the number of usable entries in PCU TXBUF to avoid 936 * reduce the number of usable entries in PCU TXBUF to avoid
1601 * wrap around issues. 937 * wrap around issues.
@@ -1611,6 +947,12 @@ static inline void ath9k_hw_set_dma(struct ath_hw *ah)
1611 REG_WRITE(ah, AR_PCU_TXBUF_CTRL, 947 REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
1612 AR_PCU_TXBUF_CTRL_USABLE_SIZE); 948 AR_PCU_TXBUF_CTRL_USABLE_SIZE);
1613 } 949 }
950
951 REGWRITE_BUFFER_FLUSH(ah);
952 DISABLE_REGWRITE_BUFFER(ah);
953
954 if (AR_SREV_9300_20_OR_LATER(ah))
955 ath9k_hw_reset_txstatus_ring(ah);
1614} 956}
1615 957
1616static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode) 958static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
@@ -1638,10 +980,8 @@ static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
1638 } 980 }
1639} 981}
1640 982
1641static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, 983void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
1642 u32 coef_scaled, 984 u32 *coef_mantissa, u32 *coef_exponent)
1643 u32 *coef_mantissa,
1644 u32 *coef_exponent)
1645{ 985{
1646 u32 coef_exp, coef_man; 986 u32 coef_exp, coef_man;
1647 987
@@ -1657,40 +997,6 @@ static inline void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah,
1657 *coef_exponent = coef_exp - 16; 997 *coef_exponent = coef_exp - 16;
1658} 998}
1659 999
1660static void ath9k_hw_set_delta_slope(struct ath_hw *ah,
1661 struct ath9k_channel *chan)
1662{
1663 u32 coef_scaled, ds_coef_exp, ds_coef_man;
1664 u32 clockMhzScaled = 0x64000000;
1665 struct chan_centers centers;
1666
1667 if (IS_CHAN_HALF_RATE(chan))
1668 clockMhzScaled = clockMhzScaled >> 1;
1669 else if (IS_CHAN_QUARTER_RATE(chan))
1670 clockMhzScaled = clockMhzScaled >> 2;
1671
1672 ath9k_hw_get_channel_centers(ah, chan, &centers);
1673 coef_scaled = clockMhzScaled / centers.synth_center;
1674
1675 ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
1676 &ds_coef_exp);
1677
1678 REG_RMW_FIELD(ah, AR_PHY_TIMING3,
1679 AR_PHY_TIMING3_DSC_MAN, ds_coef_man);
1680 REG_RMW_FIELD(ah, AR_PHY_TIMING3,
1681 AR_PHY_TIMING3_DSC_EXP, ds_coef_exp);
1682
1683 coef_scaled = (9 * coef_scaled) / 10;
1684
1685 ath9k_hw_get_delta_slope_vals(ah, coef_scaled, &ds_coef_man,
1686 &ds_coef_exp);
1687
1688 REG_RMW_FIELD(ah, AR_PHY_HALFGI,
1689 AR_PHY_HALFGI_DSC_MAN, ds_coef_man);
1690 REG_RMW_FIELD(ah, AR_PHY_HALFGI,
1691 AR_PHY_HALFGI_DSC_EXP, ds_coef_exp);
1692}
1693
1694static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) 1000static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1695{ 1001{
1696 u32 rst_flags; 1002 u32 rst_flags;
@@ -1704,6 +1010,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1704 (void)REG_READ(ah, AR_RTC_DERIVED_CLK); 1010 (void)REG_READ(ah, AR_RTC_DERIVED_CLK);
1705 } 1011 }
1706 1012
1013 ENABLE_REGWRITE_BUFFER(ah);
1014
1707 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | 1015 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1708 AR_RTC_FORCE_WAKE_ON_INT); 1016 AR_RTC_FORCE_WAKE_ON_INT);
1709 1017
@@ -1715,11 +1023,16 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1715 if (tmpReg & 1023 if (tmpReg &
1716 (AR_INTR_SYNC_LOCAL_TIMEOUT | 1024 (AR_INTR_SYNC_LOCAL_TIMEOUT |
1717 AR_INTR_SYNC_RADM_CPL_TIMEOUT)) { 1025 AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
1026 u32 val;
1718 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0); 1027 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
1719 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); 1028
1720 } else { 1029 val = AR_RC_HOSTIF;
1030 if (!AR_SREV_9300_20_OR_LATER(ah))
1031 val |= AR_RC_AHB;
1032 REG_WRITE(ah, AR_RC, val);
1033
1034 } else if (!AR_SREV_9300_20_OR_LATER(ah))
1721 REG_WRITE(ah, AR_RC, AR_RC_AHB); 1035 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1722 }
1723 1036
1724 rst_flags = AR_RTC_RC_MAC_WARM; 1037 rst_flags = AR_RTC_RC_MAC_WARM;
1725 if (type == ATH9K_RESET_COLD) 1038 if (type == ATH9K_RESET_COLD)
@@ -1727,6 +1040,10 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1727 } 1040 }
1728 1041
1729 REG_WRITE(ah, AR_RTC_RC, rst_flags); 1042 REG_WRITE(ah, AR_RTC_RC, rst_flags);
1043
1044 REGWRITE_BUFFER_FLUSH(ah);
1045 DISABLE_REGWRITE_BUFFER(ah);
1046
1730 udelay(50); 1047 udelay(50);
1731 1048
1732 REG_WRITE(ah, AR_RTC_RC, 0); 1049 REG_WRITE(ah, AR_RTC_RC, 0);
@@ -1747,16 +1064,23 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1747 1064
1748static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah) 1065static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1749{ 1066{
1067 ENABLE_REGWRITE_BUFFER(ah);
1068
1750 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN | 1069 REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
1751 AR_RTC_FORCE_WAKE_ON_INT); 1070 AR_RTC_FORCE_WAKE_ON_INT);
1752 1071
1753 if (!AR_SREV_9100(ah)) 1072 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
1754 REG_WRITE(ah, AR_RC, AR_RC_AHB); 1073 REG_WRITE(ah, AR_RC, AR_RC_AHB);
1755 1074
1756 REG_WRITE(ah, AR_RTC_RESET, 0); 1075 REG_WRITE(ah, AR_RTC_RESET, 0);
1757 udelay(2);
1758 1076
1759 if (!AR_SREV_9100(ah)) 1077 REGWRITE_BUFFER_FLUSH(ah);
1078 DISABLE_REGWRITE_BUFFER(ah);
1079
1080 if (!AR_SREV_9300_20_OR_LATER(ah))
1081 udelay(2);
1082
1083 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
1760 REG_WRITE(ah, AR_RC, 0); 1084 REG_WRITE(ah, AR_RC, 0);
1761 1085
1762 REG_WRITE(ah, AR_RTC_RESET, 1); 1086 REG_WRITE(ah, AR_RTC_RESET, 1);
@@ -1792,34 +1116,6 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1792 } 1116 }
1793} 1117}
1794 1118
1795static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan)
1796{
1797 u32 phymode;
1798 u32 enableDacFifo = 0;
1799
1800 if (AR_SREV_9285_10_OR_LATER(ah))
1801 enableDacFifo = (REG_READ(ah, AR_PHY_TURBO) &
1802 AR_PHY_FC_ENABLE_DAC_FIFO);
1803
1804 phymode = AR_PHY_FC_HT_EN | AR_PHY_FC_SHORT_GI_40
1805 | AR_PHY_FC_SINGLE_HT_LTF1 | AR_PHY_FC_WALSH | enableDacFifo;
1806
1807 if (IS_CHAN_HT40(chan)) {
1808 phymode |= AR_PHY_FC_DYN2040_EN;
1809
1810 if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
1811 (chan->chanmode == CHANNEL_G_HT40PLUS))
1812 phymode |= AR_PHY_FC_DYN2040_PRI_CH;
1813
1814 }
1815 REG_WRITE(ah, AR_PHY_TURBO, phymode);
1816
1817 ath9k_hw_set11nmac2040(ah);
1818
1819 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
1820 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
1821}
1822
1823static bool ath9k_hw_chip_reset(struct ath_hw *ah, 1119static bool ath9k_hw_chip_reset(struct ath_hw *ah,
1824 struct ath9k_channel *chan) 1120 struct ath9k_channel *chan)
1825{ 1121{
@@ -1845,7 +1141,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1845 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 1141 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
1846 struct ath_common *common = ath9k_hw_common(ah); 1142 struct ath_common *common = ath9k_hw_common(ah);
1847 struct ieee80211_channel *channel = chan->chan; 1143 struct ieee80211_channel *channel = chan->chan;
1848 u32 synthDelay, qnum; 1144 u32 qnum;
1849 int r; 1145 int r;
1850 1146
1851 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { 1147 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
@@ -1857,17 +1153,15 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1857 } 1153 }
1858 } 1154 }
1859 1155
1860 REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN); 1156 if (!ath9k_hw_rfbus_req(ah)) {
1861 if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
1862 AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) {
1863 ath_print(common, ATH_DBG_FATAL, 1157 ath_print(common, ATH_DBG_FATAL,
1864 "Could not kill baseband RX\n"); 1158 "Could not kill baseband RX\n");
1865 return false; 1159 return false;
1866 } 1160 }
1867 1161
1868 ath9k_hw_set_regs(ah, chan); 1162 ath9k_hw_set_channel_regs(ah, chan);
1869 1163
1870 r = ah->ath9k_hw_rf_set_freq(ah, chan); 1164 r = ath9k_hw_rf_set_freq(ah, chan);
1871 if (r) { 1165 if (r) {
1872 ath_print(common, ATH_DBG_FATAL, 1166 ath_print(common, ATH_DBG_FATAL,
1873 "Failed to set channel\n"); 1167 "Failed to set channel\n");
@@ -1881,20 +1175,12 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1881 min((u32) MAX_RATE_POWER, 1175 min((u32) MAX_RATE_POWER,
1882 (u32) regulatory->power_limit)); 1176 (u32) regulatory->power_limit));
1883 1177
1884 synthDelay = REG_READ(ah, AR_PHY_RX_DELAY) & AR_PHY_RX_DELAY_DELAY; 1178 ath9k_hw_rfbus_done(ah);
1885 if (IS_CHAN_B(chan))
1886 synthDelay = (4 * synthDelay) / 22;
1887 else
1888 synthDelay /= 10;
1889
1890 udelay(synthDelay + BASE_ACTIVATE_DELAY);
1891
1892 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
1893 1179
1894 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1180 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
1895 ath9k_hw_set_delta_slope(ah, chan); 1181 ath9k_hw_set_delta_slope(ah, chan);
1896 1182
1897 ah->ath9k_hw_spur_mitigate_freq(ah, chan); 1183 ath9k_hw_spur_mitigate_freq(ah, chan);
1898 1184
1899 if (!chan->oneTimeCalsDone) 1185 if (!chan->oneTimeCalsDone)
1900 chan->oneTimeCalsDone = true; 1186 chan->oneTimeCalsDone = true;
@@ -1902,17 +1188,33 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1902 return true; 1188 return true;
1903} 1189}
1904 1190
1905static void ath9k_enable_rfkill(struct ath_hw *ah) 1191bool ath9k_hw_check_alive(struct ath_hw *ah)
1906{ 1192{
1907 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, 1193 int count = 50;
1908 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB); 1194 u32 reg;
1195
1196 if (AR_SREV_9285_10_OR_LATER(ah))
1197 return true;
1198
1199 do {
1200 reg = REG_READ(ah, AR_OBS_BUS_1);
1909 1201
1910 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2, 1202 if ((reg & 0x7E7FFFEF) == 0x00702400)
1911 AR_GPIO_INPUT_MUX2_RFSILENT); 1203 continue;
1912 1204
1913 ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio); 1205 switch (reg & 0x7E000B00) {
1914 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB); 1206 case 0x1E000000:
1207 case 0x52000B00:
1208 case 0x18000B00:
1209 continue;
1210 default:
1211 return true;
1212 }
1213 } while (count-- > 0);
1214
1215 return false;
1915} 1216}
1217EXPORT_SYMBOL(ath9k_hw_check_alive);
1916 1218
1917int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 1219int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1918 bool bChannelChange) 1220 bool bChannelChange)
@@ -1923,11 +1225,18 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1923 u32 saveDefAntenna; 1225 u32 saveDefAntenna;
1924 u32 macStaId1; 1226 u32 macStaId1;
1925 u64 tsf = 0; 1227 u64 tsf = 0;
1926 int i, rx_chainmask, r; 1228 int i, r;
1927 1229
1928 ah->txchainmask = common->tx_chainmask; 1230 ah->txchainmask = common->tx_chainmask;
1929 ah->rxchainmask = common->rx_chainmask; 1231 ah->rxchainmask = common->rx_chainmask;
1930 1232
1233 if (!ah->chip_fullsleep) {
1234 ath9k_hw_abortpcurecv(ah);
1235 if (!ath9k_hw_stopdmarecv(ah))
1236 ath_print(common, ATH_DBG_XMIT,
1237 "Failed to stop receive dma\n");
1238 }
1239
1931 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1240 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
1932 return -EIO; 1241 return -EIO;
1933 1242
@@ -1940,8 +1249,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1940 (chan->channel != ah->curchan->channel) && 1249 (chan->channel != ah->curchan->channel) &&
1941 ((chan->channelFlags & CHANNEL_ALL) == 1250 ((chan->channelFlags & CHANNEL_ALL) ==
1942 (ah->curchan->channelFlags & CHANNEL_ALL)) && 1251 (ah->curchan->channelFlags & CHANNEL_ALL)) &&
1943 !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) || 1252 !AR_SREV_9280(ah)) {
1944 IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
1945 1253
1946 if (ath9k_hw_channel_change(ah, chan)) { 1254 if (ath9k_hw_channel_change(ah, chan)) {
1947 ath9k_hw_loadnf(ah, ah->curchan); 1255 ath9k_hw_loadnf(ah, ah->curchan);
@@ -1966,6 +1274,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1966 1274
1967 ath9k_hw_mark_phy_inactive(ah); 1275 ath9k_hw_mark_phy_inactive(ah);
1968 1276
1277 /* Only required on the first reset */
1969 if (AR_SREV_9271(ah) && ah->htc_reset_init) { 1278 if (AR_SREV_9271(ah) && ah->htc_reset_init) {
1970 REG_WRITE(ah, 1279 REG_WRITE(ah,
1971 AR9271_RESET_POWER_DOWN_CONTROL, 1280 AR9271_RESET_POWER_DOWN_CONTROL,
@@ -1978,6 +1287,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1978 return -EINVAL; 1287 return -EINVAL;
1979 } 1288 }
1980 1289
1290 /* Only required on the first reset */
1981 if (AR_SREV_9271(ah) && ah->htc_reset_init) { 1291 if (AR_SREV_9271(ah) && ah->htc_reset_init) {
1982 ah->htc_reset_init = false; 1292 ah->htc_reset_init = false;
1983 REG_WRITE(ah, 1293 REG_WRITE(ah,
@@ -1993,16 +1303,6 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1993 if (AR_SREV_9280_10_OR_LATER(ah)) 1303 if (AR_SREV_9280_10_OR_LATER(ah))
1994 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE); 1304 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
1995 1305
1996 if (AR_SREV_9287_12_OR_LATER(ah)) {
1997 /* Enable ASYNC FIFO */
1998 REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
1999 AR_MAC_PCU_ASYNC_FIFO_REG3_DATAPATH_SEL);
2000 REG_SET_BIT(ah, AR_PHY_MODE, AR_PHY_MODE_ASYNCFIFO);
2001 REG_CLR_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
2002 AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
2003 REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
2004 AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
2005 }
2006 r = ath9k_hw_process_ini(ah, chan); 1306 r = ath9k_hw_process_ini(ah, chan);
2007 if (r) 1307 if (r)
2008 return r; 1308 return r;
@@ -2027,9 +1327,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2027 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1327 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
2028 ath9k_hw_set_delta_slope(ah, chan); 1328 ath9k_hw_set_delta_slope(ah, chan);
2029 1329
2030 ah->ath9k_hw_spur_mitigate_freq(ah, chan); 1330 ath9k_hw_spur_mitigate_freq(ah, chan);
2031 ah->eep_ops->set_board_values(ah, chan); 1331 ah->eep_ops->set_board_values(ah, chan);
2032 1332
1333 ath9k_hw_set_operating_mode(ah, ah->opmode);
1334
1335 ENABLE_REGWRITE_BUFFER(ah);
1336
2033 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr)); 1337 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
2034 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4) 1338 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
2035 | macStaId1 1339 | macStaId1
@@ -2037,25 +1341,27 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2037 | (ah->config. 1341 | (ah->config.
2038 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0) 1342 ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
2039 | ah->sta_id1_defaults); 1343 | ah->sta_id1_defaults);
2040 ath9k_hw_set_operating_mode(ah, ah->opmode);
2041
2042 ath_hw_setbssidmask(common); 1344 ath_hw_setbssidmask(common);
2043
2044 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna); 1345 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
2045
2046 ath9k_hw_write_associd(ah); 1346 ath9k_hw_write_associd(ah);
2047
2048 REG_WRITE(ah, AR_ISR, ~0); 1347 REG_WRITE(ah, AR_ISR, ~0);
2049
2050 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); 1348 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
2051 1349
2052 r = ah->ath9k_hw_rf_set_freq(ah, chan); 1350 REGWRITE_BUFFER_FLUSH(ah);
1351 DISABLE_REGWRITE_BUFFER(ah);
1352
1353 r = ath9k_hw_rf_set_freq(ah, chan);
2053 if (r) 1354 if (r)
2054 return r; 1355 return r;
2055 1356
1357 ENABLE_REGWRITE_BUFFER(ah);
1358
2056 for (i = 0; i < AR_NUM_DCU; i++) 1359 for (i = 0; i < AR_NUM_DCU; i++)
2057 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i); 1360 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
2058 1361
1362 REGWRITE_BUFFER_FLUSH(ah);
1363 DISABLE_REGWRITE_BUFFER(ah);
1364
2059 ah->intr_txqs = 0; 1365 ah->intr_txqs = 0;
2060 for (i = 0; i < ah->caps.total_queues; i++) 1366 for (i = 0; i < ah->caps.total_queues; i++)
2061 ath9k_hw_resettxqueue(ah, i); 1367 ath9k_hw_resettxqueue(ah, i);
@@ -2068,25 +1374,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2068 1374
2069 ath9k_hw_init_global_settings(ah); 1375 ath9k_hw_init_global_settings(ah);
2070 1376
2071 if (AR_SREV_9287_12_OR_LATER(ah)) { 1377 if (!AR_SREV_9300_20_OR_LATER(ah)) {
2072 REG_WRITE(ah, AR_D_GBL_IFS_SIFS, 1378 ar9002_hw_enable_async_fifo(ah);
2073 AR_D_GBL_IFS_SIFS_ASYNC_FIFO_DUR); 1379 ar9002_hw_enable_wep_aggregation(ah);
2074 REG_WRITE(ah, AR_D_GBL_IFS_SLOT,
2075 AR_D_GBL_IFS_SLOT_ASYNC_FIFO_DUR);
2076 REG_WRITE(ah, AR_D_GBL_IFS_EIFS,
2077 AR_D_GBL_IFS_EIFS_ASYNC_FIFO_DUR);
2078
2079 REG_WRITE(ah, AR_TIME_OUT, AR_TIME_OUT_ACK_CTS_ASYNC_FIFO_DUR);
2080 REG_WRITE(ah, AR_USEC, AR_USEC_ASYNC_FIFO_DUR);
2081
2082 REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
2083 AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
2084 REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
2085 AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
2086 }
2087 if (AR_SREV_9287_12_OR_LATER(ah)) {
2088 REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
2089 AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
2090 } 1380 }
2091 1381
2092 REG_WRITE(ah, AR_STA_ID1, 1382 REG_WRITE(ah, AR_STA_ID1,
@@ -2101,19 +1391,24 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2101 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000); 1391 REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
2102 } 1392 }
2103 1393
1394 if (ah->config.tx_intr_mitigation) {
1395 REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
1396 REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
1397 }
1398
2104 ath9k_hw_init_bb(ah, chan); 1399 ath9k_hw_init_bb(ah, chan);
2105 1400
2106 if (!ath9k_hw_init_cal(ah, chan)) 1401 if (!ath9k_hw_init_cal(ah, chan))
2107 return -EIO; 1402 return -EIO;
2108 1403
2109 rx_chainmask = ah->rxchainmask; 1404 ENABLE_REGWRITE_BUFFER(ah);
2110 if ((rx_chainmask == 0x5) || (rx_chainmask == 0x3)) {
2111 REG_WRITE(ah, AR_PHY_RX_CHAINMASK, rx_chainmask);
2112 REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, rx_chainmask);
2113 }
2114 1405
1406 ath9k_hw_restore_chainmask(ah);
2115 REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ); 1407 REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
2116 1408
1409 REGWRITE_BUFFER_FLUSH(ah);
1410 DISABLE_REGWRITE_BUFFER(ah);
1411
2117 /* 1412 /*
2118 * For big endian systems turn on swapping for descriptors 1413 * For big endian systems turn on swapping for descriptors
2119 */ 1414 */
@@ -2143,6 +1438,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2143 if (ah->btcoex_hw.enabled) 1438 if (ah->btcoex_hw.enabled)
2144 ath9k_hw_btcoex_enable(ah); 1439 ath9k_hw_btcoex_enable(ah);
2145 1440
1441 if (AR_SREV_9300_20_OR_LATER(ah)) {
1442 ath9k_hw_loadnf(ah, curchan);
1443 ath9k_hw_start_nfcal(ah);
1444 }
1445
2146 return 0; 1446 return 0;
2147} 1447}
2148EXPORT_SYMBOL(ath9k_hw_reset); 1448EXPORT_SYMBOL(ath9k_hw_reset);
@@ -2429,21 +1729,35 @@ EXPORT_SYMBOL(ath9k_hw_keyisvalid);
2429/* Power Management (Chipset) */ 1729/* Power Management (Chipset) */
2430/******************************/ 1730/******************************/
2431 1731
1732/*
1733 * Notify Power Mgt is disabled in self-generated frames.
1734 * If requested, force chip to sleep.
1735 */
2432static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip) 1736static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
2433{ 1737{
2434 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 1738 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
2435 if (setChip) { 1739 if (setChip) {
1740 /*
1741 * Clear the RTC force wake bit to allow the
1742 * mac to go to sleep.
1743 */
2436 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, 1744 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
2437 AR_RTC_FORCE_WAKE_EN); 1745 AR_RTC_FORCE_WAKE_EN);
2438 if (!AR_SREV_9100(ah)) 1746 if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
2439 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); 1747 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
2440 1748
2441 if(!AR_SREV_5416(ah)) 1749 /* Shutdown chip. Active low */
1750 if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah))
2442 REG_CLR_BIT(ah, (AR_RTC_RESET), 1751 REG_CLR_BIT(ah, (AR_RTC_RESET),
2443 AR_RTC_RESET_EN); 1752 AR_RTC_RESET_EN);
2444 } 1753 }
2445} 1754}
2446 1755
1756/*
1757 * Notify Power Management is enabled in self-generating
1758 * frames. If request, set power mode of chip to
1759 * auto/normal. Duration in units of 128us (1/8 TU).
1760 */
2447static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip) 1761static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
2448{ 1762{
2449 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV); 1763 REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
@@ -2451,9 +1765,14 @@ static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
2451 struct ath9k_hw_capabilities *pCap = &ah->caps; 1765 struct ath9k_hw_capabilities *pCap = &ah->caps;
2452 1766
2453 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 1767 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1768 /* Set WakeOnInterrupt bit; clear ForceWake bit */
2454 REG_WRITE(ah, AR_RTC_FORCE_WAKE, 1769 REG_WRITE(ah, AR_RTC_FORCE_WAKE,
2455 AR_RTC_FORCE_WAKE_ON_INT); 1770 AR_RTC_FORCE_WAKE_ON_INT);
2456 } else { 1771 } else {
1772 /*
1773 * Clear the RTC force wake bit to allow the
1774 * mac to go to sleep.
1775 */
2457 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, 1776 REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
2458 AR_RTC_FORCE_WAKE_EN); 1777 AR_RTC_FORCE_WAKE_EN);
2459 } 1778 }
@@ -2472,7 +1791,8 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2472 ATH9K_RESET_POWER_ON) != true) { 1791 ATH9K_RESET_POWER_ON) != true) {
2473 return false; 1792 return false;
2474 } 1793 }
2475 ath9k_hw_init_pll(ah, NULL); 1794 if (!AR_SREV_9300_20_OR_LATER(ah))
1795 ath9k_hw_init_pll(ah, NULL);
2476 } 1796 }
2477 if (AR_SREV_9100(ah)) 1797 if (AR_SREV_9100(ah))
2478 REG_SET_BIT(ah, AR_RTC_RESET, 1798 REG_SET_BIT(ah, AR_RTC_RESET,
@@ -2542,424 +1862,6 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
2542} 1862}
2543EXPORT_SYMBOL(ath9k_hw_setpower); 1863EXPORT_SYMBOL(ath9k_hw_setpower);
2544 1864
2545/*
2546 * Helper for ASPM support.
2547 *
2548 * Disable PLL when in L0s as well as receiver clock when in L1.
2549 * This power saving option must be enabled through the SerDes.
2550 *
2551 * Programming the SerDes must go through the same 288 bit serial shift
2552 * register as the other analog registers. Hence the 9 writes.
2553 */
2554void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off)
2555{
2556 u8 i;
2557 u32 val;
2558
2559 if (ah->is_pciexpress != true)
2560 return;
2561
2562 /* Do not touch SerDes registers */
2563 if (ah->config.pcie_powersave_enable == 2)
2564 return;
2565
2566 /* Nothing to do on restore for 11N */
2567 if (!restore) {
2568 if (AR_SREV_9280_20_OR_LATER(ah)) {
2569 /*
2570 * AR9280 2.0 or later chips use SerDes values from the
2571 * initvals.h initialized depending on chipset during
2572 * ath9k_hw_init()
2573 */
2574 for (i = 0; i < ah->iniPcieSerdes.ia_rows; i++) {
2575 REG_WRITE(ah, INI_RA(&ah->iniPcieSerdes, i, 0),
2576 INI_RA(&ah->iniPcieSerdes, i, 1));
2577 }
2578 } else if (AR_SREV_9280(ah) &&
2579 (ah->hw_version.macRev == AR_SREV_REVISION_9280_10)) {
2580 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fd00);
2581 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
2582
2583 /* RX shut off when elecidle is asserted */
2584 REG_WRITE(ah, AR_PCIE_SERDES, 0xa8000019);
2585 REG_WRITE(ah, AR_PCIE_SERDES, 0x13160820);
2586 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980560);
2587
2588 /* Shut off CLKREQ active in L1 */
2589 if (ah->config.pcie_clock_req)
2590 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffc);
2591 else
2592 REG_WRITE(ah, AR_PCIE_SERDES, 0x401deffd);
2593
2594 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
2595 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
2596 REG_WRITE(ah, AR_PCIE_SERDES, 0x00043007);
2597
2598 /* Load the new settings */
2599 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
2600
2601 } else {
2602 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
2603 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
2604
2605 /* RX shut off when elecidle is asserted */
2606 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000039);
2607 REG_WRITE(ah, AR_PCIE_SERDES, 0x53160824);
2608 REG_WRITE(ah, AR_PCIE_SERDES, 0xe5980579);
2609
2610 /*
2611 * Ignore ah->ah_config.pcie_clock_req setting for
2612 * pre-AR9280 11n
2613 */
2614 REG_WRITE(ah, AR_PCIE_SERDES, 0x001defff);
2615
2616 REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
2617 REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
2618 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e3007);
2619
2620 /* Load the new settings */
2621 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
2622 }
2623
2624 udelay(1000);
2625
2626 /* set bit 19 to allow forcing of pcie core into L1 state */
2627 REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
2628
2629 /* Several PCIe massages to ensure proper behaviour */
2630 if (ah->config.pcie_waen) {
2631 val = ah->config.pcie_waen;
2632 if (!power_off)
2633 val &= (~AR_WA_D3_L1_DISABLE);
2634 } else {
2635 if (AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
2636 AR_SREV_9287(ah)) {
2637 val = AR9285_WA_DEFAULT;
2638 if (!power_off)
2639 val &= (~AR_WA_D3_L1_DISABLE);
2640 } else if (AR_SREV_9280(ah)) {
2641 /*
2642 * On AR9280 chips bit 22 of 0x4004 needs to be
2643 * set otherwise card may disappear.
2644 */
2645 val = AR9280_WA_DEFAULT;
2646 if (!power_off)
2647 val &= (~AR_WA_D3_L1_DISABLE);
2648 } else
2649 val = AR_WA_DEFAULT;
2650 }
2651
2652 REG_WRITE(ah, AR_WA, val);
2653 }
2654
2655 if (power_off) {
2656 /*
2657 * Set PCIe workaround bits
2658 * bit 14 in WA register (disable L1) should only
2659 * be set when device enters D3 and be cleared
2660 * when device comes back to D0.
2661 */
2662 if (ah->config.pcie_waen) {
2663 if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
2664 REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
2665 } else {
2666 if (((AR_SREV_9285(ah) || AR_SREV_9271(ah) ||
2667 AR_SREV_9287(ah)) &&
2668 (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
2669 (AR_SREV_9280(ah) &&
2670 (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
2671 REG_SET_BIT(ah, AR_WA, AR_WA_D3_L1_DISABLE);
2672 }
2673 }
2674 }
2675}
2676EXPORT_SYMBOL(ath9k_hw_configpcipowersave);
2677
2678/**********************/
2679/* Interrupt Handling */
2680/**********************/
2681
2682bool ath9k_hw_intrpend(struct ath_hw *ah)
2683{
2684 u32 host_isr;
2685
2686 if (AR_SREV_9100(ah))
2687 return true;
2688
2689 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
2690 if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
2691 return true;
2692
2693 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
2694 if ((host_isr & AR_INTR_SYNC_DEFAULT)
2695 && (host_isr != AR_INTR_SPURIOUS))
2696 return true;
2697
2698 return false;
2699}
2700EXPORT_SYMBOL(ath9k_hw_intrpend);
2701
2702bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
2703{
2704 u32 isr = 0;
2705 u32 mask2 = 0;
2706 struct ath9k_hw_capabilities *pCap = &ah->caps;
2707 u32 sync_cause = 0;
2708 bool fatal_int = false;
2709 struct ath_common *common = ath9k_hw_common(ah);
2710
2711 if (!AR_SREV_9100(ah)) {
2712 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
2713 if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
2714 == AR_RTC_STATUS_ON) {
2715 isr = REG_READ(ah, AR_ISR);
2716 }
2717 }
2718
2719 sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) &
2720 AR_INTR_SYNC_DEFAULT;
2721
2722 *masked = 0;
2723
2724 if (!isr && !sync_cause)
2725 return false;
2726 } else {
2727 *masked = 0;
2728 isr = REG_READ(ah, AR_ISR);
2729 }
2730
2731 if (isr) {
2732 if (isr & AR_ISR_BCNMISC) {
2733 u32 isr2;
2734 isr2 = REG_READ(ah, AR_ISR_S2);
2735 if (isr2 & AR_ISR_S2_TIM)
2736 mask2 |= ATH9K_INT_TIM;
2737 if (isr2 & AR_ISR_S2_DTIM)
2738 mask2 |= ATH9K_INT_DTIM;
2739 if (isr2 & AR_ISR_S2_DTIMSYNC)
2740 mask2 |= ATH9K_INT_DTIMSYNC;
2741 if (isr2 & (AR_ISR_S2_CABEND))
2742 mask2 |= ATH9K_INT_CABEND;
2743 if (isr2 & AR_ISR_S2_GTT)
2744 mask2 |= ATH9K_INT_GTT;
2745 if (isr2 & AR_ISR_S2_CST)
2746 mask2 |= ATH9K_INT_CST;
2747 if (isr2 & AR_ISR_S2_TSFOOR)
2748 mask2 |= ATH9K_INT_TSFOOR;
2749 }
2750
2751 isr = REG_READ(ah, AR_ISR_RAC);
2752 if (isr == 0xffffffff) {
2753 *masked = 0;
2754 return false;
2755 }
2756
2757 *masked = isr & ATH9K_INT_COMMON;
2758
2759 if (ah->config.rx_intr_mitigation) {
2760 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
2761 *masked |= ATH9K_INT_RX;
2762 }
2763
2764 if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
2765 *masked |= ATH9K_INT_RX;
2766 if (isr &
2767 (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
2768 AR_ISR_TXEOL)) {
2769 u32 s0_s, s1_s;
2770
2771 *masked |= ATH9K_INT_TX;
2772
2773 s0_s = REG_READ(ah, AR_ISR_S0_S);
2774 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
2775 ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
2776
2777 s1_s = REG_READ(ah, AR_ISR_S1_S);
2778 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
2779 ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
2780 }
2781
2782 if (isr & AR_ISR_RXORN) {
2783 ath_print(common, ATH_DBG_INTERRUPT,
2784 "receive FIFO overrun interrupt\n");
2785 }
2786
2787 if (!AR_SREV_9100(ah)) {
2788 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
2789 u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
2790 if (isr5 & AR_ISR_S5_TIM_TIMER)
2791 *masked |= ATH9K_INT_TIM_TIMER;
2792 }
2793 }
2794
2795 *masked |= mask2;
2796 }
2797
2798 if (AR_SREV_9100(ah))
2799 return true;
2800
2801 if (isr & AR_ISR_GENTMR) {
2802 u32 s5_s;
2803
2804 s5_s = REG_READ(ah, AR_ISR_S5_S);
2805 if (isr & AR_ISR_GENTMR) {
2806 ah->intr_gen_timer_trigger =
2807 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
2808
2809 ah->intr_gen_timer_thresh =
2810 MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
2811
2812 if (ah->intr_gen_timer_trigger)
2813 *masked |= ATH9K_INT_GENTIMER;
2814
2815 }
2816 }
2817
2818 if (sync_cause) {
2819 fatal_int =
2820 (sync_cause &
2821 (AR_INTR_SYNC_HOST1_FATAL | AR_INTR_SYNC_HOST1_PERR))
2822 ? true : false;
2823
2824 if (fatal_int) {
2825 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
2826 ath_print(common, ATH_DBG_ANY,
2827 "received PCI FATAL interrupt\n");
2828 }
2829 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
2830 ath_print(common, ATH_DBG_ANY,
2831 "received PCI PERR interrupt\n");
2832 }
2833 *masked |= ATH9K_INT_FATAL;
2834 }
2835 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
2836 ath_print(common, ATH_DBG_INTERRUPT,
2837 "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
2838 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
2839 REG_WRITE(ah, AR_RC, 0);
2840 *masked |= ATH9K_INT_FATAL;
2841 }
2842 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
2843 ath_print(common, ATH_DBG_INTERRUPT,
2844 "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
2845 }
2846
2847 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
2848 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
2849 }
2850
2851 return true;
2852}
2853EXPORT_SYMBOL(ath9k_hw_getisr);
2854
2855enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
2856{
2857 u32 omask = ah->mask_reg;
2858 u32 mask, mask2;
2859 struct ath9k_hw_capabilities *pCap = &ah->caps;
2860 struct ath_common *common = ath9k_hw_common(ah);
2861
2862 ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
2863
2864 if (omask & ATH9K_INT_GLOBAL) {
2865 ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
2866 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
2867 (void) REG_READ(ah, AR_IER);
2868 if (!AR_SREV_9100(ah)) {
2869 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
2870 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
2871
2872 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
2873 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
2874 }
2875 }
2876
2877 mask = ints & ATH9K_INT_COMMON;
2878 mask2 = 0;
2879
2880 if (ints & ATH9K_INT_TX) {
2881 if (ah->txok_interrupt_mask)
2882 mask |= AR_IMR_TXOK;
2883 if (ah->txdesc_interrupt_mask)
2884 mask |= AR_IMR_TXDESC;
2885 if (ah->txerr_interrupt_mask)
2886 mask |= AR_IMR_TXERR;
2887 if (ah->txeol_interrupt_mask)
2888 mask |= AR_IMR_TXEOL;
2889 }
2890 if (ints & ATH9K_INT_RX) {
2891 mask |= AR_IMR_RXERR;
2892 if (ah->config.rx_intr_mitigation)
2893 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
2894 else
2895 mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
2896 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
2897 mask |= AR_IMR_GENTMR;
2898 }
2899
2900 if (ints & (ATH9K_INT_BMISC)) {
2901 mask |= AR_IMR_BCNMISC;
2902 if (ints & ATH9K_INT_TIM)
2903 mask2 |= AR_IMR_S2_TIM;
2904 if (ints & ATH9K_INT_DTIM)
2905 mask2 |= AR_IMR_S2_DTIM;
2906 if (ints & ATH9K_INT_DTIMSYNC)
2907 mask2 |= AR_IMR_S2_DTIMSYNC;
2908 if (ints & ATH9K_INT_CABEND)
2909 mask2 |= AR_IMR_S2_CABEND;
2910 if (ints & ATH9K_INT_TSFOOR)
2911 mask2 |= AR_IMR_S2_TSFOOR;
2912 }
2913
2914 if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
2915 mask |= AR_IMR_BCNMISC;
2916 if (ints & ATH9K_INT_GTT)
2917 mask2 |= AR_IMR_S2_GTT;
2918 if (ints & ATH9K_INT_CST)
2919 mask2 |= AR_IMR_S2_CST;
2920 }
2921
2922 ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
2923 REG_WRITE(ah, AR_IMR, mask);
2924 mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
2925 AR_IMR_S2_DTIM |
2926 AR_IMR_S2_DTIMSYNC |
2927 AR_IMR_S2_CABEND |
2928 AR_IMR_S2_CABTO |
2929 AR_IMR_S2_TSFOOR |
2930 AR_IMR_S2_GTT | AR_IMR_S2_CST);
2931 REG_WRITE(ah, AR_IMR_S2, mask | mask2);
2932 ah->mask_reg = ints;
2933
2934 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
2935 if (ints & ATH9K_INT_TIM_TIMER)
2936 REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
2937 else
2938 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
2939 }
2940
2941 if (ints & ATH9K_INT_GLOBAL) {
2942 ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
2943 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
2944 if (!AR_SREV_9100(ah)) {
2945 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
2946 AR_INTR_MAC_IRQ);
2947 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
2948
2949
2950 REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
2951 AR_INTR_SYNC_DEFAULT);
2952 REG_WRITE(ah, AR_INTR_SYNC_MASK,
2953 AR_INTR_SYNC_DEFAULT);
2954 }
2955 ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
2956 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
2957 }
2958
2959 return omask;
2960}
2961EXPORT_SYMBOL(ath9k_hw_set_interrupts);
2962
2963/*******************/ 1865/*******************/
2964/* Beacon Handling */ 1866/* Beacon Handling */
2965/*******************/ 1867/*******************/
@@ -2970,6 +1872,8 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
2970 1872
2971 ah->beacon_interval = beacon_period; 1873 ah->beacon_interval = beacon_period;
2972 1874
1875 ENABLE_REGWRITE_BUFFER(ah);
1876
2973 switch (ah->opmode) { 1877 switch (ah->opmode) {
2974 case NL80211_IFTYPE_STATION: 1878 case NL80211_IFTYPE_STATION:
2975 case NL80211_IFTYPE_MONITOR: 1879 case NL80211_IFTYPE_MONITOR:
@@ -3013,6 +1917,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
3013 REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period)); 1917 REG_WRITE(ah, AR_SWBA_PERIOD, TU_TO_USEC(beacon_period));
3014 REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period)); 1918 REG_WRITE(ah, AR_NDP_PERIOD, TU_TO_USEC(beacon_period));
3015 1919
1920 REGWRITE_BUFFER_FLUSH(ah);
1921 DISABLE_REGWRITE_BUFFER(ah);
1922
3016 beacon_period &= ~ATH9K_BEACON_ENA; 1923 beacon_period &= ~ATH9K_BEACON_ENA;
3017 if (beacon_period & ATH9K_BEACON_RESET_TSF) { 1924 if (beacon_period & ATH9K_BEACON_RESET_TSF) {
3018 ath9k_hw_reset_tsf(ah); 1925 ath9k_hw_reset_tsf(ah);
@@ -3029,6 +1936,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3029 struct ath9k_hw_capabilities *pCap = &ah->caps; 1936 struct ath9k_hw_capabilities *pCap = &ah->caps;
3030 struct ath_common *common = ath9k_hw_common(ah); 1937 struct ath_common *common = ath9k_hw_common(ah);
3031 1938
1939 ENABLE_REGWRITE_BUFFER(ah);
1940
3032 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt)); 1941 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
3033 1942
3034 REG_WRITE(ah, AR_BEACON_PERIOD, 1943 REG_WRITE(ah, AR_BEACON_PERIOD,
@@ -3036,6 +1945,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3036 REG_WRITE(ah, AR_DMA_BEACON_PERIOD, 1945 REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
3037 TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD)); 1946 TU_TO_USEC(bs->bs_intval & ATH9K_BEACON_PERIOD));
3038 1947
1948 REGWRITE_BUFFER_FLUSH(ah);
1949 DISABLE_REGWRITE_BUFFER(ah);
1950
3039 REG_RMW_FIELD(ah, AR_RSSI_THR, 1951 REG_RMW_FIELD(ah, AR_RSSI_THR,
3040 AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold); 1952 AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
3041 1953
@@ -3058,6 +1970,8 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3058 ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval); 1970 ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
3059 ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod); 1971 ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
3060 1972
1973 ENABLE_REGWRITE_BUFFER(ah);
1974
3061 REG_WRITE(ah, AR_NEXT_DTIM, 1975 REG_WRITE(ah, AR_NEXT_DTIM,
3062 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP)); 1976 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
3063 REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP)); 1977 REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
@@ -3077,6 +1991,9 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3077 REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval)); 1991 REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
3078 REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod)); 1992 REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
3079 1993
1994 REGWRITE_BUFFER_FLUSH(ah);
1995 DISABLE_REGWRITE_BUFFER(ah);
1996
3080 REG_SET_BIT(ah, AR_TIMER_MODE, 1997 REG_SET_BIT(ah, AR_TIMER_MODE,
3081 AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN | 1998 AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
3082 AR_DTIM_TIMER_EN); 1999 AR_DTIM_TIMER_EN);
@@ -3219,7 +2136,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
3219 else 2136 else
3220 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD; 2137 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
3221 2138
3222 if (AR_SREV_9285_10_OR_LATER(ah)) 2139 if (AR_SREV_9271(ah))
2140 pCap->num_gpio_pins = AR9271_NUM_GPIO;
2141 else if (AR_SREV_9285_10_OR_LATER(ah))
3223 pCap->num_gpio_pins = AR9285_NUM_GPIO; 2142 pCap->num_gpio_pins = AR9285_NUM_GPIO;
3224 else if (AR_SREV_9280_10_OR_LATER(ah)) 2143 else if (AR_SREV_9280_10_OR_LATER(ah))
3225 pCap->num_gpio_pins = AR928X_NUM_GPIO; 2144 pCap->num_gpio_pins = AR928X_NUM_GPIO;
@@ -3246,8 +2165,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
3246 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT; 2165 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
3247 } 2166 }
3248#endif 2167#endif
3249 2168 if (AR_SREV_9271(ah))
3250 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP; 2169 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
2170 else
2171 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
3251 2172
3252 if (AR_SREV_9280(ah) || AR_SREV_9285(ah)) 2173 if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
3253 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS; 2174 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
@@ -3291,6 +2212,26 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
3291 btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE; 2212 btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
3292 } 2213 }
3293 2214
2215 if (AR_SREV_9300_20_OR_LATER(ah)) {
2216 pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_LDPC |
2217 ATH9K_HW_CAP_FASTCLOCK;
2218 pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
2219 pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
2220 pCap->rx_status_len = sizeof(struct ar9003_rxs);
2221 pCap->tx_desc_len = sizeof(struct ar9003_txc);
2222 pCap->txs_len = sizeof(struct ar9003_txs);
2223 } else {
2224 pCap->tx_desc_len = sizeof(struct ath_desc);
2225 if (AR_SREV_9280_20(ah) &&
2226 ((ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV) <=
2227 AR5416_EEP_MINOR_VER_16) ||
2228 ah->eep_ops->get_eeprom(ah, EEP_FSTCLK_5G)))
2229 pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
2230 }
2231
2232 if (AR_SREV_9300_20_OR_LATER(ah))
2233 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
2234
3294 return 0; 2235 return 0;
3295} 2236}
3296 2237
@@ -3323,10 +2264,6 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3323 case ATH9K_CAP_TKIP_SPLIT: 2264 case ATH9K_CAP_TKIP_SPLIT:
3324 return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ? 2265 return (ah->misc_mode & AR_PCU_MIC_NEW_LOC_ENA) ?
3325 false : true; 2266 false : true;
3326 case ATH9K_CAP_DIVERSITY:
3327 return (REG_READ(ah, AR_PHY_CCK_DETECT) &
3328 AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV) ?
3329 true : false;
3330 case ATH9K_CAP_MCAST_KEYSRCH: 2267 case ATH9K_CAP_MCAST_KEYSRCH:
3331 switch (capability) { 2268 switch (capability) {
3332 case 0: 2269 case 0:
@@ -3369,8 +2306,6 @@ EXPORT_SYMBOL(ath9k_hw_getcapability);
3369bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type, 2306bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3370 u32 capability, u32 setting, int *status) 2307 u32 capability, u32 setting, int *status)
3371{ 2308{
3372 u32 v;
3373
3374 switch (type) { 2309 switch (type) {
3375 case ATH9K_CAP_TKIP_MIC: 2310 case ATH9K_CAP_TKIP_MIC:
3376 if (setting) 2311 if (setting)
@@ -3380,14 +2315,6 @@ bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3380 ah->sta_id1_defaults &= 2315 ah->sta_id1_defaults &=
3381 ~AR_STA_ID1_CRPT_MIC_ENABLE; 2316 ~AR_STA_ID1_CRPT_MIC_ENABLE;
3382 return true; 2317 return true;
3383 case ATH9K_CAP_DIVERSITY:
3384 v = REG_READ(ah, AR_PHY_CCK_DETECT);
3385 if (setting)
3386 v |= AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
3387 else
3388 v &= ~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV;
3389 REG_WRITE(ah, AR_PHY_CCK_DETECT, v);
3390 return true;
3391 case ATH9K_CAP_MCAST_KEYSRCH: 2318 case ATH9K_CAP_MCAST_KEYSRCH:
3392 if (setting) 2319 if (setting)
3393 ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH; 2320 ah->sta_id1_defaults |= AR_STA_ID1_MCAST_KSRCH;
@@ -3455,7 +2382,11 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
3455 if (gpio >= ah->caps.num_gpio_pins) 2382 if (gpio >= ah->caps.num_gpio_pins)
3456 return 0xffffffff; 2383 return 0xffffffff;
3457 2384
3458 if (AR_SREV_9287_10_OR_LATER(ah)) 2385 if (AR_SREV_9300_20_OR_LATER(ah))
2386 return MS_REG_READ(AR9300, gpio) != 0;
2387 else if (AR_SREV_9271(ah))
2388 return MS_REG_READ(AR9271, gpio) != 0;
2389 else if (AR_SREV_9287_10_OR_LATER(ah))
3459 return MS_REG_READ(AR9287, gpio) != 0; 2390 return MS_REG_READ(AR9287, gpio) != 0;
3460 else if (AR_SREV_9285_10_OR_LATER(ah)) 2391 else if (AR_SREV_9285_10_OR_LATER(ah))
3461 return MS_REG_READ(AR9285, gpio) != 0; 2392 return MS_REG_READ(AR9285, gpio) != 0;
@@ -3484,6 +2415,9 @@ EXPORT_SYMBOL(ath9k_hw_cfg_output);
3484 2415
3485void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) 2416void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
3486{ 2417{
2418 if (AR_SREV_9271(ah))
2419 val = ~val;
2420
3487 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), 2421 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
3488 AR_GPIO_BIT(gpio)); 2422 AR_GPIO_BIT(gpio));
3489} 2423}
@@ -3523,6 +2457,8 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
3523{ 2457{
3524 u32 phybits; 2458 u32 phybits;
3525 2459
2460 ENABLE_REGWRITE_BUFFER(ah);
2461
3526 REG_WRITE(ah, AR_RX_FILTER, bits); 2462 REG_WRITE(ah, AR_RX_FILTER, bits);
3527 2463
3528 phybits = 0; 2464 phybits = 0;
@@ -3538,6 +2474,9 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
3538 else 2474 else
3539 REG_WRITE(ah, AR_RXCFG, 2475 REG_WRITE(ah, AR_RXCFG,
3540 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA); 2476 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
2477
2478 REGWRITE_BUFFER_FLUSH(ah);
2479 DISABLE_REGWRITE_BUFFER(ah);
3541} 2480}
3542EXPORT_SYMBOL(ath9k_hw_setrxfilter); 2481EXPORT_SYMBOL(ath9k_hw_setrxfilter);
3543 2482
@@ -3610,14 +2549,25 @@ void ath9k_hw_write_associd(struct ath_hw *ah)
3610} 2549}
3611EXPORT_SYMBOL(ath9k_hw_write_associd); 2550EXPORT_SYMBOL(ath9k_hw_write_associd);
3612 2551
2552#define ATH9K_MAX_TSF_READ 10
2553
3613u64 ath9k_hw_gettsf64(struct ath_hw *ah) 2554u64 ath9k_hw_gettsf64(struct ath_hw *ah)
3614{ 2555{
3615 u64 tsf; 2556 u32 tsf_lower, tsf_upper1, tsf_upper2;
2557 int i;
2558
2559 tsf_upper1 = REG_READ(ah, AR_TSF_U32);
2560 for (i = 0; i < ATH9K_MAX_TSF_READ; i++) {
2561 tsf_lower = REG_READ(ah, AR_TSF_L32);
2562 tsf_upper2 = REG_READ(ah, AR_TSF_U32);
2563 if (tsf_upper2 == tsf_upper1)
2564 break;
2565 tsf_upper1 = tsf_upper2;
2566 }
3616 2567
3617 tsf = REG_READ(ah, AR_TSF_U32); 2568 WARN_ON( i == ATH9K_MAX_TSF_READ );
3618 tsf = (tsf << 32) | REG_READ(ah, AR_TSF_L32);
3619 2569
3620 return tsf; 2570 return (((u64)tsf_upper1 << 32) | tsf_lower);
3621} 2571}
3622EXPORT_SYMBOL(ath9k_hw_gettsf64); 2572EXPORT_SYMBOL(ath9k_hw_gettsf64);
3623 2573
@@ -3868,6 +2818,16 @@ void ath_gen_timer_isr(struct ath_hw *ah)
3868} 2818}
3869EXPORT_SYMBOL(ath_gen_timer_isr); 2819EXPORT_SYMBOL(ath_gen_timer_isr);
3870 2820
2821/********/
2822/* HTC */
2823/********/
2824
2825void ath9k_hw_htc_resetinit(struct ath_hw *ah)
2826{
2827 ah->htc_reset_init = true;
2828}
2829EXPORT_SYMBOL(ath9k_hw_htc_resetinit);
2830
3871static struct { 2831static struct {
3872 u32 version; 2832 u32 version;
3873 const char * name; 2833 const char * name;
@@ -3882,6 +2842,7 @@ static struct {
3882 { AR_SREV_VERSION_9285, "9285" }, 2842 { AR_SREV_VERSION_9285, "9285" },
3883 { AR_SREV_VERSION_9287, "9287" }, 2843 { AR_SREV_VERSION_9287, "9287" },
3884 { AR_SREV_VERSION_9271, "9271" }, 2844 { AR_SREV_VERSION_9271, "9271" },
2845 { AR_SREV_VERSION_9300, "9300" },
3885}; 2846};
3886 2847
3887/* For devices with external radios */ 2848/* For devices with external radios */
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dbbf7ca5f97d..77245dff5993 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc. 2 * Copyright (c) 2008-2010 Atheros Communications Inc.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 5 * purpose with or without fee is hereby granted, provided that the above
@@ -41,18 +41,16 @@
41#define AR9280_DEVID_PCIE 0x002a 41#define AR9280_DEVID_PCIE 0x002a
42#define AR9285_DEVID_PCIE 0x002b 42#define AR9285_DEVID_PCIE 0x002b
43#define AR2427_DEVID_PCIE 0x002c 43#define AR2427_DEVID_PCIE 0x002c
44#define AR9287_DEVID_PCI 0x002d
45#define AR9287_DEVID_PCIE 0x002e
46#define AR9300_DEVID_PCIE 0x0030
44 47
45#define AR5416_AR9100_DEVID 0x000b 48#define AR5416_AR9100_DEVID 0x000b
46 49
47#define AR9271_USB 0x9271
48
49#define AR_SUBVENDOR_ID_NOG 0x0e11 50#define AR_SUBVENDOR_ID_NOG 0x0e11
50#define AR_SUBVENDOR_ID_NEW_A 0x7065 51#define AR_SUBVENDOR_ID_NEW_A 0x7065
51#define AR5416_MAGIC 0x19641014 52#define AR5416_MAGIC 0x19641014
52 53
53#define AR5416_DEVID_AR9287_PCI 0x002D
54#define AR5416_DEVID_AR9287_PCIE 0x002E
55
56#define AR9280_COEX2WIRE_SUBSYSID 0x309b 54#define AR9280_COEX2WIRE_SUBSYSID 0x309b
57#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa 55#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
58#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab 56#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
@@ -70,6 +68,24 @@
70#define REG_READ(_ah, _reg) \ 68#define REG_READ(_ah, _reg) \
71 ath9k_hw_common(_ah)->ops->read((_ah), (_reg)) 69 ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
72 70
71#define ENABLE_REGWRITE_BUFFER(_ah) \
72 do { \
73 if (AR_SREV_9271(_ah)) \
74 ath9k_hw_common(_ah)->ops->enable_write_buffer((_ah)); \
75 } while (0)
76
77#define DISABLE_REGWRITE_BUFFER(_ah) \
78 do { \
79 if (AR_SREV_9271(_ah)) \
80 ath9k_hw_common(_ah)->ops->disable_write_buffer((_ah)); \
81 } while (0)
82
83#define REGWRITE_BUFFER_FLUSH(_ah) \
84 do { \
85 if (AR_SREV_9271(_ah)) \
86 ath9k_hw_common(_ah)->ops->write_flush((_ah)); \
87 } while (0)
88
73#define SM(_v, _f) (((_v) << _f##_S) & _f) 89#define SM(_v, _f) (((_v) << _f##_S) & _f)
74#define MS(_v, _f) (((_v) & _f) >> _f##_S) 90#define MS(_v, _f) (((_v) & _f) >> _f##_S)
75#define REG_RMW(_a, _r, _set, _clr) \ 91#define REG_RMW(_a, _r, _set, _clr) \
@@ -77,6 +93,8 @@
77#define REG_RMW_FIELD(_a, _r, _f, _v) \ 93#define REG_RMW_FIELD(_a, _r, _f, _v) \
78 REG_WRITE(_a, _r, \ 94 REG_WRITE(_a, _r, \
79 (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f)) 95 (REG_READ(_a, _r) & ~_f) | (((_v) << _f##_S) & _f))
96#define REG_READ_FIELD(_a, _r, _f) \
97 (((REG_READ(_a, _r) & _f) >> _f##_S))
80#define REG_SET_BIT(_a, _r, _f) \ 98#define REG_SET_BIT(_a, _r, _f) \
81 REG_WRITE(_a, _r, REG_READ(_a, _r) | _f) 99 REG_WRITE(_a, _r, REG_READ(_a, _r) | _f)
82#define REG_CLR_BIT(_a, _r, _f) \ 100#define REG_CLR_BIT(_a, _r, _f) \
@@ -137,6 +155,16 @@
137 155
138#define TU_TO_USEC(_tu) ((_tu) << 10) 156#define TU_TO_USEC(_tu) ((_tu) << 10)
139 157
158#define ATH9K_HW_RX_HP_QDEPTH 16
159#define ATH9K_HW_RX_LP_QDEPTH 128
160
161enum ath_ini_subsys {
162 ATH_INI_PRE = 0,
163 ATH_INI_CORE,
164 ATH_INI_POST,
165 ATH_INI_NUM_SPLIT,
166};
167
140enum wireless_mode { 168enum wireless_mode {
141 ATH9K_MODE_11A = 0, 169 ATH9K_MODE_11A = 0,
142 ATH9K_MODE_11G, 170 ATH9K_MODE_11G,
@@ -167,13 +195,16 @@ enum ath9k_hw_caps {
167 ATH9K_HW_CAP_ENHANCEDPM = BIT(14), 195 ATH9K_HW_CAP_ENHANCEDPM = BIT(14),
168 ATH9K_HW_CAP_AUTOSLEEP = BIT(15), 196 ATH9K_HW_CAP_AUTOSLEEP = BIT(15),
169 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(16), 197 ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(16),
198 ATH9K_HW_CAP_EDMA = BIT(17),
199 ATH9K_HW_CAP_RAC_SUPPORTED = BIT(18),
200 ATH9K_HW_CAP_LDPC = BIT(19),
201 ATH9K_HW_CAP_FASTCLOCK = BIT(20),
170}; 202};
171 203
172enum ath9k_capability_type { 204enum ath9k_capability_type {
173 ATH9K_CAP_CIPHER = 0, 205 ATH9K_CAP_CIPHER = 0,
174 ATH9K_CAP_TKIP_MIC, 206 ATH9K_CAP_TKIP_MIC,
175 ATH9K_CAP_TKIP_SPLIT, 207 ATH9K_CAP_TKIP_SPLIT,
176 ATH9K_CAP_DIVERSITY,
177 ATH9K_CAP_TXPOW, 208 ATH9K_CAP_TXPOW,
178 ATH9K_CAP_MCAST_KEYSRCH, 209 ATH9K_CAP_MCAST_KEYSRCH,
179 ATH9K_CAP_DS 210 ATH9K_CAP_DS
@@ -194,6 +225,11 @@ struct ath9k_hw_capabilities {
194 u8 num_gpio_pins; 225 u8 num_gpio_pins;
195 u8 num_antcfg_2ghz; 226 u8 num_antcfg_2ghz;
196 u8 num_antcfg_5ghz; 227 u8 num_antcfg_5ghz;
228 u8 rx_hp_qdepth;
229 u8 rx_lp_qdepth;
230 u8 rx_status_len;
231 u8 tx_desc_len;
232 u8 txs_len;
197}; 233};
198 234
199struct ath9k_ops_config { 235struct ath9k_ops_config {
@@ -214,6 +250,7 @@ struct ath9k_ops_config {
214 u32 enable_ani; 250 u32 enable_ani;
215 int serialize_regmode; 251 int serialize_regmode;
216 bool rx_intr_mitigation; 252 bool rx_intr_mitigation;
253 bool tx_intr_mitigation;
217#define SPUR_DISABLE 0 254#define SPUR_DISABLE 0
218#define SPUR_ENABLE_IOCTL 1 255#define SPUR_ENABLE_IOCTL 1
219#define SPUR_ENABLE_EEPROM 2 256#define SPUR_ENABLE_EEPROM 2
@@ -225,6 +262,7 @@ struct ath9k_ops_config {
225#define AR_BASE_FREQ_5GHZ 4900 262#define AR_BASE_FREQ_5GHZ 4900
226#define AR_SPUR_FEEQ_BOUND_HT40 19 263#define AR_SPUR_FEEQ_BOUND_HT40 19
227#define AR_SPUR_FEEQ_BOUND_HT20 10 264#define AR_SPUR_FEEQ_BOUND_HT20 10
265 bool tx_iq_calibration; /* Only available for >= AR9003 */
228 int spurmode; 266 int spurmode;
229 u16 spurchans[AR_EEPROM_MODAL_SPURS][2]; 267 u16 spurchans[AR_EEPROM_MODAL_SPURS][2];
230 u8 max_txtrig_level; 268 u8 max_txtrig_level;
@@ -233,6 +271,8 @@ struct ath9k_ops_config {
233enum ath9k_int { 271enum ath9k_int {
234 ATH9K_INT_RX = 0x00000001, 272 ATH9K_INT_RX = 0x00000001,
235 ATH9K_INT_RXDESC = 0x00000002, 273 ATH9K_INT_RXDESC = 0x00000002,
274 ATH9K_INT_RXHP = 0x00000001,
275 ATH9K_INT_RXLP = 0x00000002,
236 ATH9K_INT_RXNOFRM = 0x00000008, 276 ATH9K_INT_RXNOFRM = 0x00000008,
237 ATH9K_INT_RXEOL = 0x00000010, 277 ATH9K_INT_RXEOL = 0x00000010,
238 ATH9K_INT_RXORN = 0x00000020, 278 ATH9K_INT_RXORN = 0x00000020,
@@ -329,10 +369,9 @@ struct ath9k_channel {
329#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0) 369#define IS_CHAN_2GHZ(_c) (((_c)->channelFlags & CHANNEL_2GHZ) != 0)
330#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0) 370#define IS_CHAN_HALF_RATE(_c) (((_c)->channelFlags & CHANNEL_HALF) != 0)
331#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0) 371#define IS_CHAN_QUARTER_RATE(_c) (((_c)->channelFlags & CHANNEL_QUARTER) != 0)
332#define IS_CHAN_A_5MHZ_SPACED(_c) \ 372#define IS_CHAN_A_FAST_CLOCK(_ah, _c) \
333 ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \ 373 ((((_c)->channelFlags & CHANNEL_5GHZ) != 0) && \
334 (((_c)->channel % 20) != 0) && \ 374 ((_ah)->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK))
335 (((_c)->channel % 10) != 0))
336 375
337/* These macros check chanmode and not channelFlags */ 376/* These macros check chanmode and not channelFlags */
338#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B) 377#define IS_CHAN_B(_c) ((_c)->chanmode == CHANNEL_B)
@@ -365,6 +404,12 @@ enum ser_reg_mode {
365 SER_REG_MODE_AUTO = 2, 404 SER_REG_MODE_AUTO = 2,
366}; 405};
367 406
407enum ath9k_rx_qtype {
408 ATH9K_RX_QUEUE_HP,
409 ATH9K_RX_QUEUE_LP,
410 ATH9K_RX_QUEUE_MAX,
411};
412
368struct ath9k_beacon_state { 413struct ath9k_beacon_state {
369 u32 bs_nexttbtt; 414 u32 bs_nexttbtt;
370 u32 bs_nextdtim; 415 u32 bs_nextdtim;
@@ -442,6 +487,124 @@ struct ath_gen_timer_table {
442 } timer_mask; 487 } timer_mask;
443}; 488};
444 489
490/**
491 * struct ath_hw_private_ops - callbacks used internally by hardware code
492 *
493 * This structure contains private callbacks designed to only be used internally
494 * by the hardware core.
495 *
496 * @init_cal_settings: setup types of calibrations supported
497 * @init_cal: starts actual calibration
498 *
499 * @init_mode_regs: Initializes mode registers
500 * @init_mode_gain_regs: Initialize TX/RX gain registers
501 * @macversion_supported: If this specific mac revision is supported
502 *
503 * @rf_set_freq: change frequency
504 * @spur_mitigate_freq: spur mitigation
505 * @rf_alloc_ext_banks:
506 * @rf_free_ext_banks:
507 * @set_rf_regs:
508 * @compute_pll_control: compute the PLL control value to use for
509 * AR_RTC_PLL_CONTROL for a given channel
510 * @setup_calibration: set up calibration
511 * @iscal_supported: used to query if a type of calibration is supported
512 * @loadnf: load noise floor read from each chain on the CCA registers
513 */
514struct ath_hw_private_ops {
515 /* Calibration ops */
516 void (*init_cal_settings)(struct ath_hw *ah);
517 bool (*init_cal)(struct ath_hw *ah, struct ath9k_channel *chan);
518
519 void (*init_mode_regs)(struct ath_hw *ah);
520 void (*init_mode_gain_regs)(struct ath_hw *ah);
521 bool (*macversion_supported)(u32 macversion);
522 void (*setup_calibration)(struct ath_hw *ah,
523 struct ath9k_cal_list *currCal);
524 bool (*iscal_supported)(struct ath_hw *ah,
525 enum ath9k_cal_types calType);
526
527 /* PHY ops */
528 int (*rf_set_freq)(struct ath_hw *ah,
529 struct ath9k_channel *chan);
530 void (*spur_mitigate_freq)(struct ath_hw *ah,
531 struct ath9k_channel *chan);
532 int (*rf_alloc_ext_banks)(struct ath_hw *ah);
533 void (*rf_free_ext_banks)(struct ath_hw *ah);
534 bool (*set_rf_regs)(struct ath_hw *ah,
535 struct ath9k_channel *chan,
536 u16 modesIndex);
537 void (*set_channel_regs)(struct ath_hw *ah, struct ath9k_channel *chan);
538 void (*init_bb)(struct ath_hw *ah,
539 struct ath9k_channel *chan);
540 int (*process_ini)(struct ath_hw *ah, struct ath9k_channel *chan);
541 void (*olc_init)(struct ath_hw *ah);
542 void (*set_rfmode)(struct ath_hw *ah, struct ath9k_channel *chan);
543 void (*mark_phy_inactive)(struct ath_hw *ah);
544 void (*set_delta_slope)(struct ath_hw *ah, struct ath9k_channel *chan);
545 bool (*rfbus_req)(struct ath_hw *ah);
546 void (*rfbus_done)(struct ath_hw *ah);
547 void (*enable_rfkill)(struct ath_hw *ah);
548 void (*restore_chainmask)(struct ath_hw *ah);
549 void (*set_diversity)(struct ath_hw *ah, bool value);
550 u32 (*compute_pll_control)(struct ath_hw *ah,
551 struct ath9k_channel *chan);
552 bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd,
553 int param);
554 void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
555 void (*loadnf)(struct ath_hw *ah, struct ath9k_channel *chan);
556};
557
558/**
559 * struct ath_hw_ops - callbacks used by hardware code and driver code
560 *
561 * This structure contains callbacks designed to to be used internally by
562 * hardware code and also by the lower level driver.
563 *
564 * @config_pci_powersave:
565 * @calibrate: periodic calibration for NF, ANI, IQ, ADC gain, ADC-DC
566 */
567struct ath_hw_ops {
568 void (*config_pci_powersave)(struct ath_hw *ah,
569 int restore,
570 int power_off);
571 void (*rx_enable)(struct ath_hw *ah);
572 void (*set_desc_link)(void *ds, u32 link);
573 void (*get_desc_link)(void *ds, u32 **link);
574 bool (*calibrate)(struct ath_hw *ah,
575 struct ath9k_channel *chan,
576 u8 rxchainmask,
577 bool longcal);
578 bool (*get_isr)(struct ath_hw *ah, enum ath9k_int *masked);
579 void (*fill_txdesc)(struct ath_hw *ah, void *ds, u32 seglen,
580 bool is_firstseg, bool is_is_lastseg,
581 const void *ds0, dma_addr_t buf_addr,
582 unsigned int qcu);
583 int (*proc_txdesc)(struct ath_hw *ah, void *ds,
584 struct ath_tx_status *ts);
585 void (*set11n_txdesc)(struct ath_hw *ah, void *ds,
586 u32 pktLen, enum ath9k_pkt_type type,
587 u32 txPower, u32 keyIx,
588 enum ath9k_key_type keyType,
589 u32 flags);
590 void (*set11n_ratescenario)(struct ath_hw *ah, void *ds,
591 void *lastds,
592 u32 durUpdateEn, u32 rtsctsRate,
593 u32 rtsctsDuration,
594 struct ath9k_11n_rate_series series[],
595 u32 nseries, u32 flags);
596 void (*set11n_aggr_first)(struct ath_hw *ah, void *ds,
597 u32 aggrLen);
598 void (*set11n_aggr_middle)(struct ath_hw *ah, void *ds,
599 u32 numDelims);
600 void (*set11n_aggr_last)(struct ath_hw *ah, void *ds);
601 void (*clr11n_aggr)(struct ath_hw *ah, void *ds);
602 void (*set11n_burstduration)(struct ath_hw *ah, void *ds,
603 u32 burstDuration);
604 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
605 u32 vmf);
606};
607
445struct ath_hw { 608struct ath_hw {
446 struct ieee80211_hw *hw; 609 struct ieee80211_hw *hw;
447 struct ath_common common; 610 struct ath_common common;
@@ -455,13 +618,18 @@ struct ath_hw {
455 struct ar5416_eeprom_def def; 618 struct ar5416_eeprom_def def;
456 struct ar5416_eeprom_4k map4k; 619 struct ar5416_eeprom_4k map4k;
457 struct ar9287_eeprom map9287; 620 struct ar9287_eeprom map9287;
621 struct ar9300_eeprom ar9300_eep;
458 } eeprom; 622 } eeprom;
459 const struct eeprom_ops *eep_ops; 623 const struct eeprom_ops *eep_ops;
460 enum ath9k_eep_map eep_map;
461 624
462 bool sw_mgmt_crypto; 625 bool sw_mgmt_crypto;
463 bool is_pciexpress; 626 bool is_pciexpress;
627 bool need_an_top2_fixup;
464 u16 tx_trig_level; 628 u16 tx_trig_level;
629 s16 nf_2g_max;
630 s16 nf_2g_min;
631 s16 nf_5g_max;
632 s16 nf_5g_min;
465 u16 rfsilent; 633 u16 rfsilent;
466 u32 rfkill_gpio; 634 u32 rfkill_gpio;
467 u32 rfkill_polarity; 635 u32 rfkill_polarity;
@@ -478,7 +646,8 @@ struct ath_hw {
478 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; 646 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
479 647
480 int16_t curchan_rad_index; 648 int16_t curchan_rad_index;
481 u32 mask_reg; 649 enum ath9k_int imask;
650 u32 imrs2_reg;
482 u32 txok_interrupt_mask; 651 u32 txok_interrupt_mask;
483 u32 txerr_interrupt_mask; 652 u32 txerr_interrupt_mask;
484 u32 txdesc_interrupt_mask; 653 u32 txdesc_interrupt_mask;
@@ -493,6 +662,7 @@ struct ath_hw {
493 struct ath9k_cal_list adcgain_caldata; 662 struct ath9k_cal_list adcgain_caldata;
494 struct ath9k_cal_list adcdc_calinitdata; 663 struct ath9k_cal_list adcdc_calinitdata;
495 struct ath9k_cal_list adcdc_caldata; 664 struct ath9k_cal_list adcdc_caldata;
665 struct ath9k_cal_list tempCompCalData;
496 struct ath9k_cal_list *cal_list; 666 struct ath9k_cal_list *cal_list;
497 struct ath9k_cal_list *cal_list_last; 667 struct ath9k_cal_list *cal_list_last;
498 struct ath9k_cal_list *cal_list_curr; 668 struct ath9k_cal_list *cal_list_curr;
@@ -533,12 +703,10 @@ struct ath_hw {
533 DONT_USE_32KHZ, 703 DONT_USE_32KHZ,
534 } enable_32kHz_clock; 704 } enable_32kHz_clock;
535 705
536 /* Callback for radio frequency change */ 706 /* Private to hardware code */
537 int (*ath9k_hw_rf_set_freq)(struct ath_hw *ah, struct ath9k_channel *chan); 707 struct ath_hw_private_ops private_ops;
538 708 /* Accessed by the lower level driver */
539 /* Callback for baseband spur frequency */ 709 struct ath_hw_ops ops;
540 void (*ath9k_hw_spur_mitigate_freq)(struct ath_hw *ah,
541 struct ath9k_channel *chan);
542 710
543 /* Used to program the radio on non single-chip devices */ 711 /* Used to program the radio on non single-chip devices */
544 u32 *analogBank0Data; 712 u32 *analogBank0Data;
@@ -551,6 +719,7 @@ struct ath_hw {
551 u32 *addac5416_21; 719 u32 *addac5416_21;
552 u32 *bank6Temp; 720 u32 *bank6Temp;
553 721
722 u8 txpower_limit;
554 int16_t txpower_indexoffset; 723 int16_t txpower_indexoffset;
555 int coverage_class; 724 int coverage_class;
556 u32 beacon_interval; 725 u32 beacon_interval;
@@ -592,16 +761,34 @@ struct ath_hw {
592 struct ar5416IniArray iniBank7; 761 struct ar5416IniArray iniBank7;
593 struct ar5416IniArray iniAddac; 762 struct ar5416IniArray iniAddac;
594 struct ar5416IniArray iniPcieSerdes; 763 struct ar5416IniArray iniPcieSerdes;
764 struct ar5416IniArray iniPcieSerdesLowPower;
595 struct ar5416IniArray iniModesAdditional; 765 struct ar5416IniArray iniModesAdditional;
596 struct ar5416IniArray iniModesRxGain; 766 struct ar5416IniArray iniModesRxGain;
597 struct ar5416IniArray iniModesTxGain; 767 struct ar5416IniArray iniModesTxGain;
598 struct ar5416IniArray iniModes_9271_1_0_only; 768 struct ar5416IniArray iniModes_9271_1_0_only;
599 struct ar5416IniArray iniCckfirNormal; 769 struct ar5416IniArray iniCckfirNormal;
600 struct ar5416IniArray iniCckfirJapan2484; 770 struct ar5416IniArray iniCckfirJapan2484;
771 struct ar5416IniArray iniCommon_normal_cck_fir_coeff_9271;
772 struct ar5416IniArray iniCommon_japan_2484_cck_fir_coeff_9271;
773 struct ar5416IniArray iniModes_9271_ANI_reg;
774 struct ar5416IniArray iniModes_high_power_tx_gain_9271;
775 struct ar5416IniArray iniModes_normal_power_tx_gain_9271;
776
777 struct ar5416IniArray iniMac[ATH_INI_NUM_SPLIT];
778 struct ar5416IniArray iniBB[ATH_INI_NUM_SPLIT];
779 struct ar5416IniArray iniRadio[ATH_INI_NUM_SPLIT];
780 struct ar5416IniArray iniSOC[ATH_INI_NUM_SPLIT];
601 781
602 u32 intr_gen_timer_trigger; 782 u32 intr_gen_timer_trigger;
603 u32 intr_gen_timer_thresh; 783 u32 intr_gen_timer_thresh;
604 struct ath_gen_timer_table hw_gen_timers; 784 struct ath_gen_timer_table hw_gen_timers;
785
786 struct ar9003_txs *ts_ring;
787 void *ts_start;
788 u32 ts_paddr_start;
789 u32 ts_paddr_end;
790 u16 ts_tail;
791 u8 ts_size;
605}; 792};
606 793
607static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) 794static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -614,6 +801,16 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
614 return &(ath9k_hw_common(ah)->regulatory); 801 return &(ath9k_hw_common(ah)->regulatory);
615} 802}
616 803
804static inline struct ath_hw_private_ops *ath9k_hw_private_ops(struct ath_hw *ah)
805{
806 return &ah->private_ops;
807}
808
809static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah)
810{
811 return &ah->ops;
812}
813
617/* Initialization, Detach, Reset */ 814/* Initialization, Detach, Reset */
618const char *ath9k_hw_probe(u16 vendorid, u16 devid); 815const char *ath9k_hw_probe(u16 vendorid, u16 devid);
619void ath9k_hw_deinit(struct ath_hw *ah); 816void ath9k_hw_deinit(struct ath_hw *ah);
@@ -625,6 +822,7 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
625 u32 capability, u32 *result); 822 u32 capability, u32 *result);
626bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type, 823bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
627 u32 capability, u32 setting, int *status); 824 u32 capability, u32 setting, int *status);
825u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
628 826
629/* Key Cache Management */ 827/* Key Cache Management */
630bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry); 828bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry);
@@ -673,16 +871,10 @@ void ath9k_hw_set11nmac2040(struct ath_hw *ah);
673void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); 871void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
674void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 872void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
675 const struct ath9k_beacon_state *bs); 873 const struct ath9k_beacon_state *bs);
874bool ath9k_hw_check_alive(struct ath_hw *ah);
676 875
677bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode); 876bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
678 877
679void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off);
680
681/* Interrupt Handling */
682bool ath9k_hw_intrpend(struct ath_hw *ah);
683bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked);
684enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints);
685
686/* Generic hw timer primitives */ 878/* Generic hw timer primitives */
687struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, 879struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
688 void (*trigger)(void *), 880 void (*trigger)(void *),
@@ -701,6 +893,39 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
701 893
702void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len); 894void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
703 895
896/* HTC */
897void ath9k_hw_htc_resetinit(struct ath_hw *ah);
898
899/* PHY */
900void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
901 u32 *coef_mantissa, u32 *coef_exponent);
902
903/*
904 * Code Specific to AR5008, AR9001 or AR9002,
905 * we stuff these here to avoid callbacks for AR9003.
906 */
907void ar9002_hw_cck_chan14_spread(struct ath_hw *ah);
908int ar9002_hw_rf_claim(struct ath_hw *ah);
909void ar9002_hw_enable_async_fifo(struct ath_hw *ah);
910void ar9002_hw_enable_wep_aggregation(struct ath_hw *ah);
911
912/*
913 * Code specifric to AR9003, we stuff these here to avoid callbacks
914 * for older families
915 */
916void ar9003_hw_set_nf_limits(struct ath_hw *ah);
917
918/* Hardware family op attach helpers */
919void ar5008_hw_attach_phy_ops(struct ath_hw *ah);
920void ar9002_hw_attach_phy_ops(struct ath_hw *ah);
921void ar9003_hw_attach_phy_ops(struct ath_hw *ah);
922
923void ar9002_hw_attach_calib_ops(struct ath_hw *ah);
924void ar9003_hw_attach_calib_ops(struct ath_hw *ah);
925
926void ar9002_hw_attach_ops(struct ath_hw *ah);
927void ar9003_hw_attach_ops(struct ath_hw *ah);
928
704#define ATH_PCIE_CAP_LINK_CTRL 0x70 929#define ATH_PCIE_CAP_LINK_CTRL 0x70
705#define ATH_PCIE_CAP_LINK_L0S 1 930#define ATH_PCIE_CAP_LINK_L0S 1
706#define ATH_PCIE_CAP_LINK_L1 2 931#define ATH_PCIE_CAP_LINK_L1 2
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 3d4d897add6d..d457cb3bd772 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -175,6 +175,18 @@ static const struct ath_ops ath9k_common_ops = {
175 .write = ath9k_iowrite32, 175 .write = ath9k_iowrite32,
176}; 176};
177 177
178static int count_streams(unsigned int chainmask, int max)
179{
180 int streams = 0;
181
182 do {
183 if (++streams == max)
184 break;
185 } while ((chainmask = chainmask & (chainmask - 1)));
186
187 return streams;
188}
189
178/**************************/ 190/**************************/
179/* Initialization */ 191/* Initialization */
180/**************************/ 192/**************************/
@@ -182,8 +194,10 @@ static const struct ath_ops ath9k_common_ops = {
182static void setup_ht_cap(struct ath_softc *sc, 194static void setup_ht_cap(struct ath_softc *sc,
183 struct ieee80211_sta_ht_cap *ht_info) 195 struct ieee80211_sta_ht_cap *ht_info)
184{ 196{
185 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 197 struct ath_hw *ah = sc->sc_ah;
198 struct ath_common *common = ath9k_hw_common(ah);
186 u8 tx_streams, rx_streams; 199 u8 tx_streams, rx_streams;
200 int i, max_streams;
187 201
188 ht_info->ht_supported = true; 202 ht_info->ht_supported = true;
189 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 203 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
@@ -191,28 +205,40 @@ static void setup_ht_cap(struct ath_softc *sc,
191 IEEE80211_HT_CAP_SGI_40 | 205 IEEE80211_HT_CAP_SGI_40 |
192 IEEE80211_HT_CAP_DSSSCCK40; 206 IEEE80211_HT_CAP_DSSSCCK40;
193 207
208 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
209 ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
210
194 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 211 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
195 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 212 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
196 213
214 if (AR_SREV_9300_20_OR_LATER(ah))
215 max_streams = 3;
216 else
217 max_streams = 2;
218
219 if (AR_SREV_9280_10_OR_LATER(ah)) {
220 if (max_streams >= 2)
221 ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
222 ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
223 }
224
197 /* set up supported mcs set */ 225 /* set up supported mcs set */
198 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 226 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
199 tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ? 227 tx_streams = count_streams(common->tx_chainmask, max_streams);
200 1 : 2; 228 rx_streams = count_streams(common->rx_chainmask, max_streams);
201 rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ? 229
202 1 : 2; 230 ath_print(common, ATH_DBG_CONFIG,
231 "TX streams %d, RX streams: %d\n",
232 tx_streams, rx_streams);
203 233
204 if (tx_streams != rx_streams) { 234 if (tx_streams != rx_streams) {
205 ath_print(common, ATH_DBG_CONFIG,
206 "TX streams %d, RX streams: %d\n",
207 tx_streams, rx_streams);
208 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; 235 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
209 ht_info->mcs.tx_params |= ((tx_streams - 1) << 236 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
210 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); 237 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
211 } 238 }
212 239
213 ht_info->mcs.rx_mask[0] = 0xff; 240 for (i = 0; i < rx_streams; i++)
214 if (rx_streams >= 2) 241 ht_info->mcs.rx_mask[i] = 0xff;
215 ht_info->mcs.rx_mask[1] = 0xff;
216 242
217 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 243 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
218} 244}
@@ -235,31 +261,37 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
235*/ 261*/
236int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, 262int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
237 struct list_head *head, const char *name, 263 struct list_head *head, const char *name,
238 int nbuf, int ndesc) 264 int nbuf, int ndesc, bool is_tx)
239{ 265{
240#define DS2PHYS(_dd, _ds) \ 266#define DS2PHYS(_dd, _ds) \
241 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 267 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
242#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0) 268#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
243#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096) 269#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
244 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 270 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
245 struct ath_desc *ds; 271 u8 *ds;
246 struct ath_buf *bf; 272 struct ath_buf *bf;
247 int i, bsize, error; 273 int i, bsize, error, desc_len;
248 274
249 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n", 275 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
250 name, nbuf, ndesc); 276 name, nbuf, ndesc);
251 277
252 INIT_LIST_HEAD(head); 278 INIT_LIST_HEAD(head);
279
280 if (is_tx)
281 desc_len = sc->sc_ah->caps.tx_desc_len;
282 else
283 desc_len = sizeof(struct ath_desc);
284
253 /* ath_desc must be a multiple of DWORDs */ 285 /* ath_desc must be a multiple of DWORDs */
254 if ((sizeof(struct ath_desc) % 4) != 0) { 286 if ((desc_len % 4) != 0) {
255 ath_print(common, ATH_DBG_FATAL, 287 ath_print(common, ATH_DBG_FATAL,
256 "ath_desc not DWORD aligned\n"); 288 "ath_desc not DWORD aligned\n");
257 BUG_ON((sizeof(struct ath_desc) % 4) != 0); 289 BUG_ON((desc_len % 4) != 0);
258 error = -ENOMEM; 290 error = -ENOMEM;
259 goto fail; 291 goto fail;
260 } 292 }
261 293
262 dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; 294 dd->dd_desc_len = desc_len * nbuf * ndesc;
263 295
264 /* 296 /*
265 * Need additional DMA memory because we can't use 297 * Need additional DMA memory because we can't use
@@ -272,11 +304,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
272 u32 dma_len; 304 u32 dma_len;
273 305
274 while (ndesc_skipped) { 306 while (ndesc_skipped) {
275 dma_len = ndesc_skipped * sizeof(struct ath_desc); 307 dma_len = ndesc_skipped * desc_len;
276 dd->dd_desc_len += dma_len; 308 dd->dd_desc_len += dma_len;
277 309
278 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len); 310 ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
279 }; 311 }
280 } 312 }
281 313
282 /* allocate descriptors */ 314 /* allocate descriptors */
@@ -286,7 +318,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
286 error = -ENOMEM; 318 error = -ENOMEM;
287 goto fail; 319 goto fail;
288 } 320 }
289 ds = dd->dd_desc; 321 ds = (u8 *) dd->dd_desc;
290 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n", 322 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
291 name, ds, (u32) dd->dd_desc_len, 323 name, ds, (u32) dd->dd_desc_len,
292 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len); 324 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
@@ -300,7 +332,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
300 } 332 }
301 dd->dd_bufptr = bf; 333 dd->dd_bufptr = bf;
302 334
303 for (i = 0; i < nbuf; i++, bf++, ds += ndesc) { 335 for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
304 bf->bf_desc = ds; 336 bf->bf_desc = ds;
305 bf->bf_daddr = DS2PHYS(dd, ds); 337 bf->bf_daddr = DS2PHYS(dd, ds);
306 338
@@ -316,7 +348,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
316 ((caddr_t) dd->dd_desc + 348 ((caddr_t) dd->dd_desc +
317 dd->dd_desc_len)); 349 dd->dd_desc_len));
318 350
319 ds += ndesc; 351 ds += (desc_len * ndesc);
320 bf->bf_desc = ds; 352 bf->bf_desc = ds;
321 bf->bf_daddr = DS2PHYS(dd, ds); 353 bf->bf_daddr = DS2PHYS(dd, ds);
322 } 354 }
@@ -514,7 +546,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
514 common->tx_chainmask = sc->sc_ah->caps.tx_chainmask; 546 common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
515 common->rx_chainmask = sc->sc_ah->caps.rx_chainmask; 547 common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
516 548
517 ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL); 549 ath9k_hw_set_diversity(sc->sc_ah, true);
518 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah); 550 sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
519 551
520 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 552 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
@@ -568,13 +600,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
568 ath_read_cachesize(common, &csz); 600 ath_read_cachesize(common, &csz);
569 common->cachelsz = csz << 2; /* convert to bytes */ 601 common->cachelsz = csz << 2; /* convert to bytes */
570 602
603 /* Initializes the hardware for all supported chipsets */
571 ret = ath9k_hw_init(ah); 604 ret = ath9k_hw_init(ah);
572 if (ret) { 605 if (ret)
573 ath_print(common, ATH_DBG_FATAL,
574 "Unable to initialize hardware; "
575 "initialization status: %d\n", ret);
576 goto err_hw; 606 goto err_hw;
577 }
578 607
579 ret = ath9k_init_debug(ah); 608 ret = ath9k_init_debug(ah);
580 if (ret) { 609 if (ret) {
@@ -760,6 +789,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
760 789
761 tasklet_kill(&sc->intr_tq); 790 tasklet_kill(&sc->intr_tq);
762 tasklet_kill(&sc->bcon_tasklet); 791 tasklet_kill(&sc->bcon_tasklet);
792
793 kfree(sc->sc_ah);
794 sc->sc_ah = NULL;
763} 795}
764 796
765void ath9k_deinit_device(struct ath_softc *sc) 797void ath9k_deinit_device(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index efc420cd42bf..0e425cb4bbb1 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -25,14 +25,21 @@ static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
25 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask, 25 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
26 ah->txurn_interrupt_mask); 26 ah->txurn_interrupt_mask);
27 27
28 ENABLE_REGWRITE_BUFFER(ah);
29
28 REG_WRITE(ah, AR_IMR_S0, 30 REG_WRITE(ah, AR_IMR_S0,
29 SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK) 31 SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
30 | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC)); 32 | SM(ah->txdesc_interrupt_mask, AR_IMR_S0_QCU_TXDESC));
31 REG_WRITE(ah, AR_IMR_S1, 33 REG_WRITE(ah, AR_IMR_S1,
32 SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR) 34 SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
33 | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL)); 35 | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
34 REG_RMW_FIELD(ah, AR_IMR_S2, 36
35 AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask); 37 ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
38 ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
39 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
40
41 REGWRITE_BUFFER_FLUSH(ah);
42 DISABLE_REGWRITE_BUFFER(ah);
36} 43}
37 44
38u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q) 45u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
@@ -55,6 +62,18 @@ void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
55} 62}
56EXPORT_SYMBOL(ath9k_hw_txstart); 63EXPORT_SYMBOL(ath9k_hw_txstart);
57 64
65void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds)
66{
67 struct ar5416_desc *ads = AR5416DESC(ds);
68
69 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
70 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
71 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
72 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
73 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
74}
75EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
76
58u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q) 77u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
59{ 78{
60 u32 npend; 79 u32 npend;
@@ -103,7 +122,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
103 if (ah->tx_trig_level >= ah->config.max_txtrig_level) 122 if (ah->tx_trig_level >= ah->config.max_txtrig_level)
104 return false; 123 return false;
105 124
106 omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL); 125 omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);
107 126
108 txcfg = REG_READ(ah, AR_TXCFG); 127 txcfg = REG_READ(ah, AR_TXCFG);
109 curLevel = MS(txcfg, AR_FTRIG); 128 curLevel = MS(txcfg, AR_FTRIG);
@@ -205,280 +224,6 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
205} 224}
206EXPORT_SYMBOL(ath9k_hw_stoptxdma); 225EXPORT_SYMBOL(ath9k_hw_stoptxdma);
207 226
208void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
209 u32 segLen, bool firstSeg,
210 bool lastSeg, const struct ath_desc *ds0)
211{
212 struct ar5416_desc *ads = AR5416DESC(ds);
213
214 if (firstSeg) {
215 ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
216 } else if (lastSeg) {
217 ads->ds_ctl0 = 0;
218 ads->ds_ctl1 = segLen;
219 ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
220 ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
221 } else {
222 ads->ds_ctl0 = 0;
223 ads->ds_ctl1 = segLen | AR_TxMore;
224 ads->ds_ctl2 = 0;
225 ads->ds_ctl3 = 0;
226 }
227 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
228 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
229 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
230 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
231 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
232}
233EXPORT_SYMBOL(ath9k_hw_filltxdesc);
234
235void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
236{
237 struct ar5416_desc *ads = AR5416DESC(ds);
238
239 ads->ds_txstatus0 = ads->ds_txstatus1 = 0;
240 ads->ds_txstatus2 = ads->ds_txstatus3 = 0;
241 ads->ds_txstatus4 = ads->ds_txstatus5 = 0;
242 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
243 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
244}
245EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
246
247int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
248{
249 struct ar5416_desc *ads = AR5416DESC(ds);
250
251 if ((ads->ds_txstatus9 & AR_TxDone) == 0)
252 return -EINPROGRESS;
253
254 ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
255 ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp;
256 ds->ds_txstat.ts_status = 0;
257 ds->ds_txstat.ts_flags = 0;
258
259 if (ads->ds_txstatus1 & AR_FrmXmitOK)
260 ds->ds_txstat.ts_status |= ATH9K_TX_ACKED;
261 if (ads->ds_txstatus1 & AR_ExcessiveRetries)
262 ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY;
263 if (ads->ds_txstatus1 & AR_Filtered)
264 ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT;
265 if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
266 ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO;
267 ath9k_hw_updatetxtriglevel(ah, true);
268 }
269 if (ads->ds_txstatus9 & AR_TxOpExceeded)
270 ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP;
271 if (ads->ds_txstatus1 & AR_TxTimerExpired)
272 ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
273
274 if (ads->ds_txstatus1 & AR_DescCfgErr)
275 ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR;
276 if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
277 ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN;
278 ath9k_hw_updatetxtriglevel(ah, true);
279 }
280 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
281 ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
282 ath9k_hw_updatetxtriglevel(ah, true);
283 }
284 if (ads->ds_txstatus0 & AR_TxBaStatus) {
285 ds->ds_txstat.ts_flags |= ATH9K_TX_BA;
286 ds->ds_txstat.ba_low = ads->AR_BaBitmapLow;
287 ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh;
288 }
289
290 ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
291 switch (ds->ds_txstat.ts_rateindex) {
292 case 0:
293 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
294 break;
295 case 1:
296 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
297 break;
298 case 2:
299 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
300 break;
301 case 3:
302 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
303 break;
304 }
305
306 ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
307 ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
308 ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
309 ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
310 ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
311 ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
312 ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
313 ds->ds_txstat.evm0 = ads->AR_TxEVM0;
314 ds->ds_txstat.evm1 = ads->AR_TxEVM1;
315 ds->ds_txstat.evm2 = ads->AR_TxEVM2;
316 ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
317 ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
318 ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
319 ds->ds_txstat.ts_antenna = 0;
320
321 return 0;
322}
323EXPORT_SYMBOL(ath9k_hw_txprocdesc);
324
325void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
326 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
327 u32 keyIx, enum ath9k_key_type keyType, u32 flags)
328{
329 struct ar5416_desc *ads = AR5416DESC(ds);
330
331 txPower += ah->txpower_indexoffset;
332 if (txPower > 63)
333 txPower = 63;
334
335 ads->ds_ctl0 = (pktLen & AR_FrameLen)
336 | (flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
337 | SM(txPower, AR_XmitPower)
338 | (flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
339 | (flags & ATH9K_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
340 | (flags & ATH9K_TXDESC_INTREQ ? AR_TxIntrReq : 0)
341 | (keyIx != ATH9K_TXKEYIX_INVALID ? AR_DestIdxValid : 0);
342
343 ads->ds_ctl1 =
344 (keyIx != ATH9K_TXKEYIX_INVALID ? SM(keyIx, AR_DestIdx) : 0)
345 | SM(type, AR_FrameType)
346 | (flags & ATH9K_TXDESC_NOACK ? AR_NoAck : 0)
347 | (flags & ATH9K_TXDESC_EXT_ONLY ? AR_ExtOnly : 0)
348 | (flags & ATH9K_TXDESC_EXT_AND_CTL ? AR_ExtAndCtl : 0);
349
350 ads->ds_ctl6 = SM(keyType, AR_EncrType);
351
352 if (AR_SREV_9285(ah)) {
353 ads->ds_ctl8 = 0;
354 ads->ds_ctl9 = 0;
355 ads->ds_ctl10 = 0;
356 ads->ds_ctl11 = 0;
357 }
358}
359EXPORT_SYMBOL(ath9k_hw_set11n_txdesc);
360
361void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
362 struct ath_desc *lastds,
363 u32 durUpdateEn, u32 rtsctsRate,
364 u32 rtsctsDuration,
365 struct ath9k_11n_rate_series series[],
366 u32 nseries, u32 flags)
367{
368 struct ar5416_desc *ads = AR5416DESC(ds);
369 struct ar5416_desc *last_ads = AR5416DESC(lastds);
370 u32 ds_ctl0;
371
372 if (flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA)) {
373 ds_ctl0 = ads->ds_ctl0;
374
375 if (flags & ATH9K_TXDESC_RTSENA) {
376 ds_ctl0 &= ~AR_CTSEnable;
377 ds_ctl0 |= AR_RTSEnable;
378 } else {
379 ds_ctl0 &= ~AR_RTSEnable;
380 ds_ctl0 |= AR_CTSEnable;
381 }
382
383 ads->ds_ctl0 = ds_ctl0;
384 } else {
385 ads->ds_ctl0 =
386 (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
387 }
388
389 ads->ds_ctl2 = set11nTries(series, 0)
390 | set11nTries(series, 1)
391 | set11nTries(series, 2)
392 | set11nTries(series, 3)
393 | (durUpdateEn ? AR_DurUpdateEna : 0)
394 | SM(0, AR_BurstDur);
395
396 ads->ds_ctl3 = set11nRate(series, 0)
397 | set11nRate(series, 1)
398 | set11nRate(series, 2)
399 | set11nRate(series, 3);
400
401 ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
402 | set11nPktDurRTSCTS(series, 1);
403
404 ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
405 | set11nPktDurRTSCTS(series, 3);
406
407 ads->ds_ctl7 = set11nRateFlags(series, 0)
408 | set11nRateFlags(series, 1)
409 | set11nRateFlags(series, 2)
410 | set11nRateFlags(series, 3)
411 | SM(rtsctsRate, AR_RTSCTSRate);
412 last_ads->ds_ctl2 = ads->ds_ctl2;
413 last_ads->ds_ctl3 = ads->ds_ctl3;
414}
415EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario);
416
417void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
418 u32 aggrLen)
419{
420 struct ar5416_desc *ads = AR5416DESC(ds);
421
422 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
423 ads->ds_ctl6 &= ~AR_AggrLen;
424 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
425}
426EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first);
427
428void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
429 u32 numDelims)
430{
431 struct ar5416_desc *ads = AR5416DESC(ds);
432 unsigned int ctl6;
433
434 ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);
435
436 ctl6 = ads->ds_ctl6;
437 ctl6 &= ~AR_PadDelim;
438 ctl6 |= SM(numDelims, AR_PadDelim);
439 ads->ds_ctl6 = ctl6;
440}
441EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle);
442
443void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
444{
445 struct ar5416_desc *ads = AR5416DESC(ds);
446
447 ads->ds_ctl1 |= AR_IsAggr;
448 ads->ds_ctl1 &= ~AR_MoreAggr;
449 ads->ds_ctl6 &= ~AR_PadDelim;
450}
451EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last);
452
453void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
454{
455 struct ar5416_desc *ads = AR5416DESC(ds);
456
457 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
458}
459EXPORT_SYMBOL(ath9k_hw_clr11n_aggr);
460
461void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
462 u32 burstDuration)
463{
464 struct ar5416_desc *ads = AR5416DESC(ds);
465
466 ads->ds_ctl2 &= ~AR_BurstDur;
467 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
468}
469EXPORT_SYMBOL(ath9k_hw_set11n_burstduration);
470
471void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
472 u32 vmf)
473{
474 struct ar5416_desc *ads = AR5416DESC(ds);
475
476 if (vmf)
477 ads->ds_ctl0 |= AR_VirtMoreFrag;
478 else
479 ads->ds_ctl0 &= ~AR_VirtMoreFrag;
480}
481
482void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs) 227void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
483{ 228{
484 *txqs &= ah->intr_txqs; 229 *txqs &= ah->intr_txqs;
@@ -730,6 +475,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
730 } else 475 } else
731 cwMin = qi->tqi_cwmin; 476 cwMin = qi->tqi_cwmin;
732 477
478 ENABLE_REGWRITE_BUFFER(ah);
479
733 REG_WRITE(ah, AR_DLCL_IFS(q), 480 REG_WRITE(ah, AR_DLCL_IFS(q),
734 SM(cwMin, AR_D_LCL_IFS_CWMIN) | 481 SM(cwMin, AR_D_LCL_IFS_CWMIN) |
735 SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) | 482 SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX) |
@@ -744,6 +491,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
744 REG_WRITE(ah, AR_DMISC(q), 491 REG_WRITE(ah, AR_DMISC(q),
745 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2); 492 AR_D_MISC_CW_BKOFF_EN | AR_D_MISC_FRAG_WAIT_EN | 0x2);
746 493
494 REGWRITE_BUFFER_FLUSH(ah);
495
747 if (qi->tqi_cbrPeriod) { 496 if (qi->tqi_cbrPeriod) {
748 REG_WRITE(ah, AR_QCBRCFG(q), 497 REG_WRITE(ah, AR_QCBRCFG(q),
749 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) | 498 SM(qi->tqi_cbrPeriod, AR_Q_CBRCFG_INTERVAL) |
@@ -759,6 +508,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
759 AR_Q_RDYTIMECFG_EN); 508 AR_Q_RDYTIMECFG_EN);
760 } 509 }
761 510
511 REGWRITE_BUFFER_FLUSH(ah);
512
762 REG_WRITE(ah, AR_DCHNTIME(q), 513 REG_WRITE(ah, AR_DCHNTIME(q),
763 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) | 514 SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR) |
764 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0)); 515 (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));
@@ -776,6 +527,10 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
776 REG_READ(ah, AR_DMISC(q)) | 527 REG_READ(ah, AR_DMISC(q)) |
777 AR_D_MISC_POST_FR_BKOFF_DIS); 528 AR_D_MISC_POST_FR_BKOFF_DIS);
778 } 529 }
530
531 REGWRITE_BUFFER_FLUSH(ah);
532 DISABLE_REGWRITE_BUFFER(ah);
533
779 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) { 534 if (qi->tqi_qflags & TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) {
780 REG_WRITE(ah, AR_DMISC(q), 535 REG_WRITE(ah, AR_DMISC(q),
781 REG_READ(ah, AR_DMISC(q)) | 536 REG_READ(ah, AR_DMISC(q)) |
@@ -783,6 +538,8 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
783 } 538 }
784 switch (qi->tqi_type) { 539 switch (qi->tqi_type) {
785 case ATH9K_TX_QUEUE_BEACON: 540 case ATH9K_TX_QUEUE_BEACON:
541 ENABLE_REGWRITE_BUFFER(ah);
542
786 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) 543 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
787 | AR_Q_MISC_FSP_DBA_GATED 544 | AR_Q_MISC_FSP_DBA_GATED
788 | AR_Q_MISC_BEACON_USE 545 | AR_Q_MISC_BEACON_USE
@@ -793,8 +550,20 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
793 AR_D_MISC_ARB_LOCKOUT_CNTRL_S) 550 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)
794 | AR_D_MISC_BEACON_USE 551 | AR_D_MISC_BEACON_USE
795 | AR_D_MISC_POST_FR_BKOFF_DIS); 552 | AR_D_MISC_POST_FR_BKOFF_DIS);
553
554 REGWRITE_BUFFER_FLUSH(ah);
555 DISABLE_REGWRITE_BUFFER(ah);
556
557 /* cwmin and cwmax should be 0 for beacon queue */
558 if (AR_SREV_9300_20_OR_LATER(ah)) {
559 REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN)
560 | SM(0, AR_D_LCL_IFS_CWMAX)
561 | SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));
562 }
796 break; 563 break;
797 case ATH9K_TX_QUEUE_CAB: 564 case ATH9K_TX_QUEUE_CAB:
565 ENABLE_REGWRITE_BUFFER(ah);
566
798 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q)) 567 REG_WRITE(ah, AR_QMISC(q), REG_READ(ah, AR_QMISC(q))
799 | AR_Q_MISC_FSP_DBA_GATED 568 | AR_Q_MISC_FSP_DBA_GATED
800 | AR_Q_MISC_CBR_INCR_DIS1 569 | AR_Q_MISC_CBR_INCR_DIS1
@@ -808,6 +577,10 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
808 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q)) 577 REG_WRITE(ah, AR_DMISC(q), REG_READ(ah, AR_DMISC(q))
809 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL << 578 | (AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL <<
810 AR_D_MISC_ARB_LOCKOUT_CNTRL_S)); 579 AR_D_MISC_ARB_LOCKOUT_CNTRL_S));
580
581 REGWRITE_BUFFER_FLUSH(ah);
582 DISABLE_REGWRITE_BUFFER(ah);
583
811 break; 584 break;
812 case ATH9K_TX_QUEUE_PSPOLL: 585 case ATH9K_TX_QUEUE_PSPOLL:
813 REG_WRITE(ah, AR_QMISC(q), 586 REG_WRITE(ah, AR_QMISC(q),
@@ -829,6 +602,9 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
829 AR_D_MISC_POST_FR_BKOFF_DIS); 602 AR_D_MISC_POST_FR_BKOFF_DIS);
830 } 603 }
831 604
605 if (AR_SREV_9300_20_OR_LATER(ah))
606 REG_WRITE(ah, AR_Q_DESC_CRCCHK, AR_Q_DESC_CRCCHK_EN);
607
832 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE) 608 if (qi->tqi_qflags & TXQ_FLAG_TXOKINT_ENABLE)
833 ah->txok_interrupt_mask |= 1 << q; 609 ah->txok_interrupt_mask |= 1 << q;
834 else 610 else
@@ -856,7 +632,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
856EXPORT_SYMBOL(ath9k_hw_resettxqueue); 632EXPORT_SYMBOL(ath9k_hw_resettxqueue);
857 633
858int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, 634int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
859 u32 pa, struct ath_desc *nds, u64 tsf) 635 struct ath_rx_status *rs, u64 tsf)
860{ 636{
861 struct ar5416_desc ads; 637 struct ar5416_desc ads;
862 struct ar5416_desc *adsp = AR5416DESC(ds); 638 struct ar5416_desc *adsp = AR5416DESC(ds);
@@ -867,92 +643,76 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
867 643
868 ads.u.rx = adsp->u.rx; 644 ads.u.rx = adsp->u.rx;
869 645
870 ds->ds_rxstat.rs_status = 0; 646 rs->rs_status = 0;
871 ds->ds_rxstat.rs_flags = 0; 647 rs->rs_flags = 0;
872 648
873 ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen; 649 rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
874 ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp; 650 rs->rs_tstamp = ads.AR_RcvTimestamp;
875 651
876 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) { 652 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
877 ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD; 653 rs->rs_rssi = ATH9K_RSSI_BAD;
878 ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD; 654 rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
879 ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD; 655 rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
880 ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD; 656 rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
881 ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD; 657 rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
882 ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD; 658 rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
883 ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD; 659 rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
884 } else { 660 } else {
885 ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined); 661 rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
886 ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, 662 rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
887 AR_RxRSSIAnt00); 663 AR_RxRSSIAnt00);
888 ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, 664 rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
889 AR_RxRSSIAnt01); 665 AR_RxRSSIAnt01);
890 ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, 666 rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
891 AR_RxRSSIAnt02); 667 AR_RxRSSIAnt02);
892 ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, 668 rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
893 AR_RxRSSIAnt10); 669 AR_RxRSSIAnt10);
894 ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, 670 rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
895 AR_RxRSSIAnt11); 671 AR_RxRSSIAnt11);
896 ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, 672 rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
897 AR_RxRSSIAnt12); 673 AR_RxRSSIAnt12);
898 } 674 }
899 if (ads.ds_rxstatus8 & AR_RxKeyIdxValid) 675 if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
900 ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx); 676 rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
901 else 677 else
902 ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID; 678 rs->rs_keyix = ATH9K_RXKEYIX_INVALID;
903 679
904 ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads)); 680 rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
905 ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0; 681 rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
906 682
907 ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0; 683 rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
908 ds->ds_rxstat.rs_moreaggr = 684 rs->rs_moreaggr =
909 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0; 685 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
910 ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); 686 rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
911 ds->ds_rxstat.rs_flags = 687 rs->rs_flags =
912 (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0; 688 (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
913 ds->ds_rxstat.rs_flags |= 689 rs->rs_flags |=
914 (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0; 690 (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
915 691
916 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr) 692 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
917 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE; 693 rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
918 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) 694 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
919 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST; 695 rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
920 if (ads.ds_rxstatus8 & AR_DecryptBusyErr) 696 if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
921 ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY; 697 rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
922 698
923 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { 699 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
924 if (ads.ds_rxstatus8 & AR_CRCErr) 700 if (ads.ds_rxstatus8 & AR_CRCErr)
925 ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC; 701 rs->rs_status |= ATH9K_RXERR_CRC;
926 else if (ads.ds_rxstatus8 & AR_PHYErr) { 702 else if (ads.ds_rxstatus8 & AR_PHYErr) {
927 ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY; 703 rs->rs_status |= ATH9K_RXERR_PHY;
928 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); 704 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
929 ds->ds_rxstat.rs_phyerr = phyerr; 705 rs->rs_phyerr = phyerr;
930 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) 706 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
931 ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT; 707 rs->rs_status |= ATH9K_RXERR_DECRYPT;
932 else if (ads.ds_rxstatus8 & AR_MichaelErr) 708 else if (ads.ds_rxstatus8 & AR_MichaelErr)
933 ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC; 709 rs->rs_status |= ATH9K_RXERR_MIC;
934 } 710 }
935 711
936 return 0; 712 return 0;
937} 713}
938EXPORT_SYMBOL(ath9k_hw_rxprocdesc); 714EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
939 715
940void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
941 u32 size, u32 flags)
942{
943 struct ar5416_desc *ads = AR5416DESC(ds);
944 struct ath9k_hw_capabilities *pCap = &ah->caps;
945
946 ads->ds_ctl1 = size & AR_BufLen;
947 if (flags & ATH9K_RXDESC_INTREQ)
948 ads->ds_ctl1 |= AR_RxIntrReq;
949
950 ads->ds_rxstatus8 &= ~AR_RxDone;
951 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
952 memset(&(ads->u), 0, sizeof(ads->u));
953}
954EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
955
956/* 716/*
957 * This can stop or re-enables RX. 717 * This can stop or re-enables RX.
958 * 718 *
@@ -996,12 +756,6 @@ void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
996} 756}
997EXPORT_SYMBOL(ath9k_hw_putrxbuf); 757EXPORT_SYMBOL(ath9k_hw_putrxbuf);
998 758
999void ath9k_hw_rxena(struct ath_hw *ah)
1000{
1001 REG_WRITE(ah, AR_CR, AR_CR_RXE);
1002}
1003EXPORT_SYMBOL(ath9k_hw_rxena);
1004
1005void ath9k_hw_startpcureceive(struct ath_hw *ah) 759void ath9k_hw_startpcureceive(struct ath_hw *ah)
1006{ 760{
1007 ath9k_enable_mib_counters(ah); 761 ath9k_enable_mib_counters(ah);
@@ -1020,6 +774,14 @@ void ath9k_hw_stoppcurecv(struct ath_hw *ah)
1020} 774}
1021EXPORT_SYMBOL(ath9k_hw_stoppcurecv); 775EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
1022 776
777void ath9k_hw_abortpcurecv(struct ath_hw *ah)
778{
779 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
780
781 ath9k_hw_disable_mib_counters(ah);
782}
783EXPORT_SYMBOL(ath9k_hw_abortpcurecv);
784
1023bool ath9k_hw_stopdmarecv(struct ath_hw *ah) 785bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
1024{ 786{
1025#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ 787#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
@@ -1065,3 +827,142 @@ int ath9k_hw_beaconq_setup(struct ath_hw *ah)
1065 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi); 827 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
1066} 828}
1067EXPORT_SYMBOL(ath9k_hw_beaconq_setup); 829EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
830
831bool ath9k_hw_intrpend(struct ath_hw *ah)
832{
833 u32 host_isr;
834
835 if (AR_SREV_9100(ah))
836 return true;
837
838 host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
839 if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
840 return true;
841
842 host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
843 if ((host_isr & AR_INTR_SYNC_DEFAULT)
844 && (host_isr != AR_INTR_SPURIOUS))
845 return true;
846
847 return false;
848}
849EXPORT_SYMBOL(ath9k_hw_intrpend);
850
851enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
852 enum ath9k_int ints)
853{
854 enum ath9k_int omask = ah->imask;
855 u32 mask, mask2;
856 struct ath9k_hw_capabilities *pCap = &ah->caps;
857 struct ath_common *common = ath9k_hw_common(ah);
858
859 ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
860
861 if (omask & ATH9K_INT_GLOBAL) {
862 ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
863 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
864 (void) REG_READ(ah, AR_IER);
865 if (!AR_SREV_9100(ah)) {
866 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
867 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
868
869 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
870 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
871 }
872 }
873
874 /* TODO: global int Ref count */
875 mask = ints & ATH9K_INT_COMMON;
876 mask2 = 0;
877
878 if (ints & ATH9K_INT_TX) {
879 if (ah->config.tx_intr_mitigation)
880 mask |= AR_IMR_TXMINTR | AR_IMR_TXINTM;
881 else {
882 if (ah->txok_interrupt_mask)
883 mask |= AR_IMR_TXOK;
884 if (ah->txdesc_interrupt_mask)
885 mask |= AR_IMR_TXDESC;
886 }
887 if (ah->txerr_interrupt_mask)
888 mask |= AR_IMR_TXERR;
889 if (ah->txeol_interrupt_mask)
890 mask |= AR_IMR_TXEOL;
891 }
892 if (ints & ATH9K_INT_RX) {
893 if (AR_SREV_9300_20_OR_LATER(ah)) {
894 mask |= AR_IMR_RXERR | AR_IMR_RXOK_HP;
895 if (ah->config.rx_intr_mitigation) {
896 mask &= ~AR_IMR_RXOK_LP;
897 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
898 } else {
899 mask |= AR_IMR_RXOK_LP;
900 }
901 } else {
902 if (ah->config.rx_intr_mitigation)
903 mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
904 else
905 mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
906 }
907 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
908 mask |= AR_IMR_GENTMR;
909 }
910
911 if (ints & (ATH9K_INT_BMISC)) {
912 mask |= AR_IMR_BCNMISC;
913 if (ints & ATH9K_INT_TIM)
914 mask2 |= AR_IMR_S2_TIM;
915 if (ints & ATH9K_INT_DTIM)
916 mask2 |= AR_IMR_S2_DTIM;
917 if (ints & ATH9K_INT_DTIMSYNC)
918 mask2 |= AR_IMR_S2_DTIMSYNC;
919 if (ints & ATH9K_INT_CABEND)
920 mask2 |= AR_IMR_S2_CABEND;
921 if (ints & ATH9K_INT_TSFOOR)
922 mask2 |= AR_IMR_S2_TSFOOR;
923 }
924
925 if (ints & (ATH9K_INT_GTT | ATH9K_INT_CST)) {
926 mask |= AR_IMR_BCNMISC;
927 if (ints & ATH9K_INT_GTT)
928 mask2 |= AR_IMR_S2_GTT;
929 if (ints & ATH9K_INT_CST)
930 mask2 |= AR_IMR_S2_CST;
931 }
932
933 ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
934 REG_WRITE(ah, AR_IMR, mask);
935 ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
936 AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
937 AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
938 ah->imrs2_reg |= mask2;
939 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
940
941 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
942 if (ints & ATH9K_INT_TIM_TIMER)
943 REG_SET_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
944 else
945 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
946 }
947
948 if (ints & ATH9K_INT_GLOBAL) {
949 ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
950 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
951 if (!AR_SREV_9100(ah)) {
952 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
953 AR_INTR_MAC_IRQ);
954 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
955
956
957 REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
958 AR_INTR_SYNC_DEFAULT);
959 REG_WRITE(ah, AR_INTR_SYNC_MASK,
960 AR_INTR_SYNC_DEFAULT);
961 }
962 ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
963 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
964 }
965
966 return omask;
967}
968EXPORT_SYMBOL(ath9k_hw_set_interrupts);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 29851e6376a9..00f3e0c7528a 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -37,6 +37,8 @@
37 AR_2040_##_index : 0) \ 37 AR_2040_##_index : 0) \
38 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \ 38 |((_series)[_index].RateFlags & ATH9K_RATESERIES_HALFGI ? \
39 AR_GI##_index : 0) \ 39 AR_GI##_index : 0) \
40 |((_series)[_index].RateFlags & ATH9K_RATESERIES_STBC ? \
41 AR_STBC##_index : 0) \
40 |SM((_series)[_index].ChSel, AR_ChainSel##_index)) 42 |SM((_series)[_index].ChSel, AR_ChainSel##_index))
41 43
42#define CCK_SIFS_TIME 10 44#define CCK_SIFS_TIME 10
@@ -86,7 +88,6 @@
86#define ATH9K_TX_DESC_CFG_ERR 0x04 88#define ATH9K_TX_DESC_CFG_ERR 0x04
87#define ATH9K_TX_DATA_UNDERRUN 0x08 89#define ATH9K_TX_DATA_UNDERRUN 0x08
88#define ATH9K_TX_DELIM_UNDERRUN 0x10 90#define ATH9K_TX_DELIM_UNDERRUN 0x10
89#define ATH9K_TX_SW_ABORTED 0x40
90#define ATH9K_TX_SW_FILTERED 0x80 91#define ATH9K_TX_SW_FILTERED 0x80
91 92
92/* 64 bytes */ 93/* 64 bytes */
@@ -117,7 +118,10 @@ struct ath_tx_status {
117 int8_t ts_rssi_ext0; 118 int8_t ts_rssi_ext0;
118 int8_t ts_rssi_ext1; 119 int8_t ts_rssi_ext1;
119 int8_t ts_rssi_ext2; 120 int8_t ts_rssi_ext2;
120 u8 pad[3]; 121 u8 qid;
122 u16 desc_id;
123 u8 tid;
124 u8 pad[2];
121 u32 ba_low; 125 u32 ba_low;
122 u32 ba_high; 126 u32 ba_high;
123 u32 evm0; 127 u32 evm0;
@@ -148,6 +152,34 @@ struct ath_rx_status {
148 u32 evm0; 152 u32 evm0;
149 u32 evm1; 153 u32 evm1;
150 u32 evm2; 154 u32 evm2;
155 u32 evm3;
156 u32 evm4;
157};
158
159struct ath_htc_rx_status {
160 __be64 rs_tstamp;
161 __be16 rs_datalen;
162 u8 rs_status;
163 u8 rs_phyerr;
164 int8_t rs_rssi;
165 int8_t rs_rssi_ctl0;
166 int8_t rs_rssi_ctl1;
167 int8_t rs_rssi_ctl2;
168 int8_t rs_rssi_ext0;
169 int8_t rs_rssi_ext1;
170 int8_t rs_rssi_ext2;
171 u8 rs_keyix;
172 u8 rs_rate;
173 u8 rs_antenna;
174 u8 rs_more;
175 u8 rs_isaggr;
176 u8 rs_moreaggr;
177 u8 rs_num_delims;
178 u8 rs_flags;
179 u8 rs_dummy;
180 __be32 evm0;
181 __be32 evm1;
182 __be32 evm2;
151}; 183};
152 184
153#define ATH9K_RXERR_CRC 0x01 185#define ATH9K_RXERR_CRC 0x01
@@ -207,18 +239,9 @@ struct ath_desc {
207 u32 ds_ctl0; 239 u32 ds_ctl0;
208 u32 ds_ctl1; 240 u32 ds_ctl1;
209 u32 ds_hw[20]; 241 u32 ds_hw[20];
210 union {
211 struct ath_tx_status tx;
212 struct ath_rx_status rx;
213 void *stats;
214 } ds_us;
215 void *ds_vdata; 242 void *ds_vdata;
216} __packed; 243} __packed;
217 244
218#define ds_txstat ds_us.tx
219#define ds_rxstat ds_us.rx
220#define ds_stat ds_us.stats
221
222#define ATH9K_TXDESC_CLRDMASK 0x0001 245#define ATH9K_TXDESC_CLRDMASK 0x0001
223#define ATH9K_TXDESC_NOACK 0x0002 246#define ATH9K_TXDESC_NOACK 0x0002
224#define ATH9K_TXDESC_RTSENA 0x0004 247#define ATH9K_TXDESC_RTSENA 0x0004
@@ -242,7 +265,8 @@ struct ath_desc {
242#define ATH9K_TXDESC_EXT_AND_CTL 0x0080 265#define ATH9K_TXDESC_EXT_AND_CTL 0x0080
243#define ATH9K_TXDESC_VMF 0x0100 266#define ATH9K_TXDESC_VMF 0x0100
244#define ATH9K_TXDESC_FRAG_IS_ON 0x0200 267#define ATH9K_TXDESC_FRAG_IS_ON 0x0200
245#define ATH9K_TXDESC_CAB 0x0400 268#define ATH9K_TXDESC_LOWRXCHAIN 0x0400
269#define ATH9K_TXDESC_LDPC 0x00010000
246 270
247#define ATH9K_RXDESC_INTREQ 0x0020 271#define ATH9K_RXDESC_INTREQ 0x0020
248 272
@@ -336,7 +360,6 @@ struct ar5416_desc {
336#define AR_DestIdxValid 0x40000000 360#define AR_DestIdxValid 0x40000000
337#define AR_CTSEnable 0x80000000 361#define AR_CTSEnable 0x80000000
338 362
339#define AR_BufLen 0x00000fff
340#define AR_TxMore 0x00001000 363#define AR_TxMore 0x00001000
341#define AR_DestIdx 0x000fe000 364#define AR_DestIdx 0x000fe000
342#define AR_DestIdx_S 13 365#define AR_DestIdx_S 13
@@ -393,6 +416,7 @@ struct ar5416_desc {
393#define AR_EncrType 0x0c000000 416#define AR_EncrType 0x0c000000
394#define AR_EncrType_S 26 417#define AR_EncrType_S 26
395#define AR_TxCtlRsvd61 0xf0000000 418#define AR_TxCtlRsvd61 0xf0000000
419#define AR_LDPC 0x80000000
396 420
397#define AR_2040_0 0x00000001 421#define AR_2040_0 0x00000001
398#define AR_GI0 0x00000002 422#define AR_GI0 0x00000002
@@ -412,7 +436,10 @@ struct ar5416_desc {
412#define AR_ChainSel3_S 17 436#define AR_ChainSel3_S 17
413#define AR_RTSCTSRate 0x0ff00000 437#define AR_RTSCTSRate 0x0ff00000
414#define AR_RTSCTSRate_S 20 438#define AR_RTSCTSRate_S 20
415#define AR_TxCtlRsvd70 0xf0000000 439#define AR_STBC0 0x10000000
440#define AR_STBC1 0x20000000
441#define AR_STBC2 0x40000000
442#define AR_STBC3 0x80000000
416 443
417#define AR_TxRSSIAnt00 0x000000ff 444#define AR_TxRSSIAnt00 0x000000ff
418#define AR_TxRSSIAnt00_S 0 445#define AR_TxRSSIAnt00_S 0
@@ -476,7 +503,6 @@ struct ar5416_desc {
476 503
477#define AR_RxCTLRsvd00 0xffffffff 504#define AR_RxCTLRsvd00 0xffffffff
478 505
479#define AR_BufLen 0x00000fff
480#define AR_RxCtlRsvd00 0x00001000 506#define AR_RxCtlRsvd00 0x00001000
481#define AR_RxIntrReq 0x00002000 507#define AR_RxIntrReq 0x00002000
482#define AR_RxCtlRsvd01 0xffffc000 508#define AR_RxCtlRsvd01 0xffffc000
@@ -626,6 +652,7 @@ enum ath9k_rx_filter {
626#define ATH9K_RATESERIES_RTS_CTS 0x0001 652#define ATH9K_RATESERIES_RTS_CTS 0x0001
627#define ATH9K_RATESERIES_2040 0x0002 653#define ATH9K_RATESERIES_2040 0x0002
628#define ATH9K_RATESERIES_HALFGI 0x0004 654#define ATH9K_RATESERIES_HALFGI 0x0004
655#define ATH9K_RATESERIES_STBC 0x0008
629 656
630struct ath9k_11n_rate_series { 657struct ath9k_11n_rate_series {
631 u32 Tries; 658 u32 Tries;
@@ -669,33 +696,10 @@ struct ath9k_channel;
669u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q); 696u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
670void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp); 697void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
671void ath9k_hw_txstart(struct ath_hw *ah, u32 q); 698void ath9k_hw_txstart(struct ath_hw *ah, u32 q);
699void ath9k_hw_cleartxdesc(struct ath_hw *ah, void *ds);
672u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q); 700u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q);
673bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel); 701bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel);
674bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q); 702bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q);
675void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
676 u32 segLen, bool firstSeg,
677 bool lastSeg, const struct ath_desc *ds0);
678void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds);
679int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds);
680void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
681 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
682 u32 keyIx, enum ath9k_key_type keyType, u32 flags);
683void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
684 struct ath_desc *lastds,
685 u32 durUpdateEn, u32 rtsctsRate,
686 u32 rtsctsDuration,
687 struct ath9k_11n_rate_series series[],
688 u32 nseries, u32 flags);
689void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
690 u32 aggrLen);
691void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
692 u32 numDelims);
693void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds);
694void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds);
695void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
696 u32 burstDuration);
697void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
698 u32 vmf);
699void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs); 703void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs);
700bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, 704bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
701 const struct ath9k_tx_queue_info *qinfo); 705 const struct ath9k_tx_queue_info *qinfo);
@@ -706,15 +710,22 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
706bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q); 710bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q);
707bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q); 711bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q);
708int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, 712int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
709 u32 pa, struct ath_desc *nds, u64 tsf); 713 struct ath_rx_status *rs, u64 tsf);
710void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds, 714void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
711 u32 size, u32 flags); 715 u32 size, u32 flags);
712bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set); 716bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
713void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); 717void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
714void ath9k_hw_rxena(struct ath_hw *ah);
715void ath9k_hw_startpcureceive(struct ath_hw *ah); 718void ath9k_hw_startpcureceive(struct ath_hw *ah);
716void ath9k_hw_stoppcurecv(struct ath_hw *ah); 719void ath9k_hw_stoppcurecv(struct ath_hw *ah);
720void ath9k_hw_abortpcurecv(struct ath_hw *ah);
717bool ath9k_hw_stopdmarecv(struct ath_hw *ah); 721bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
718int ath9k_hw_beaconq_setup(struct ath_hw *ah); 722int ath9k_hw_beaconq_setup(struct ath_hw *ah);
719 723
724/* Interrupt Handling */
725bool ath9k_hw_intrpend(struct ath_hw *ah);
726enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
727 enum ath9k_int ints);
728
729void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
730
720#endif /* MAC_H */ 731#endif /* MAC_H */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 115e1aeedb59..893b552981a0 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -225,7 +225,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
225 225
226 ath_cache_conf_rate(sc, &hw->conf); 226 ath_cache_conf_rate(sc, &hw->conf);
227 ath_update_txpow(sc); 227 ath_update_txpow(sc);
228 ath9k_hw_set_interrupts(ah, sc->imask); 228 ath9k_hw_set_interrupts(ah, ah->imask);
229 229
230 ps_restore: 230 ps_restore:
231 ath9k_ps_restore(sc); 231 ath9k_ps_restore(sc);
@@ -401,23 +401,41 @@ void ath9k_tasklet(unsigned long data)
401 struct ath_common *common = ath9k_hw_common(ah); 401 struct ath_common *common = ath9k_hw_common(ah);
402 402
403 u32 status = sc->intrstatus; 403 u32 status = sc->intrstatus;
404 u32 rxmask;
404 405
405 ath9k_ps_wakeup(sc); 406 ath9k_ps_wakeup(sc);
406 407
407 if (status & ATH9K_INT_FATAL) { 408 if ((status & ATH9K_INT_FATAL) ||
409 !ath9k_hw_check_alive(ah)) {
408 ath_reset(sc, false); 410 ath_reset(sc, false);
409 ath9k_ps_restore(sc); 411 ath9k_ps_restore(sc);
410 return; 412 return;
411 } 413 }
412 414
413 if (status & (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN)) { 415 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
416 rxmask = (ATH9K_INT_RXHP | ATH9K_INT_RXLP | ATH9K_INT_RXEOL |
417 ATH9K_INT_RXORN);
418 else
419 rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
420
421 if (status & rxmask) {
414 spin_lock_bh(&sc->rx.rxflushlock); 422 spin_lock_bh(&sc->rx.rxflushlock);
415 ath_rx_tasklet(sc, 0); 423
424 /* Check for high priority Rx first */
425 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
426 (status & ATH9K_INT_RXHP))
427 ath_rx_tasklet(sc, 0, true);
428
429 ath_rx_tasklet(sc, 0, false);
416 spin_unlock_bh(&sc->rx.rxflushlock); 430 spin_unlock_bh(&sc->rx.rxflushlock);
417 } 431 }
418 432
419 if (status & ATH9K_INT_TX) 433 if (status & ATH9K_INT_TX) {
420 ath_tx_tasklet(sc); 434 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
435 ath_tx_edma_tasklet(sc);
436 else
437 ath_tx_tasklet(sc);
438 }
421 439
422 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) { 440 if ((status & ATH9K_INT_TSFOOR) && sc->ps_enabled) {
423 /* 441 /*
@@ -434,7 +452,7 @@ void ath9k_tasklet(unsigned long data)
434 ath_gen_timer_isr(sc->sc_ah); 452 ath_gen_timer_isr(sc->sc_ah);
435 453
436 /* re-enable hardware interrupt */ 454 /* re-enable hardware interrupt */
437 ath9k_hw_set_interrupts(ah, sc->imask); 455 ath9k_hw_set_interrupts(ah, ah->imask);
438 ath9k_ps_restore(sc); 456 ath9k_ps_restore(sc);
439} 457}
440 458
@@ -445,6 +463,8 @@ irqreturn_t ath_isr(int irq, void *dev)
445 ATH9K_INT_RXORN | \ 463 ATH9K_INT_RXORN | \
446 ATH9K_INT_RXEOL | \ 464 ATH9K_INT_RXEOL | \
447 ATH9K_INT_RX | \ 465 ATH9K_INT_RX | \
466 ATH9K_INT_RXLP | \
467 ATH9K_INT_RXHP | \
448 ATH9K_INT_TX | \ 468 ATH9K_INT_TX | \
449 ATH9K_INT_BMISS | \ 469 ATH9K_INT_BMISS | \
450 ATH9K_INT_CST | \ 470 ATH9K_INT_CST | \
@@ -477,7 +497,7 @@ irqreturn_t ath_isr(int irq, void *dev)
477 * value to insure we only process bits we requested. 497 * value to insure we only process bits we requested.
478 */ 498 */
479 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */ 499 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
480 status &= sc->imask; /* discard unasked-for bits */ 500 status &= ah->imask; /* discard unasked-for bits */
481 501
482 /* 502 /*
483 * If there are no status bits set, then this interrupt was not 503 * If there are no status bits set, then this interrupt was not
@@ -496,7 +516,8 @@ irqreturn_t ath_isr(int irq, void *dev)
496 * If a FATAL or RXORN interrupt is received, we have to reset the 516 * If a FATAL or RXORN interrupt is received, we have to reset the
497 * chip immediately. 517 * chip immediately.
498 */ 518 */
499 if (status & (ATH9K_INT_FATAL | ATH9K_INT_RXORN)) 519 if ((status & ATH9K_INT_FATAL) || ((status & ATH9K_INT_RXORN) &&
520 !(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)))
500 goto chip_reset; 521 goto chip_reset;
501 522
502 if (status & ATH9K_INT_SWBA) 523 if (status & ATH9K_INT_SWBA)
@@ -505,6 +526,13 @@ irqreturn_t ath_isr(int irq, void *dev)
505 if (status & ATH9K_INT_TXURN) 526 if (status & ATH9K_INT_TXURN)
506 ath9k_hw_updatetxtriglevel(ah, true); 527 ath9k_hw_updatetxtriglevel(ah, true);
507 528
529 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
530 if (status & ATH9K_INT_RXEOL) {
531 ah->imask &= ~(ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
532 ath9k_hw_set_interrupts(ah, ah->imask);
533 }
534 }
535
508 if (status & ATH9K_INT_MIB) { 536 if (status & ATH9K_INT_MIB) {
509 /* 537 /*
510 * Disable interrupts until we service the MIB 538 * Disable interrupts until we service the MIB
@@ -518,7 +546,7 @@ irqreturn_t ath_isr(int irq, void *dev)
518 * the interrupt. 546 * the interrupt.
519 */ 547 */
520 ath9k_hw_procmibevent(ah); 548 ath9k_hw_procmibevent(ah);
521 ath9k_hw_set_interrupts(ah, sc->imask); 549 ath9k_hw_set_interrupts(ah, ah->imask);
522 } 550 }
523 551
524 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 552 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
@@ -536,7 +564,7 @@ chip_reset:
536 564
537 if (sched) { 565 if (sched) {
538 /* turn off every interrupt except SWBA */ 566 /* turn off every interrupt except SWBA */
539 ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA)); 567 ath9k_hw_set_interrupts(ah, (ah->imask & ATH9K_INT_SWBA));
540 tasklet_schedule(&sc->intr_tq); 568 tasklet_schedule(&sc->intr_tq);
541 } 569 }
542 570
@@ -724,6 +752,7 @@ static int ath_key_config(struct ath_common *common,
724 struct ath_hw *ah = common->ah; 752 struct ath_hw *ah = common->ah;
725 struct ath9k_keyval hk; 753 struct ath9k_keyval hk;
726 const u8 *mac = NULL; 754 const u8 *mac = NULL;
755 u8 gmac[ETH_ALEN];
727 int ret = 0; 756 int ret = 0;
728 int idx; 757 int idx;
729 758
@@ -747,9 +776,30 @@ static int ath_key_config(struct ath_common *common,
747 memcpy(hk.kv_val, key->key, key->keylen); 776 memcpy(hk.kv_val, key->key, key->keylen);
748 777
749 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 778 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
750 /* For now, use the default keys for broadcast keys. This may 779
751 * need to change with virtual interfaces. */ 780 if (key->ap_addr) {
752 idx = key->keyidx; 781 /*
782 * Group keys on hardware that supports multicast frame
783 * key search use a mac that is the sender's address with
784 * the high bit set instead of the app-specified address.
785 */
786 memcpy(gmac, key->ap_addr, ETH_ALEN);
787 gmac[0] |= 0x80;
788 mac = gmac;
789
790 if (key->alg == ALG_TKIP)
791 idx = ath_reserve_key_cache_slot_tkip(common);
792 else
793 idx = ath_reserve_key_cache_slot(common);
794 if (idx < 0)
795 mac = NULL; /* no free key cache entries */
796 }
797
798 if (!mac) {
799 /* For now, use the default keys for broadcast keys. This may
800 * need to change with virtual interfaces. */
801 idx = key->keyidx;
802 }
753 } else if (key->keyidx) { 803 } else if (key->keyidx) {
754 if (WARN_ON(!sta)) 804 if (WARN_ON(!sta))
755 return -EOPNOTSUPP; 805 return -EOPNOTSUPP;
@@ -887,7 +937,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
887 ath_beacon_config(sc, NULL); /* restart beacons */ 937 ath_beacon_config(sc, NULL); /* restart beacons */
888 938
889 /* Re-Enable interrupts */ 939 /* Re-Enable interrupts */
890 ath9k_hw_set_interrupts(ah, sc->imask); 940 ath9k_hw_set_interrupts(ah, ah->imask);
891 941
892 /* Enable LED */ 942 /* Enable LED */
893 ath9k_hw_cfg_output(ah, ah->led_pin, 943 ath9k_hw_cfg_output(ah, ah->led_pin,
@@ -977,7 +1027,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
977 if (sc->sc_flags & SC_OP_BEACONS) 1027 if (sc->sc_flags & SC_OP_BEACONS)
978 ath_beacon_config(sc, NULL); /* restart beacons */ 1028 ath_beacon_config(sc, NULL); /* restart beacons */
979 1029
980 ath9k_hw_set_interrupts(ah, sc->imask); 1030 ath9k_hw_set_interrupts(ah, ah->imask);
981 1031
982 if (retry_tx) { 1032 if (retry_tx) {
983 int i; 1033 int i;
@@ -1162,23 +1212,28 @@ static int ath9k_start(struct ieee80211_hw *hw)
1162 } 1212 }
1163 1213
1164 /* Setup our intr mask. */ 1214 /* Setup our intr mask. */
1165 sc->imask = ATH9K_INT_RX | ATH9K_INT_TX 1215 ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
1166 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN 1216 ATH9K_INT_RXORN | ATH9K_INT_FATAL |
1167 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL; 1217 ATH9K_INT_GLOBAL;
1218
1219 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
1220 ah->imask |= ATH9K_INT_RXHP | ATH9K_INT_RXLP;
1221 else
1222 ah->imask |= ATH9K_INT_RX;
1168 1223
1169 if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT) 1224 if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
1170 sc->imask |= ATH9K_INT_GTT; 1225 ah->imask |= ATH9K_INT_GTT;
1171 1226
1172 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) 1227 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1173 sc->imask |= ATH9K_INT_CST; 1228 ah->imask |= ATH9K_INT_CST;
1174 1229
1175 ath_cache_conf_rate(sc, &hw->conf); 1230 ath_cache_conf_rate(sc, &hw->conf);
1176 1231
1177 sc->sc_flags &= ~SC_OP_INVALID; 1232 sc->sc_flags &= ~SC_OP_INVALID;
1178 1233
1179 /* Disable BMISS interrupt when we're not associated */ 1234 /* Disable BMISS interrupt when we're not associated */
1180 sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); 1235 ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1181 ath9k_hw_set_interrupts(ah, sc->imask); 1236 ath9k_hw_set_interrupts(ah, ah->imask);
1182 1237
1183 ieee80211_wake_queues(hw); 1238 ieee80211_wake_queues(hw);
1184 1239
@@ -1372,14 +1427,15 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1372{ 1427{
1373 struct ath_wiphy *aphy = hw->priv; 1428 struct ath_wiphy *aphy = hw->priv;
1374 struct ath_softc *sc = aphy->sc; 1429 struct ath_softc *sc = aphy->sc;
1375 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1430 struct ath_hw *ah = sc->sc_ah;
1431 struct ath_common *common = ath9k_hw_common(ah);
1376 struct ath_vif *avp = (void *)vif->drv_priv; 1432 struct ath_vif *avp = (void *)vif->drv_priv;
1377 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED; 1433 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
1378 int ret = 0; 1434 int ret = 0;
1379 1435
1380 mutex_lock(&sc->mutex); 1436 mutex_lock(&sc->mutex);
1381 1437
1382 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) && 1438 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
1383 sc->nvifs > 0) { 1439 sc->nvifs > 0) {
1384 ret = -ENOBUFS; 1440 ret = -ENOBUFS;
1385 goto out; 1441 goto out;
@@ -1414,19 +1470,19 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1414 1470
1415 sc->nvifs++; 1471 sc->nvifs++;
1416 1472
1417 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 1473 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
1418 ath9k_set_bssid_mask(hw); 1474 ath9k_set_bssid_mask(hw);
1419 1475
1420 if (sc->nvifs > 1) 1476 if (sc->nvifs > 1)
1421 goto out; /* skip global settings for secondary vif */ 1477 goto out; /* skip global settings for secondary vif */
1422 1478
1423 if (ic_opmode == NL80211_IFTYPE_AP) { 1479 if (ic_opmode == NL80211_IFTYPE_AP) {
1424 ath9k_hw_set_tsfadjust(sc->sc_ah, 1); 1480 ath9k_hw_set_tsfadjust(ah, 1);
1425 sc->sc_flags |= SC_OP_TSF_RESET; 1481 sc->sc_flags |= SC_OP_TSF_RESET;
1426 } 1482 }
1427 1483
1428 /* Set the device opmode */ 1484 /* Set the device opmode */
1429 sc->sc_ah->opmode = ic_opmode; 1485 ah->opmode = ic_opmode;
1430 1486
1431 /* 1487 /*
1432 * Enable MIB interrupts when there are hardware phy counters. 1488 * Enable MIB interrupts when there are hardware phy counters.
@@ -1435,11 +1491,12 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1435 if ((vif->type == NL80211_IFTYPE_STATION) || 1491 if ((vif->type == NL80211_IFTYPE_STATION) ||
1436 (vif->type == NL80211_IFTYPE_ADHOC) || 1492 (vif->type == NL80211_IFTYPE_ADHOC) ||
1437 (vif->type == NL80211_IFTYPE_MESH_POINT)) { 1493 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
1438 sc->imask |= ATH9K_INT_MIB; 1494 if (ah->config.enable_ani)
1439 sc->imask |= ATH9K_INT_TSFOOR; 1495 ah->imask |= ATH9K_INT_MIB;
1496 ah->imask |= ATH9K_INT_TSFOOR;
1440 } 1497 }
1441 1498
1442 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 1499 ath9k_hw_set_interrupts(ah, ah->imask);
1443 1500
1444 if (vif->type == NL80211_IFTYPE_AP || 1501 if (vif->type == NL80211_IFTYPE_AP ||
1445 vif->type == NL80211_IFTYPE_ADHOC || 1502 vif->type == NL80211_IFTYPE_ADHOC ||
@@ -1495,15 +1552,16 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1495 1552
1496void ath9k_enable_ps(struct ath_softc *sc) 1553void ath9k_enable_ps(struct ath_softc *sc)
1497{ 1554{
1555 struct ath_hw *ah = sc->sc_ah;
1556
1498 sc->ps_enabled = true; 1557 sc->ps_enabled = true;
1499 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 1558 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1500 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) { 1559 if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
1501 sc->imask |= ATH9K_INT_TIM_TIMER; 1560 ah->imask |= ATH9K_INT_TIM_TIMER;
1502 ath9k_hw_set_interrupts(sc->sc_ah, 1561 ath9k_hw_set_interrupts(ah, ah->imask);
1503 sc->imask);
1504 } 1562 }
1505 } 1563 }
1506 ath9k_hw_setrxabort(sc->sc_ah, 1); 1564 ath9k_hw_setrxabort(ah, 1);
1507} 1565}
1508 1566
1509static int ath9k_config(struct ieee80211_hw *hw, u32 changed) 1567static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1579,10 +1637,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1579 PS_WAIT_FOR_CAB | 1637 PS_WAIT_FOR_CAB |
1580 PS_WAIT_FOR_PSPOLL_DATA | 1638 PS_WAIT_FOR_PSPOLL_DATA |
1581 PS_WAIT_FOR_TX_ACK); 1639 PS_WAIT_FOR_TX_ACK);
1582 if (sc->imask & ATH9K_INT_TIM_TIMER) { 1640 if (ah->imask & ATH9K_INT_TIM_TIMER) {
1583 sc->imask &= ~ATH9K_INT_TIM_TIMER; 1641 ah->imask &= ~ATH9K_INT_TIM_TIMER;
1584 ath9k_hw_set_interrupts(sc->sc_ah, 1642 ath9k_hw_set_interrupts(sc->sc_ah,
1585 sc->imask); 1643 ah->imask);
1586 } 1644 }
1587 } 1645 }
1588 } 1646 }
@@ -1986,6 +2044,25 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
1986 return ret; 2044 return ret;
1987} 2045}
1988 2046
2047static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
2048 struct survey_info *survey)
2049{
2050 struct ath_wiphy *aphy = hw->priv;
2051 struct ath_softc *sc = aphy->sc;
2052 struct ath_hw *ah = sc->sc_ah;
2053 struct ath_common *common = ath9k_hw_common(ah);
2054 struct ieee80211_conf *conf = &hw->conf;
2055
2056 if (idx != 0)
2057 return -ENOENT;
2058
2059 survey->channel = conf->channel;
2060 survey->filled = SURVEY_INFO_NOISE_DBM;
2061 survey->noise = common->ani.noise_floor;
2062
2063 return 0;
2064}
2065
1989static void ath9k_sw_scan_start(struct ieee80211_hw *hw) 2066static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
1990{ 2067{
1991 struct ath_wiphy *aphy = hw->priv; 2068 struct ath_wiphy *aphy = hw->priv;
@@ -2057,6 +2134,7 @@ struct ieee80211_ops ath9k_ops = {
2057 .set_tsf = ath9k_set_tsf, 2134 .set_tsf = ath9k_set_tsf,
2058 .reset_tsf = ath9k_reset_tsf, 2135 .reset_tsf = ath9k_reset_tsf,
2059 .ampdu_action = ath9k_ampdu_action, 2136 .ampdu_action = ath9k_ampdu_action,
2137 .get_survey = ath9k_get_survey,
2060 .sw_scan_start = ath9k_sw_scan_start, 2138 .sw_scan_start = ath9k_sw_scan_start,
2061 .sw_scan_complete = ath9k_sw_scan_complete, 2139 .sw_scan_complete = ath9k_sw_scan_complete,
2062 .rfkill_poll = ath9k_rfkill_poll_state, 2140 .rfkill_poll = ath9k_rfkill_poll_state,
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 9441c6718a30..257b10ba6f57 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -28,6 +28,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
28 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */ 28 { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ 29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ 30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
31 { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
31 { 0 } 32 { 0 }
32}; 33};
33 34
@@ -88,6 +89,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
88} 89}
89 90
90static const struct ath_bus_ops ath_pci_bus_ops = { 91static const struct ath_bus_ops ath_pci_bus_ops = {
92 .ath_bus_type = ATH_PCI,
91 .read_cachesize = ath_pci_read_cachesize, 93 .read_cachesize = ath_pci_read_cachesize,
92 .eeprom_read = ath_pci_eeprom_read, 94 .eeprom_read = ath_pci_eeprom_read,
93 .bt_coex_prep = ath_pci_bt_coex_prep, 95 .bt_coex_prep = ath_pci_bt_coex_prep,
diff --git a/drivers/net/wireless/ath/ath9k/phy.c b/drivers/net/wireless/ath/ath9k/phy.c
deleted file mode 100644
index 2547b3c4a26c..000000000000
--- a/drivers/net/wireless/ath/ath9k/phy.c
+++ /dev/null
@@ -1,978 +0,0 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17/**
18 * DOC: Programming Atheros 802.11n analog front end radios
19 *
20 * AR5416 MAC based PCI devices and AR518 MAC based PCI-Express
21 * devices have either an external AR2133 analog front end radio for single
22 * band 2.4 GHz communication or an AR5133 analog front end radio for dual
23 * band 2.4 GHz / 5 GHz communication.
24 *
25 * All devices after the AR5416 and AR5418 family starting with the AR9280
26 * have their analog front radios, MAC/BB and host PCIe/USB interface embedded
27 * into a single-chip and require less programming.
28 *
29 * The following single-chips exist with a respective embedded radio:
30 *
31 * AR9280 - 11n dual-band 2x2 MIMO for PCIe
32 * AR9281 - 11n single-band 1x2 MIMO for PCIe
33 * AR9285 - 11n single-band 1x1 for PCIe
34 * AR9287 - 11n single-band 2x2 MIMO for PCIe
35 *
36 * AR9220 - 11n dual-band 2x2 MIMO for PCI
37 * AR9223 - 11n single-band 2x2 MIMO for PCI
38 *
39 * AR9287 - 11n single-band 1x1 MIMO for USB
40 */
41
42#include <linux/slab.h>
43
44#include "hw.h"
45
46/**
47 * ath9k_hw_write_regs - ??
48 *
49 * @ah: atheros hardware structure
50 * @freqIndex:
51 * @regWrites:
52 *
53 * Used for both the chipsets with an external AR2133/AR5133 radios and
54 * single-chip devices.
55 */
56void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites)
57{
58 REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
59}
60
61/**
62 * ath9k_hw_ar9280_set_channel - set channel on single-chip device
63 * @ah: atheros hardware structure
64 * @chan:
65 *
66 * This is the function to change channel on single-chip devices, that is
67 * all devices after ar9280.
68 *
69 * This function takes the channel value in MHz and sets
70 * hardware channel value. Assumes writes have been enabled to analog bus.
71 *
72 * Actual Expression,
73 *
74 * For 2GHz channel,
75 * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
76 * (freq_ref = 40MHz)
77 *
78 * For 5GHz channel,
79 * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
80 * (freq_ref = 40MHz/(24>>amodeRefSel))
81 */
82int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
83{
84 u16 bMode, fracMode, aModeRefSel = 0;
85 u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
86 struct chan_centers centers;
87 u32 refDivA = 24;
88
89 ath9k_hw_get_channel_centers(ah, chan, &centers);
90 freq = centers.synth_center;
91
92 reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
93 reg32 &= 0xc0000000;
94
95 if (freq < 4800) { /* 2 GHz, fractional mode */
96 u32 txctl;
97 int regWrites = 0;
98
99 bMode = 1;
100 fracMode = 1;
101 aModeRefSel = 0;
102 channelSel = (freq * 0x10000) / 15;
103
104 if (AR_SREV_9287_11_OR_LATER(ah)) {
105 if (freq == 2484) {
106 /* Enable channel spreading for channel 14 */
107 REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
108 1, regWrites);
109 } else {
110 REG_WRITE_ARRAY(&ah->iniCckfirNormal,
111 1, regWrites);
112 }
113 } else {
114 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
115 if (freq == 2484) {
116 /* Enable channel spreading for channel 14 */
117 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
118 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
119 } else {
120 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
121 txctl &~ AR_PHY_CCK_TX_CTRL_JAPAN);
122 }
123 }
124 } else {
125 bMode = 0;
126 fracMode = 0;
127
128 switch(ah->eep_ops->get_eeprom(ah, EEP_FRAC_N_5G)) {
129 case 0:
130 if ((freq % 20) == 0) {
131 aModeRefSel = 3;
132 } else if ((freq % 10) == 0) {
133 aModeRefSel = 2;
134 }
135 if (aModeRefSel)
136 break;
137 case 1:
138 default:
139 aModeRefSel = 0;
140 /*
141 * Enable 2G (fractional) mode for channels
142 * which are 5MHz spaced.
143 */
144 fracMode = 1;
145 refDivA = 1;
146 channelSel = (freq * 0x8000) / 15;
147
148 /* RefDivA setting */
149 REG_RMW_FIELD(ah, AR_AN_SYNTH9,
150 AR_AN_SYNTH9_REFDIVA, refDivA);
151
152 }
153
154 if (!fracMode) {
155 ndiv = (freq * (refDivA >> aModeRefSel)) / 60;
156 channelSel = ndiv & 0x1ff;
157 channelFrac = (ndiv & 0xfffffe00) * 2;
158 channelSel = (channelSel << 17) | channelFrac;
159 }
160 }
161
162 reg32 = reg32 |
163 (bMode << 29) |
164 (fracMode << 28) | (aModeRefSel << 26) | (channelSel);
165
166 REG_WRITE(ah, AR_PHY_SYNTH_CONTROL, reg32);
167
168 ah->curchan = chan;
169 ah->curchan_rad_index = -1;
170
171 return 0;
172}
173
174/**
175 * ath9k_hw_9280_spur_mitigate - convert baseband spur frequency
176 * @ah: atheros hardware structure
177 * @chan:
178 *
179 * For single-chip solutions. Converts to baseband spur frequency given the
180 * input channel frequency and compute register settings below.
181 */
182void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
183{
184 int bb_spur = AR_NO_SPUR;
185 int freq;
186 int bin, cur_bin;
187 int bb_spur_off, spur_subchannel_sd;
188 int spur_freq_sd;
189 int spur_delta_phase;
190 int denominator;
191 int upper, lower, cur_vit_mask;
192 int tmp, newVal;
193 int i;
194 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
195 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
196 };
197 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
198 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
199 };
200 int inc[4] = { 0, 100, 0, 0 };
201 struct chan_centers centers;
202
203 int8_t mask_m[123];
204 int8_t mask_p[123];
205 int8_t mask_amt;
206 int tmp_mask;
207 int cur_bb_spur;
208 bool is2GHz = IS_CHAN_2GHZ(chan);
209
210 memset(&mask_m, 0, sizeof(int8_t) * 123);
211 memset(&mask_p, 0, sizeof(int8_t) * 123);
212
213 ath9k_hw_get_channel_centers(ah, chan, &centers);
214 freq = centers.synth_center;
215
216 ah->config.spurmode = SPUR_ENABLE_EEPROM;
217 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
218 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
219
220 if (is2GHz)
221 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
222 else
223 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
224
225 if (AR_NO_SPUR == cur_bb_spur)
226 break;
227 cur_bb_spur = cur_bb_spur - freq;
228
229 if (IS_CHAN_HT40(chan)) {
230 if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
231 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
232 bb_spur = cur_bb_spur;
233 break;
234 }
235 } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
236 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
237 bb_spur = cur_bb_spur;
238 break;
239 }
240 }
241
242 if (AR_NO_SPUR == bb_spur) {
243 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
244 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
245 return;
246 } else {
247 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
248 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
249 }
250
251 bin = bb_spur * 320;
252
253 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
254
255 newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
256 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
257 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
258 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
259 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
260
261 newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
262 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
263 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
264 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
265 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
266 REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
267
268 if (IS_CHAN_HT40(chan)) {
269 if (bb_spur < 0) {
270 spur_subchannel_sd = 1;
271 bb_spur_off = bb_spur + 10;
272 } else {
273 spur_subchannel_sd = 0;
274 bb_spur_off = bb_spur - 10;
275 }
276 } else {
277 spur_subchannel_sd = 0;
278 bb_spur_off = bb_spur;
279 }
280
281 if (IS_CHAN_HT40(chan))
282 spur_delta_phase =
283 ((bb_spur * 262144) /
284 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
285 else
286 spur_delta_phase =
287 ((bb_spur * 524288) /
288 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
289
290 denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
291 spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
292
293 newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
294 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
295 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
296 REG_WRITE(ah, AR_PHY_TIMING11, newVal);
297
298 newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
299 REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
300
301 cur_bin = -6000;
302 upper = bin + 100;
303 lower = bin - 100;
304
305 for (i = 0; i < 4; i++) {
306 int pilot_mask = 0;
307 int chan_mask = 0;
308 int bp = 0;
309 for (bp = 0; bp < 30; bp++) {
310 if ((cur_bin > lower) && (cur_bin < upper)) {
311 pilot_mask = pilot_mask | 0x1 << bp;
312 chan_mask = chan_mask | 0x1 << bp;
313 }
314 cur_bin += 100;
315 }
316 cur_bin += inc[i];
317 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
318 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
319 }
320
321 cur_vit_mask = 6100;
322 upper = bin + 120;
323 lower = bin - 120;
324
325 for (i = 0; i < 123; i++) {
326 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
327
328 /* workaround for gcc bug #37014 */
329 volatile int tmp_v = abs(cur_vit_mask - bin);
330
331 if (tmp_v < 75)
332 mask_amt = 1;
333 else
334 mask_amt = 0;
335 if (cur_vit_mask < 0)
336 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
337 else
338 mask_p[cur_vit_mask / 100] = mask_amt;
339 }
340 cur_vit_mask -= 100;
341 }
342
343 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
344 | (mask_m[48] << 26) | (mask_m[49] << 24)
345 | (mask_m[50] << 22) | (mask_m[51] << 20)
346 | (mask_m[52] << 18) | (mask_m[53] << 16)
347 | (mask_m[54] << 14) | (mask_m[55] << 12)
348 | (mask_m[56] << 10) | (mask_m[57] << 8)
349 | (mask_m[58] << 6) | (mask_m[59] << 4)
350 | (mask_m[60] << 2) | (mask_m[61] << 0);
351 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
352 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
353
354 tmp_mask = (mask_m[31] << 28)
355 | (mask_m[32] << 26) | (mask_m[33] << 24)
356 | (mask_m[34] << 22) | (mask_m[35] << 20)
357 | (mask_m[36] << 18) | (mask_m[37] << 16)
358 | (mask_m[48] << 14) | (mask_m[39] << 12)
359 | (mask_m[40] << 10) | (mask_m[41] << 8)
360 | (mask_m[42] << 6) | (mask_m[43] << 4)
361 | (mask_m[44] << 2) | (mask_m[45] << 0);
362 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
363 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
364
365 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
366 | (mask_m[18] << 26) | (mask_m[18] << 24)
367 | (mask_m[20] << 22) | (mask_m[20] << 20)
368 | (mask_m[22] << 18) | (mask_m[22] << 16)
369 | (mask_m[24] << 14) | (mask_m[24] << 12)
370 | (mask_m[25] << 10) | (mask_m[26] << 8)
371 | (mask_m[27] << 6) | (mask_m[28] << 4)
372 | (mask_m[29] << 2) | (mask_m[30] << 0);
373 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
374 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
375
376 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
377 | (mask_m[2] << 26) | (mask_m[3] << 24)
378 | (mask_m[4] << 22) | (mask_m[5] << 20)
379 | (mask_m[6] << 18) | (mask_m[7] << 16)
380 | (mask_m[8] << 14) | (mask_m[9] << 12)
381 | (mask_m[10] << 10) | (mask_m[11] << 8)
382 | (mask_m[12] << 6) | (mask_m[13] << 4)
383 | (mask_m[14] << 2) | (mask_m[15] << 0);
384 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
385 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
386
387 tmp_mask = (mask_p[15] << 28)
388 | (mask_p[14] << 26) | (mask_p[13] << 24)
389 | (mask_p[12] << 22) | (mask_p[11] << 20)
390 | (mask_p[10] << 18) | (mask_p[9] << 16)
391 | (mask_p[8] << 14) | (mask_p[7] << 12)
392 | (mask_p[6] << 10) | (mask_p[5] << 8)
393 | (mask_p[4] << 6) | (mask_p[3] << 4)
394 | (mask_p[2] << 2) | (mask_p[1] << 0);
395 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
396 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
397
398 tmp_mask = (mask_p[30] << 28)
399 | (mask_p[29] << 26) | (mask_p[28] << 24)
400 | (mask_p[27] << 22) | (mask_p[26] << 20)
401 | (mask_p[25] << 18) | (mask_p[24] << 16)
402 | (mask_p[23] << 14) | (mask_p[22] << 12)
403 | (mask_p[21] << 10) | (mask_p[20] << 8)
404 | (mask_p[19] << 6) | (mask_p[18] << 4)
405 | (mask_p[17] << 2) | (mask_p[16] << 0);
406 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
407 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
408
409 tmp_mask = (mask_p[45] << 28)
410 | (mask_p[44] << 26) | (mask_p[43] << 24)
411 | (mask_p[42] << 22) | (mask_p[41] << 20)
412 | (mask_p[40] << 18) | (mask_p[39] << 16)
413 | (mask_p[38] << 14) | (mask_p[37] << 12)
414 | (mask_p[36] << 10) | (mask_p[35] << 8)
415 | (mask_p[34] << 6) | (mask_p[33] << 4)
416 | (mask_p[32] << 2) | (mask_p[31] << 0);
417 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
418 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
419
420 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
421 | (mask_p[59] << 26) | (mask_p[58] << 24)
422 | (mask_p[57] << 22) | (mask_p[56] << 20)
423 | (mask_p[55] << 18) | (mask_p[54] << 16)
424 | (mask_p[53] << 14) | (mask_p[52] << 12)
425 | (mask_p[51] << 10) | (mask_p[50] << 8)
426 | (mask_p[49] << 6) | (mask_p[48] << 4)
427 | (mask_p[47] << 2) | (mask_p[46] << 0);
428 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
429 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
430}
431
432/* All code below is for non single-chip solutions */
433
434/**
435 * ath9k_phy_modify_rx_buffer() - perform analog swizzling of parameters
436 * @rfbuf:
437 * @reg32:
438 * @numBits:
439 * @firstBit:
440 * @column:
441 *
442 * Performs analog "swizzling" of parameters into their location.
443 * Used on external AR2133/AR5133 radios.
444 */
445static void ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
446 u32 numBits, u32 firstBit,
447 u32 column)
448{
449 u32 tmp32, mask, arrayEntry, lastBit;
450 int32_t bitPosition, bitsLeft;
451
452 tmp32 = ath9k_hw_reverse_bits(reg32, numBits);
453 arrayEntry = (firstBit - 1) / 8;
454 bitPosition = (firstBit - 1) % 8;
455 bitsLeft = numBits;
456 while (bitsLeft > 0) {
457 lastBit = (bitPosition + bitsLeft > 8) ?
458 8 : bitPosition + bitsLeft;
459 mask = (((1 << lastBit) - 1) ^ ((1 << bitPosition) - 1)) <<
460 (column * 8);
461 rfBuf[arrayEntry] &= ~mask;
462 rfBuf[arrayEntry] |= ((tmp32 << bitPosition) <<
463 (column * 8)) & mask;
464 bitsLeft -= 8 - bitPosition;
465 tmp32 = tmp32 >> (8 - bitPosition);
466 bitPosition = 0;
467 arrayEntry++;
468 }
469}
470
471/*
472 * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
473 * rf_pwd_icsyndiv.
474 *
475 * Theoretical Rules:
476 * if 2 GHz band
477 * if forceBiasAuto
478 * if synth_freq < 2412
479 * bias = 0
480 * else if 2412 <= synth_freq <= 2422
481 * bias = 1
482 * else // synth_freq > 2422
483 * bias = 2
484 * else if forceBias > 0
485 * bias = forceBias & 7
486 * else
487 * no change, use value from ini file
488 * else
489 * no change, invalid band
490 *
491 * 1st Mod:
492 * 2422 also uses value of 2
493 * <approved>
494 *
495 * 2nd Mod:
496 * Less than 2412 uses value of 0, 2412 and above uses value of 2
497 */
498static void ath9k_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
499{
500 struct ath_common *common = ath9k_hw_common(ah);
501 u32 tmp_reg;
502 int reg_writes = 0;
503 u32 new_bias = 0;
504
505 if (!AR_SREV_5416(ah) || synth_freq >= 3000) {
506 return;
507 }
508
509 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
510
511 if (synth_freq < 2412)
512 new_bias = 0;
513 else if (synth_freq < 2422)
514 new_bias = 1;
515 else
516 new_bias = 2;
517
518 /* pre-reverse this field */
519 tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
520
521 ath_print(common, ATH_DBG_CONFIG,
522 "Force rf_pwd_icsyndiv to %1d on %4d\n",
523 new_bias, synth_freq);
524
525 /* swizzle rf_pwd_icsyndiv */
526 ath9k_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
527
528 /* write Bank 6 with new params */
529 REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
530}
531
532/**
533 * ath9k_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
534 * @ah: atheros hardware stucture
535 * @chan:
536 *
537 * For the external AR2133/AR5133 radios, takes the MHz channel value and set
538 * the channel value. Assumes writes enabled to analog bus and bank6 register
539 * cache in ah->analogBank6Data.
540 */
541int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
542{
543 struct ath_common *common = ath9k_hw_common(ah);
544 u32 channelSel = 0;
545 u32 bModeSynth = 0;
546 u32 aModeRefSel = 0;
547 u32 reg32 = 0;
548 u16 freq;
549 struct chan_centers centers;
550
551 ath9k_hw_get_channel_centers(ah, chan, &centers);
552 freq = centers.synth_center;
553
554 if (freq < 4800) {
555 u32 txctl;
556
557 if (((freq - 2192) % 5) == 0) {
558 channelSel = ((freq - 672) * 2 - 3040) / 10;
559 bModeSynth = 0;
560 } else if (((freq - 2224) % 5) == 0) {
561 channelSel = ((freq - 704) * 2 - 3040) / 10;
562 bModeSynth = 1;
563 } else {
564 ath_print(common, ATH_DBG_FATAL,
565 "Invalid channel %u MHz\n", freq);
566 return -EINVAL;
567 }
568
569 channelSel = (channelSel << 2) & 0xff;
570 channelSel = ath9k_hw_reverse_bits(channelSel, 8);
571
572 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
573 if (freq == 2484) {
574
575 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
576 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
577 } else {
578 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
579 txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
580 }
581
582 } else if ((freq % 20) == 0 && freq >= 5120) {
583 channelSel =
584 ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
585 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
586 } else if ((freq % 10) == 0) {
587 channelSel =
588 ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
589 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
590 aModeRefSel = ath9k_hw_reverse_bits(2, 2);
591 else
592 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
593 } else if ((freq % 5) == 0) {
594 channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
595 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
596 } else {
597 ath_print(common, ATH_DBG_FATAL,
598 "Invalid channel %u MHz\n", freq);
599 return -EINVAL;
600 }
601
602 ath9k_hw_force_bias(ah, freq);
603
604 reg32 =
605 (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
606 (1 << 5) | 0x1;
607
608 REG_WRITE(ah, AR_PHY(0x37), reg32);
609
610 ah->curchan = chan;
611 ah->curchan_rad_index = -1;
612
613 return 0;
614}
615
616/**
617 * ath9k_hw_spur_mitigate - convert baseband spur frequency for external radios
618 * @ah: atheros hardware structure
619 * @chan:
620 *
621 * For non single-chip solutions. Converts to baseband spur frequency given the
622 * input channel frequency and compute register settings below.
623 */
624void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
625{
626 int bb_spur = AR_NO_SPUR;
627 int bin, cur_bin;
628 int spur_freq_sd;
629 int spur_delta_phase;
630 int denominator;
631 int upper, lower, cur_vit_mask;
632 int tmp, new;
633 int i;
634 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
635 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
636 };
637 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
638 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
639 };
640 int inc[4] = { 0, 100, 0, 0 };
641
642 int8_t mask_m[123];
643 int8_t mask_p[123];
644 int8_t mask_amt;
645 int tmp_mask;
646 int cur_bb_spur;
647 bool is2GHz = IS_CHAN_2GHZ(chan);
648
649 memset(&mask_m, 0, sizeof(int8_t) * 123);
650 memset(&mask_p, 0, sizeof(int8_t) * 123);
651
652 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
653 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
654 if (AR_NO_SPUR == cur_bb_spur)
655 break;
656 cur_bb_spur = cur_bb_spur - (chan->channel * 10);
657 if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
658 bb_spur = cur_bb_spur;
659 break;
660 }
661 }
662
663 if (AR_NO_SPUR == bb_spur)
664 return;
665
666 bin = bb_spur * 32;
667
668 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
669 new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
670 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
671 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
672 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
673
674 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
675
676 new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
677 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
678 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
679 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
680 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
681 REG_WRITE(ah, AR_PHY_SPUR_REG, new);
682
683 spur_delta_phase = ((bb_spur * 524288) / 100) &
684 AR_PHY_TIMING11_SPUR_DELTA_PHASE;
685
686 denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
687 spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
688
689 new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
690 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
691 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
692 REG_WRITE(ah, AR_PHY_TIMING11, new);
693
694 cur_bin = -6000;
695 upper = bin + 100;
696 lower = bin - 100;
697
698 for (i = 0; i < 4; i++) {
699 int pilot_mask = 0;
700 int chan_mask = 0;
701 int bp = 0;
702 for (bp = 0; bp < 30; bp++) {
703 if ((cur_bin > lower) && (cur_bin < upper)) {
704 pilot_mask = pilot_mask | 0x1 << bp;
705 chan_mask = chan_mask | 0x1 << bp;
706 }
707 cur_bin += 100;
708 }
709 cur_bin += inc[i];
710 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
711 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
712 }
713
714 cur_vit_mask = 6100;
715 upper = bin + 120;
716 lower = bin - 120;
717
718 for (i = 0; i < 123; i++) {
719 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
720
721 /* workaround for gcc bug #37014 */
722 volatile int tmp_v = abs(cur_vit_mask - bin);
723
724 if (tmp_v < 75)
725 mask_amt = 1;
726 else
727 mask_amt = 0;
728 if (cur_vit_mask < 0)
729 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
730 else
731 mask_p[cur_vit_mask / 100] = mask_amt;
732 }
733 cur_vit_mask -= 100;
734 }
735
736 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
737 | (mask_m[48] << 26) | (mask_m[49] << 24)
738 | (mask_m[50] << 22) | (mask_m[51] << 20)
739 | (mask_m[52] << 18) | (mask_m[53] << 16)
740 | (mask_m[54] << 14) | (mask_m[55] << 12)
741 | (mask_m[56] << 10) | (mask_m[57] << 8)
742 | (mask_m[58] << 6) | (mask_m[59] << 4)
743 | (mask_m[60] << 2) | (mask_m[61] << 0);
744 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
745 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
746
747 tmp_mask = (mask_m[31] << 28)
748 | (mask_m[32] << 26) | (mask_m[33] << 24)
749 | (mask_m[34] << 22) | (mask_m[35] << 20)
750 | (mask_m[36] << 18) | (mask_m[37] << 16)
751 | (mask_m[48] << 14) | (mask_m[39] << 12)
752 | (mask_m[40] << 10) | (mask_m[41] << 8)
753 | (mask_m[42] << 6) | (mask_m[43] << 4)
754 | (mask_m[44] << 2) | (mask_m[45] << 0);
755 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
756 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
757
758 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
759 | (mask_m[18] << 26) | (mask_m[18] << 24)
760 | (mask_m[20] << 22) | (mask_m[20] << 20)
761 | (mask_m[22] << 18) | (mask_m[22] << 16)
762 | (mask_m[24] << 14) | (mask_m[24] << 12)
763 | (mask_m[25] << 10) | (mask_m[26] << 8)
764 | (mask_m[27] << 6) | (mask_m[28] << 4)
765 | (mask_m[29] << 2) | (mask_m[30] << 0);
766 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
767 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
768
769 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
770 | (mask_m[2] << 26) | (mask_m[3] << 24)
771 | (mask_m[4] << 22) | (mask_m[5] << 20)
772 | (mask_m[6] << 18) | (mask_m[7] << 16)
773 | (mask_m[8] << 14) | (mask_m[9] << 12)
774 | (mask_m[10] << 10) | (mask_m[11] << 8)
775 | (mask_m[12] << 6) | (mask_m[13] << 4)
776 | (mask_m[14] << 2) | (mask_m[15] << 0);
777 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
778 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
779
780 tmp_mask = (mask_p[15] << 28)
781 | (mask_p[14] << 26) | (mask_p[13] << 24)
782 | (mask_p[12] << 22) | (mask_p[11] << 20)
783 | (mask_p[10] << 18) | (mask_p[9] << 16)
784 | (mask_p[8] << 14) | (mask_p[7] << 12)
785 | (mask_p[6] << 10) | (mask_p[5] << 8)
786 | (mask_p[4] << 6) | (mask_p[3] << 4)
787 | (mask_p[2] << 2) | (mask_p[1] << 0);
788 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
789 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
790
791 tmp_mask = (mask_p[30] << 28)
792 | (mask_p[29] << 26) | (mask_p[28] << 24)
793 | (mask_p[27] << 22) | (mask_p[26] << 20)
794 | (mask_p[25] << 18) | (mask_p[24] << 16)
795 | (mask_p[23] << 14) | (mask_p[22] << 12)
796 | (mask_p[21] << 10) | (mask_p[20] << 8)
797 | (mask_p[19] << 6) | (mask_p[18] << 4)
798 | (mask_p[17] << 2) | (mask_p[16] << 0);
799 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
800 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
801
802 tmp_mask = (mask_p[45] << 28)
803 | (mask_p[44] << 26) | (mask_p[43] << 24)
804 | (mask_p[42] << 22) | (mask_p[41] << 20)
805 | (mask_p[40] << 18) | (mask_p[39] << 16)
806 | (mask_p[38] << 14) | (mask_p[37] << 12)
807 | (mask_p[36] << 10) | (mask_p[35] << 8)
808 | (mask_p[34] << 6) | (mask_p[33] << 4)
809 | (mask_p[32] << 2) | (mask_p[31] << 0);
810 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
811 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
812
813 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
814 | (mask_p[59] << 26) | (mask_p[58] << 24)
815 | (mask_p[57] << 22) | (mask_p[56] << 20)
816 | (mask_p[55] << 18) | (mask_p[54] << 16)
817 | (mask_p[53] << 14) | (mask_p[52] << 12)
818 | (mask_p[51] << 10) | (mask_p[50] << 8)
819 | (mask_p[49] << 6) | (mask_p[48] << 4)
820 | (mask_p[47] << 2) | (mask_p[46] << 0);
821 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
822 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
823}
824
825/**
826 * ath9k_hw_rf_alloc_ext_banks - allocates banks for external radio programming
827 * @ah: atheros hardware structure
828 *
829 * Only required for older devices with external AR2133/AR5133 radios.
830 */
831int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
832{
833#define ATH_ALLOC_BANK(bank, size) do { \
834 bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
835 if (!bank) { \
836 ath_print(common, ATH_DBG_FATAL, \
837 "Cannot allocate RF banks\n"); \
838 return -ENOMEM; \
839 } \
840 } while (0);
841
842 struct ath_common *common = ath9k_hw_common(ah);
843
844 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
845
846 ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
847 ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
848 ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
849 ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
850 ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
851 ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
852 ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
853 ATH_ALLOC_BANK(ah->addac5416_21,
854 ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
855 ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
856
857 return 0;
858#undef ATH_ALLOC_BANK
859}
860
861
862/**
863 * ath9k_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
864 * @ah: atheros hardware struture
865 * For the external AR2133/AR5133 radios banks.
866 */
867void
868ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
869{
870#define ATH_FREE_BANK(bank) do { \
871 kfree(bank); \
872 bank = NULL; \
873 } while (0);
874
875 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
876
877 ATH_FREE_BANK(ah->analogBank0Data);
878 ATH_FREE_BANK(ah->analogBank1Data);
879 ATH_FREE_BANK(ah->analogBank2Data);
880 ATH_FREE_BANK(ah->analogBank3Data);
881 ATH_FREE_BANK(ah->analogBank6Data);
882 ATH_FREE_BANK(ah->analogBank6TPCData);
883 ATH_FREE_BANK(ah->analogBank7Data);
884 ATH_FREE_BANK(ah->addac5416_21);
885 ATH_FREE_BANK(ah->bank6Temp);
886
887#undef ATH_FREE_BANK
888}
889
890/* *
891 * ath9k_hw_set_rf_regs - programs rf registers based on EEPROM
892 * @ah: atheros hardware structure
893 * @chan:
894 * @modesIndex:
895 *
896 * Used for the external AR2133/AR5133 radios.
897 *
898 * Reads the EEPROM header info from the device structure and programs
899 * all rf registers. This routine requires access to the analog
900 * rf device. This is not required for single-chip devices.
901 */
902bool ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
903 u16 modesIndex)
904{
905 u32 eepMinorRev;
906 u32 ob5GHz = 0, db5GHz = 0;
907 u32 ob2GHz = 0, db2GHz = 0;
908 int regWrites = 0;
909
910 /*
911 * Software does not need to program bank data
912 * for single chip devices, that is AR9280 or anything
913 * after that.
914 */
915 if (AR_SREV_9280_10_OR_LATER(ah))
916 return true;
917
918 /* Setup rf parameters */
919 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
920
921 /* Setup Bank 0 Write */
922 RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
923
924 /* Setup Bank 1 Write */
925 RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
926
927 /* Setup Bank 2 Write */
928 RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
929
930 /* Setup Bank 6 Write */
931 RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
932 modesIndex);
933 {
934 int i;
935 for (i = 0; i < ah->iniBank6TPC.ia_rows; i++) {
936 ah->analogBank6Data[i] =
937 INI_RA(&ah->iniBank6TPC, i, modesIndex);
938 }
939 }
940
941 /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
942 if (eepMinorRev >= 2) {
943 if (IS_CHAN_2GHZ(chan)) {
944 ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
945 db2GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_2);
946 ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
947 ob2GHz, 3, 197, 0);
948 ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
949 db2GHz, 3, 194, 0);
950 } else {
951 ob5GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_5);
952 db5GHz = ah->eep_ops->get_eeprom(ah, EEP_DB_5);
953 ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
954 ob5GHz, 3, 203, 0);
955 ath9k_phy_modify_rx_buffer(ah->analogBank6Data,
956 db5GHz, 3, 200, 0);
957 }
958 }
959
960 /* Setup Bank 7 Setup */
961 RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
962
963 /* Write Analog registers */
964 REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
965 regWrites);
966 REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
967 regWrites);
968 REG_WRITE_RF_ARRAY(&ah->iniBank2, ah->analogBank2Data,
969 regWrites);
970 REG_WRITE_RF_ARRAY(&ah->iniBank3, ah->analogBank3Data,
971 regWrites);
972 REG_WRITE_RF_ARRAY(&ah->iniBank6TPC, ah->analogBank6Data,
973 regWrites);
974 REG_WRITE_RF_ARRAY(&ah->iniBank7, ah->analogBank7Data,
975 regWrites);
976
977 return true;
978}
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 0999a495fd46..e724c2c1ae2a 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -17,589 +17,25 @@
17#ifndef PHY_H 17#ifndef PHY_H
18#define PHY_H 18#define PHY_H
19 19
20/* Common between single chip and non single-chip solutions */ 20#define CHANSEL_DIV 15
21void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites); 21#define CHANSEL_2G(_freq) (((_freq) * 0x10000) / CHANSEL_DIV)
22 22#define CHANSEL_5G(_freq) (((_freq) * 0x8000) / CHANSEL_DIV)
23/* Single chip radio settings */
24int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
25void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
26
27/* Routines below are for non single-chip solutions */
28int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
29void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
30
31int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah);
32void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah);
33
34bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
35 struct ath9k_channel *chan,
36 u16 modesIndex);
37 23
38#define AR_PHY_BASE 0x9800 24#define AR_PHY_BASE 0x9800
39#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2)) 25#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
40 26
41#define AR_PHY_TEST 0x9800 27#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX 0x0007E000
42#define PHY_AGC_CLR 0x10000000 28#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX_S 13
43#define RFSILENT_BB 0x00002000 29#define AR_PHY_TX_GAIN_CLC 0x0000001E
44 30#define AR_PHY_TX_GAIN_CLC_S 1
45#define AR_PHY_TURBO 0x9804 31#define AR_PHY_TX_GAIN 0x0007F000
46#define AR_PHY_FC_TURBO_MODE 0x00000001 32#define AR_PHY_TX_GAIN_S 12
47#define AR_PHY_FC_TURBO_SHORT 0x00000002
48#define AR_PHY_FC_DYN2040_EN 0x00000004
49#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
50#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
51/* For 25 MHz channel spacing -- not used but supported by hw */
52#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
53#define AR_PHY_FC_HT_EN 0x00000040
54#define AR_PHY_FC_SHORT_GI_40 0x00000080
55#define AR_PHY_FC_WALSH 0x00000100
56#define AR_PHY_FC_SINGLE_HT_LTF1 0x00000200
57#define AR_PHY_FC_ENABLE_DAC_FIFO 0x00000800
58
59#define AR_PHY_TEST2 0x9808
60
61#define AR_PHY_TIMING2 0x9810
62#define AR_PHY_TIMING3 0x9814
63#define AR_PHY_TIMING3_DSC_MAN 0xFFFE0000
64#define AR_PHY_TIMING3_DSC_MAN_S 17
65#define AR_PHY_TIMING3_DSC_EXP 0x0001E000
66#define AR_PHY_TIMING3_DSC_EXP_S 13
67
68#define AR_PHY_CHIP_ID 0x9818
69#define AR_PHY_CHIP_ID_REV_0 0x80
70#define AR_PHY_CHIP_ID_REV_1 0x81
71#define AR_PHY_CHIP_ID_9160_REV_0 0xb0
72
73#define AR_PHY_ACTIVE 0x981C
74#define AR_PHY_ACTIVE_EN 0x00000001
75#define AR_PHY_ACTIVE_DIS 0x00000000
76
77#define AR_PHY_RF_CTL2 0x9824
78#define AR_PHY_TX_END_DATA_START 0x000000FF
79#define AR_PHY_TX_END_DATA_START_S 0
80#define AR_PHY_TX_END_PA_ON 0x0000FF00
81#define AR_PHY_TX_END_PA_ON_S 8
82
83#define AR_PHY_RF_CTL3 0x9828
84#define AR_PHY_TX_END_TO_A2_RX_ON 0x00FF0000
85#define AR_PHY_TX_END_TO_A2_RX_ON_S 16
86
87#define AR_PHY_ADC_CTL 0x982C
88#define AR_PHY_ADC_CTL_OFF_INBUFGAIN 0x00000003
89#define AR_PHY_ADC_CTL_OFF_INBUFGAIN_S 0
90#define AR_PHY_ADC_CTL_OFF_PWDDAC 0x00002000
91#define AR_PHY_ADC_CTL_OFF_PWDBANDGAP 0x00004000
92#define AR_PHY_ADC_CTL_OFF_PWDADC 0x00008000
93#define AR_PHY_ADC_CTL_ON_INBUFGAIN 0x00030000
94#define AR_PHY_ADC_CTL_ON_INBUFGAIN_S 16
95
96#define AR_PHY_ADC_SERIAL_CTL 0x9830
97#define AR_PHY_SEL_INTERNAL_ADDAC 0x00000000
98#define AR_PHY_SEL_EXTERNAL_RADIO 0x00000001
99
100#define AR_PHY_RF_CTL4 0x9834
101#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF 0xFF000000
102#define AR_PHY_RF_CTL4_TX_END_XPAB_OFF_S 24
103#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF 0x00FF0000
104#define AR_PHY_RF_CTL4_TX_END_XPAA_OFF_S 16
105#define AR_PHY_RF_CTL4_FRAME_XPAB_ON 0x0000FF00
106#define AR_PHY_RF_CTL4_FRAME_XPAB_ON_S 8
107#define AR_PHY_RF_CTL4_FRAME_XPAA_ON 0x000000FF
108#define AR_PHY_RF_CTL4_FRAME_XPAA_ON_S 0
109
110#define AR_PHY_TSTDAC_CONST 0x983c
111
112#define AR_PHY_SETTLING 0x9844
113#define AR_PHY_SETTLING_SWITCH 0x00003F80
114#define AR_PHY_SETTLING_SWITCH_S 7
115
116#define AR_PHY_RXGAIN 0x9848
117#define AR_PHY_RXGAIN_TXRX_ATTEN 0x0003F000
118#define AR_PHY_RXGAIN_TXRX_ATTEN_S 12
119#define AR_PHY_RXGAIN_TXRX_RF_MAX 0x007C0000
120#define AR_PHY_RXGAIN_TXRX_RF_MAX_S 18
121#define AR9280_PHY_RXGAIN_TXRX_ATTEN 0x00003F80
122#define AR9280_PHY_RXGAIN_TXRX_ATTEN_S 7
123#define AR9280_PHY_RXGAIN_TXRX_MARGIN 0x001FC000
124#define AR9280_PHY_RXGAIN_TXRX_MARGIN_S 14
125
126#define AR_PHY_DESIRED_SZ 0x9850
127#define AR_PHY_DESIRED_SZ_ADC 0x000000FF
128#define AR_PHY_DESIRED_SZ_ADC_S 0
129#define AR_PHY_DESIRED_SZ_PGA 0x0000FF00
130#define AR_PHY_DESIRED_SZ_PGA_S 8
131#define AR_PHY_DESIRED_SZ_TOT_DES 0x0FF00000
132#define AR_PHY_DESIRED_SZ_TOT_DES_S 20
133
134#define AR_PHY_FIND_SIG 0x9858
135#define AR_PHY_FIND_SIG_FIRSTEP 0x0003F000
136#define AR_PHY_FIND_SIG_FIRSTEP_S 12
137#define AR_PHY_FIND_SIG_FIRPWR 0x03FC0000
138#define AR_PHY_FIND_SIG_FIRPWR_S 18
139
140#define AR_PHY_AGC_CTL1 0x985C
141#define AR_PHY_AGC_CTL1_COARSE_LOW 0x00007F80
142#define AR_PHY_AGC_CTL1_COARSE_LOW_S 7
143#define AR_PHY_AGC_CTL1_COARSE_HIGH 0x003F8000
144#define AR_PHY_AGC_CTL1_COARSE_HIGH_S 15
145
146#define AR_PHY_AGC_CONTROL 0x9860
147#define AR_PHY_AGC_CONTROL_CAL 0x00000001
148#define AR_PHY_AGC_CONTROL_NF 0x00000002
149#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000
150#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000
151#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
152
153#define AR_PHY_CCA 0x9864
154#define AR_PHY_MINCCA_PWR 0x0FF80000
155#define AR_PHY_MINCCA_PWR_S 19
156#define AR_PHY_CCA_THRESH62 0x0007F000
157#define AR_PHY_CCA_THRESH62_S 12
158#define AR9280_PHY_MINCCA_PWR 0x1FF00000
159#define AR9280_PHY_MINCCA_PWR_S 20
160#define AR9280_PHY_CCA_THRESH62 0x000FF000
161#define AR9280_PHY_CCA_THRESH62_S 12
162
163#define AR_PHY_SFCORR_LOW 0x986C
164#define AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW 0x00000001
165#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW 0x00003F00
166#define AR_PHY_SFCORR_LOW_M2COUNT_THR_LOW_S 8
167#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW 0x001FC000
168#define AR_PHY_SFCORR_LOW_M1_THRESH_LOW_S 14
169#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW 0x0FE00000
170#define AR_PHY_SFCORR_LOW_M2_THRESH_LOW_S 21
171
172#define AR_PHY_SFCORR 0x9868
173#define AR_PHY_SFCORR_M2COUNT_THR 0x0000001F
174#define AR_PHY_SFCORR_M2COUNT_THR_S 0
175#define AR_PHY_SFCORR_M1_THRESH 0x00FE0000
176#define AR_PHY_SFCORR_M1_THRESH_S 17
177#define AR_PHY_SFCORR_M2_THRESH 0x7F000000
178#define AR_PHY_SFCORR_M2_THRESH_S 24
179
180#define AR_PHY_SLEEP_CTR_CONTROL 0x9870
181#define AR_PHY_SLEEP_CTR_LIMIT 0x9874
182#define AR_PHY_SYNTH_CONTROL 0x9874
183#define AR_PHY_SLEEP_SCAL 0x9878
184
185#define AR_PHY_PLL_CTL 0x987c
186#define AR_PHY_PLL_CTL_40 0xaa
187#define AR_PHY_PLL_CTL_40_5413 0x04
188#define AR_PHY_PLL_CTL_44 0xab
189#define AR_PHY_PLL_CTL_44_2133 0xeb
190#define AR_PHY_PLL_CTL_40_2133 0xea
191
192#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
193#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
194#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
195#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
196#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
197#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
198#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
199#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
200#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
201#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
202#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
203#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
204#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
205#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
206
207#define AR_PHY_RX_DELAY 0x9914
208#define AR_PHY_SEARCH_START_DELAY 0x9918
209#define AR_PHY_RX_DELAY_DELAY 0x00003FFF
210
211#define AR_PHY_TIMING_CTRL4(_i) (0x9920 + ((_i) << 12))
212#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF 0x01F
213#define AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF_S 0
214#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF 0x7E0
215#define AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF_S 5
216#define AR_PHY_TIMING_CTRL4_IQCORR_ENABLE 0x800
217#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX 0xF000
218#define AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX_S 12
219#define AR_PHY_TIMING_CTRL4_DO_CAL 0x10000
220
221#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI 0x80000000
222#define AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER 0x40000000
223#define AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK 0x20000000
224#define AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK 0x10000000
225
226#define AR_PHY_TIMING5 0x9924
227#define AR_PHY_TIMING5_CYCPWR_THR1 0x000000FE
228#define AR_PHY_TIMING5_CYCPWR_THR1_S 1
229
230#define AR_PHY_POWER_TX_RATE1 0x9934
231#define AR_PHY_POWER_TX_RATE2 0x9938
232#define AR_PHY_POWER_TX_RATE_MAX 0x993c
233#define AR_PHY_POWER_TX_RATE_MAX_TPC_ENABLE 0x00000040
234
235#define AR_PHY_FRAME_CTL 0x9944
236#define AR_PHY_FRAME_CTL_TX_CLIP 0x00000038
237#define AR_PHY_FRAME_CTL_TX_CLIP_S 3
238
239#define AR_PHY_TXPWRADJ 0x994C
240#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA 0x00000FC0
241#define AR_PHY_TXPWRADJ_CCK_GAIN_DELTA_S 6
242#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX 0x00FC0000
243#define AR_PHY_TXPWRADJ_CCK_PCDAC_INDEX_S 18
244
245#define AR_PHY_RADAR_EXT 0x9940
246#define AR_PHY_RADAR_EXT_ENA 0x00004000
247
248#define AR_PHY_RADAR_0 0x9954
249#define AR_PHY_RADAR_0_ENA 0x00000001
250#define AR_PHY_RADAR_0_FFT_ENA 0x80000000
251#define AR_PHY_RADAR_0_INBAND 0x0000003e
252#define AR_PHY_RADAR_0_INBAND_S 1
253#define AR_PHY_RADAR_0_PRSSI 0x00000FC0
254#define AR_PHY_RADAR_0_PRSSI_S 6
255#define AR_PHY_RADAR_0_HEIGHT 0x0003F000
256#define AR_PHY_RADAR_0_HEIGHT_S 12
257#define AR_PHY_RADAR_0_RRSSI 0x00FC0000
258#define AR_PHY_RADAR_0_RRSSI_S 18
259#define AR_PHY_RADAR_0_FIRPWR 0x7F000000
260#define AR_PHY_RADAR_0_FIRPWR_S 24
261
262#define AR_PHY_RADAR_1 0x9958
263#define AR_PHY_RADAR_1_RELPWR_ENA 0x00800000
264#define AR_PHY_RADAR_1_USE_FIR128 0x00400000
265#define AR_PHY_RADAR_1_RELPWR_THRESH 0x003F0000
266#define AR_PHY_RADAR_1_RELPWR_THRESH_S 16
267#define AR_PHY_RADAR_1_BLOCK_CHECK 0x00008000
268#define AR_PHY_RADAR_1_MAX_RRSSI 0x00004000
269#define AR_PHY_RADAR_1_RELSTEP_CHECK 0x00002000
270#define AR_PHY_RADAR_1_RELSTEP_THRESH 0x00001F00
271#define AR_PHY_RADAR_1_RELSTEP_THRESH_S 8
272#define AR_PHY_RADAR_1_MAXLEN 0x000000FF
273#define AR_PHY_RADAR_1_MAXLEN_S 0
274
275#define AR_PHY_SWITCH_CHAIN_0 0x9960
276#define AR_PHY_SWITCH_COM 0x9964
277
278#define AR_PHY_SIGMA_DELTA 0x996C
279#define AR_PHY_SIGMA_DELTA_ADC_SEL 0x00000003
280#define AR_PHY_SIGMA_DELTA_ADC_SEL_S 0
281#define AR_PHY_SIGMA_DELTA_FILT2 0x000000F8
282#define AR_PHY_SIGMA_DELTA_FILT2_S 3
283#define AR_PHY_SIGMA_DELTA_FILT1 0x00001F00
284#define AR_PHY_SIGMA_DELTA_FILT1_S 8
285#define AR_PHY_SIGMA_DELTA_ADC_CLIP 0x01FFE000
286#define AR_PHY_SIGMA_DELTA_ADC_CLIP_S 13
287
288#define AR_PHY_RESTART 0x9970
289#define AR_PHY_RESTART_DIV_GC 0x001C0000
290#define AR_PHY_RESTART_DIV_GC_S 18
291
292#define AR_PHY_RFBUS_REQ 0x997C
293#define AR_PHY_RFBUS_REQ_EN 0x00000001
294
295#define AR_PHY_TIMING7 0x9980
296#define AR_PHY_TIMING8 0x9984
297#define AR_PHY_TIMING8_PILOT_MASK_2 0x000FFFFF
298#define AR_PHY_TIMING8_PILOT_MASK_2_S 0
299
300#define AR_PHY_BIN_MASK2_1 0x9988
301#define AR_PHY_BIN_MASK2_2 0x998c
302#define AR_PHY_BIN_MASK2_3 0x9990
303#define AR_PHY_BIN_MASK2_4 0x9994
304
305#define AR_PHY_BIN_MASK_1 0x9900
306#define AR_PHY_BIN_MASK_2 0x9904
307#define AR_PHY_BIN_MASK_3 0x9908
308
309#define AR_PHY_MASK_CTL 0x990c
310
311#define AR_PHY_BIN_MASK2_4_MASK_4 0x00003FFF
312#define AR_PHY_BIN_MASK2_4_MASK_4_S 0
313
314#define AR_PHY_TIMING9 0x9998
315#define AR_PHY_TIMING10 0x999c
316#define AR_PHY_TIMING10_PILOT_MASK_2 0x000FFFFF
317#define AR_PHY_TIMING10_PILOT_MASK_2_S 0
318
319#define AR_PHY_TIMING11 0x99a0
320#define AR_PHY_TIMING11_SPUR_DELTA_PHASE 0x000FFFFF
321#define AR_PHY_TIMING11_SPUR_DELTA_PHASE_S 0
322#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
323#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
324#define AR_PHY_TIMING11_USE_SPUR_IN_AGC 0x40000000
325#define AR_PHY_TIMING11_USE_SPUR_IN_SELFCOR 0x80000000
326
327#define AR_PHY_RX_CHAINMASK 0x99a4
328#define AR_PHY_NEW_ADC_DC_GAIN_CORR(_i) (0x99b4 + ((_i) << 12))
329#define AR_PHY_NEW_ADC_GAIN_CORR_ENABLE 0x40000000
330#define AR_PHY_NEW_ADC_DC_OFFSET_CORR_ENABLE 0x80000000
331
332#define AR_PHY_MULTICHAIN_GAIN_CTL 0x99ac
333#define AR_PHY_9285_ANT_DIV_CTL_ALL 0x7f000000
334#define AR_PHY_9285_ANT_DIV_CTL 0x01000000
335#define AR_PHY_9285_ANT_DIV_CTL_S 24
336#define AR_PHY_9285_ANT_DIV_ALT_LNACONF 0x06000000
337#define AR_PHY_9285_ANT_DIV_ALT_LNACONF_S 25
338#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF 0x18000000
339#define AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S 27
340#define AR_PHY_9285_ANT_DIV_ALT_GAINTB 0x20000000
341#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
342#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
343#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
344#define AR_PHY_9285_ANT_DIV_LNA1 2
345#define AR_PHY_9285_ANT_DIV_LNA2 1
346#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
347#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
348#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
349#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
350 33
351#define AR_PHY_EXT_CCA0 0x99b8 34#define AR_PHY_CLC_TBL1 0xa35c
352#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF 35#define AR_PHY_CLC_I0 0x07ff0000
353#define AR_PHY_EXT_CCA0_THRESH62_S 0 36#define AR_PHY_CLC_I0_S 16
354 37#define AR_PHY_CLC_Q0 0x0000ffd0
355#define AR_PHY_EXT_CCA 0x99bc 38#define AR_PHY_CLC_Q0_S 5
356#define AR_PHY_EXT_CCA_CYCPWR_THR1 0x0000FE00
357#define AR_PHY_EXT_CCA_CYCPWR_THR1_S 9
358#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
359#define AR_PHY_EXT_CCA_THRESH62_S 16
360#define AR_PHY_EXT_MINCCA_PWR 0xFF800000
361#define AR_PHY_EXT_MINCCA_PWR_S 23
362#define AR9280_PHY_EXT_MINCCA_PWR 0x01FF0000
363#define AR9280_PHY_EXT_MINCCA_PWR_S 16
364
365#define AR_PHY_SFCORR_EXT 0x99c0
366#define AR_PHY_SFCORR_EXT_M1_THRESH 0x0000007F
367#define AR_PHY_SFCORR_EXT_M1_THRESH_S 0
368#define AR_PHY_SFCORR_EXT_M2_THRESH 0x00003F80
369#define AR_PHY_SFCORR_EXT_M2_THRESH_S 7
370#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW 0x001FC000
371#define AR_PHY_SFCORR_EXT_M1_THRESH_LOW_S 14
372#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW 0x0FE00000
373#define AR_PHY_SFCORR_EXT_M2_THRESH_LOW_S 21
374#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
375
376#define AR_PHY_HALFGI 0x99D0
377#define AR_PHY_HALFGI_DSC_MAN 0x0007FFF0
378#define AR_PHY_HALFGI_DSC_MAN_S 4
379#define AR_PHY_HALFGI_DSC_EXP 0x0000000F
380#define AR_PHY_HALFGI_DSC_EXP_S 0
381
382#define AR_PHY_CHAN_INFO_MEMORY 0x99DC
383#define AR_PHY_CHAN_INFO_MEMORY_CAPTURE_MASK 0x0001
384
385#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
386
387#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
388#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
389
390#define AR_PHY_M_SLEEP 0x99f0
391#define AR_PHY_REFCLKDLY 0x99f4
392#define AR_PHY_REFCLKPD 0x99f8
393
394#define AR_PHY_CALMODE 0x99f0
395
396#define AR_PHY_CALMODE_IQ 0x00000000
397#define AR_PHY_CALMODE_ADC_GAIN 0x00000001
398#define AR_PHY_CALMODE_ADC_DC_PER 0x00000002
399#define AR_PHY_CALMODE_ADC_DC_INIT 0x00000003
400
401#define AR_PHY_CAL_MEAS_0(_i) (0x9c10 + ((_i) << 12))
402#define AR_PHY_CAL_MEAS_1(_i) (0x9c14 + ((_i) << 12))
403#define AR_PHY_CAL_MEAS_2(_i) (0x9c18 + ((_i) << 12))
404#define AR_PHY_CAL_MEAS_3(_i) (0x9c1c + ((_i) << 12))
405
406#define AR_PHY_CURRENT_RSSI 0x9c1c
407#define AR9280_PHY_CURRENT_RSSI 0x9c3c
408
409#define AR_PHY_RFBUS_GRANT 0x9C20
410#define AR_PHY_RFBUS_GRANT_EN 0x00000001
411
412#define AR_PHY_CHAN_INFO_GAIN_DIFF 0x9CF4
413#define AR_PHY_CHAN_INFO_GAIN_DIFF_UPPER_LIMIT 320
414
415#define AR_PHY_CHAN_INFO_GAIN 0x9CFC
416
417#define AR_PHY_MODE 0xA200
418#define AR_PHY_MODE_ASYNCFIFO 0x80
419#define AR_PHY_MODE_AR2133 0x08
420#define AR_PHY_MODE_AR5111 0x00
421#define AR_PHY_MODE_AR5112 0x08
422#define AR_PHY_MODE_DYNAMIC 0x04
423#define AR_PHY_MODE_RF2GHZ 0x02
424#define AR_PHY_MODE_RF5GHZ 0x00
425#define AR_PHY_MODE_CCK 0x01
426#define AR_PHY_MODE_OFDM 0x00
427#define AR_PHY_MODE_DYN_CCK_DISABLE 0x100
428
429#define AR_PHY_CCK_TX_CTRL 0xA204
430#define AR_PHY_CCK_TX_CTRL_JAPAN 0x00000010
431#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK 0x0000000C
432#define AR_PHY_CCK_TX_CTRL_TX_DAC_SCALE_CCK_S 2
433
434#define AR_PHY_CCK_DETECT 0xA208
435#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK 0x0000003F
436#define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S 0
437/* [12:6] settling time for antenna switch */
438#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME 0x00001FC0
439#define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME_S 6
440#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV 0x2000
441#define AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV_S 13
442
443#define AR_PHY_GAIN_2GHZ 0xA20C
444#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN 0x00FC0000
445#define AR_PHY_GAIN_2GHZ_RXTX_MARGIN_S 18
446#define AR_PHY_GAIN_2GHZ_BSW_MARGIN 0x00003C00
447#define AR_PHY_GAIN_2GHZ_BSW_MARGIN_S 10
448#define AR_PHY_GAIN_2GHZ_BSW_ATTEN 0x0000001F
449#define AR_PHY_GAIN_2GHZ_BSW_ATTEN_S 0
450
451#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN 0x003E0000
452#define AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN_S 17
453#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN 0x0001F000
454#define AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN_S 12
455#define AR_PHY_GAIN_2GHZ_XATTEN2_DB 0x00000FC0
456#define AR_PHY_GAIN_2GHZ_XATTEN2_DB_S 6
457#define AR_PHY_GAIN_2GHZ_XATTEN1_DB 0x0000003F
458#define AR_PHY_GAIN_2GHZ_XATTEN1_DB_S 0
459
460#define AR_PHY_CCK_RXCTRL4 0xA21C
461#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT 0x01F80000
462#define AR_PHY_CCK_RXCTRL4_FREQ_EST_SHORT_S 19
463
464#define AR_PHY_DAG_CTRLCCK 0xA228
465#define AR_PHY_DAG_CTRLCCK_EN_RSSI_THR 0x00000200
466#define AR_PHY_DAG_CTRLCCK_RSSI_THR 0x0001FC00
467#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
468
469#define AR_PHY_FORCE_CLKEN_CCK 0xA22C
470#define AR_PHY_FORCE_CLKEN_CCK_MRC_MUX 0x00000040
471
472#define AR_PHY_POWER_TX_RATE3 0xA234
473#define AR_PHY_POWER_TX_RATE4 0xA238
474
475#define AR_PHY_SCRM_SEQ_XR 0xA23C
476#define AR_PHY_HEADER_DETECT_XR 0xA240
477#define AR_PHY_CHIRP_DETECTED_XR 0xA244
478#define AR_PHY_BLUETOOTH 0xA254
479
480#define AR_PHY_TPCRG1 0xA258
481#define AR_PHY_TPCRG1_NUM_PD_GAIN 0x0000c000
482#define AR_PHY_TPCRG1_NUM_PD_GAIN_S 14
483
484#define AR_PHY_TPCRG1_PD_GAIN_1 0x00030000
485#define AR_PHY_TPCRG1_PD_GAIN_1_S 16
486#define AR_PHY_TPCRG1_PD_GAIN_2 0x000C0000
487#define AR_PHY_TPCRG1_PD_GAIN_2_S 18
488#define AR_PHY_TPCRG1_PD_GAIN_3 0x00300000
489#define AR_PHY_TPCRG1_PD_GAIN_3_S 20
490
491#define AR_PHY_TPCRG1_PD_CAL_ENABLE 0x00400000
492#define AR_PHY_TPCRG1_PD_CAL_ENABLE_S 22
493
494#define AR_PHY_TX_PWRCTRL4 0xa264
495#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID 0x00000001
496#define AR_PHY_TX_PWRCTRL_PD_AVG_VALID_S 0
497#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT 0x000001FE
498#define AR_PHY_TX_PWRCTRL_PD_AVG_OUT_S 1
499
500#define AR_PHY_TX_PWRCTRL6_0 0xa270
501#define AR_PHY_TX_PWRCTRL6_1 0xb270
502#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE 0x03000000
503#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
504
505#define AR_PHY_TX_PWRCTRL7 0xa274
506#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
507#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
508
509#define AR_PHY_TX_PWRCTRL9 0xa27C
510#define AR_PHY_TX_DESIRED_SCALE_CCK 0x00007C00
511#define AR_PHY_TX_DESIRED_SCALE_CCK_S 10
512#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL 0x80000000
513#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
514
515#define AR_PHY_TX_GAIN_TBL1 0xa300
516#define AR_PHY_TX_GAIN 0x0007F000
517#define AR_PHY_TX_GAIN_S 12
518
519#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
520#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
521#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP 0x0000FC00
522#define AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP_S 10
523
524#define AR_PHY_VIT_MASK2_M_46_61 0xa3a0
525#define AR_PHY_MASK2_M_31_45 0xa3a4
526#define AR_PHY_MASK2_M_16_30 0xa3a8
527#define AR_PHY_MASK2_M_00_15 0xa3ac
528#define AR_PHY_MASK2_P_15_01 0xa3b8
529#define AR_PHY_MASK2_P_30_16 0xa3bc
530#define AR_PHY_MASK2_P_45_31 0xa3c0
531#define AR_PHY_MASK2_P_61_45 0xa3c4
532#define AR_PHY_SPUR_REG 0x994c
533
534#define AR_PHY_SPUR_REG_MASK_RATE_CNTL (0xFF << 18)
535#define AR_PHY_SPUR_REG_MASK_RATE_CNTL_S 18
536
537#define AR_PHY_SPUR_REG_ENABLE_MASK_PPM 0x20000
538#define AR_PHY_SPUR_REG_MASK_RATE_SELECT (0xFF << 9)
539#define AR_PHY_SPUR_REG_MASK_RATE_SELECT_S 9
540#define AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI 0x100
541#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH 0x7F
542#define AR_PHY_SPUR_REG_SPUR_RSSI_THRESH_S 0
543
544#define AR_PHY_PILOT_MASK_01_30 0xa3b0
545#define AR_PHY_PILOT_MASK_31_60 0xa3b4
546
547#define AR_PHY_CHANNEL_MASK_01_30 0x99d4
548#define AR_PHY_CHANNEL_MASK_31_60 0x99d8
549
550#define AR_PHY_ANALOG_SWAP 0xa268
551#define AR_PHY_SWAP_ALT_CHAIN 0x00000040
552
553#define AR_PHY_TPCRG5 0xA26C
554#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP 0x0000000F
555#define AR_PHY_TPCRG5_PD_GAIN_OVERLAP_S 0
556#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1 0x000003F0
557#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1_S 4
558#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2 0x0000FC00
559#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2_S 10
560#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3 0x003F0000
561#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3_S 16
562#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4 0x0FC00000
563#define AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4_S 22
564
565/* Carrier leak calibration control, do it after AGC calibration */
566#define AR_PHY_CL_CAL_CTL 0xA358
567#define AR_PHY_CL_CAL_ENABLE 0x00000002
568#define AR_PHY_PARALLEL_CAL_ENABLE 0x00000001
569
570#define AR_PHY_POWER_TX_RATE5 0xA38C
571#define AR_PHY_POWER_TX_RATE6 0xA390
572
573#define AR_PHY_CAL_CHAINMASK 0xA39C
574
575#define AR_PHY_POWER_TX_SUB 0xA3C8
576#define AR_PHY_POWER_TX_RATE7 0xA3CC
577#define AR_PHY_POWER_TX_RATE8 0xA3D0
578#define AR_PHY_POWER_TX_RATE9 0xA3D4
579
580#define AR_PHY_XPA_CFG 0xA3D8
581#define AR_PHY_FORCE_XPA_CFG 0x000000001
582#define AR_PHY_FORCE_XPA_CFG_S 0
583
584#define AR_PHY_CH1_CCA 0xa864
585#define AR_PHY_CH1_MINCCA_PWR 0x0FF80000
586#define AR_PHY_CH1_MINCCA_PWR_S 19
587#define AR9280_PHY_CH1_MINCCA_PWR 0x1FF00000
588#define AR9280_PHY_CH1_MINCCA_PWR_S 20
589
590#define AR_PHY_CH2_CCA 0xb864
591#define AR_PHY_CH2_MINCCA_PWR 0x0FF80000
592#define AR_PHY_CH2_MINCCA_PWR_S 19
593
594#define AR_PHY_CH1_EXT_CCA 0xa9bc
595#define AR_PHY_CH1_EXT_MINCCA_PWR 0xFF800000
596#define AR_PHY_CH1_EXT_MINCCA_PWR_S 23
597#define AR9280_PHY_CH1_EXT_MINCCA_PWR 0x01FF0000
598#define AR9280_PHY_CH1_EXT_MINCCA_PWR_S 16
599
600#define AR_PHY_CH2_EXT_CCA 0xb9bc
601#define AR_PHY_CH2_EXT_MINCCA_PWR 0xFF800000
602#define AR_PHY_CH2_EXT_MINCCA_PWR_S 23
603 39
604#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \ 40#define REG_WRITE_RF_ARRAY(iniarray, regData, regWr) do { \
605 int r; \ 41 int r; \
@@ -615,6 +51,7 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
615#define ANTSWAP_AB 0x0001 51#define ANTSWAP_AB 0x0001
616#define REDUCE_CHAIN_0 0x00000050 52#define REDUCE_CHAIN_0 0x00000050
617#define REDUCE_CHAIN_1 0x00000051 53#define REDUCE_CHAIN_1 0x00000051
54#define AR_PHY_CHIP_ID 0x9818
618 55
619#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \ 56#define RF_BANK_SETUP(_bank, _iniarray, _col) do { \
620 int i; \ 57 int i; \
@@ -622,4 +59,7 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
622 (_bank)[i] = INI_RA((_iniarray), i, _col);; \ 59 (_bank)[i] = INI_RA((_iniarray), i, _col);; \
623 } while (0) 60 } while (0)
624 61
62#define AR_PHY_TIMING11_SPUR_FREQ_SD 0x3FF00000
63#define AR_PHY_TIMING11_SPUR_FREQ_SD_S 20
64
625#endif 65#endif
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 244e1c629177..8519452c95f1 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -691,6 +691,19 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
691 rate_table = sc->cur_rate_table; 691 rate_table = sc->cur_rate_table;
692 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe); 692 rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
693 693
694 /*
695 * If we're in HT mode and both us and our peer supports LDPC.
696 * We don't need to check our own device's capabilities as our own
697 * ht capabilities would have already been intersected with our peer's.
698 */
699 if (conf_is_ht(&sc->hw->conf) &&
700 (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
701 tx_info->flags |= IEEE80211_TX_CTL_LDPC;
702
703 if (conf_is_ht(&sc->hw->conf) &&
704 (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
705 tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
706
694 if (is_probe) { 707 if (is_probe) {
695 /* set one try for probe rates. For the 708 /* set one try for probe rates. For the
696 * probes don't enable rts */ 709 * probes don't enable rts */
@@ -1228,8 +1241,12 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1228 long_retry = rate->count - 1; 1241 long_retry = rate->count - 1;
1229 } 1242 }
1230 1243
1231 if (!priv_sta || !ieee80211_is_data(fc) || 1244 if (!priv_sta || !ieee80211_is_data(fc))
1232 !(tx_info->pad[0] & ATH_TX_INFO_UPDATE_RC)) 1245 return;
1246
1247 /* This packet was aggregated but doesn't carry status info */
1248 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
1249 !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
1233 return; 1250 return;
1234 1251
1235 if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) 1252 if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 4f6d6fd442f4..3d8d40cdc99e 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -110,8 +110,8 @@ struct ath_rate_table {
110 int rate_cnt; 110 int rate_cnt;
111 int mcs_start; 111 int mcs_start;
112 struct { 112 struct {
113 int valid; 113 u8 valid;
114 int valid_single_stream; 114 u8 valid_single_stream;
115 u8 phy; 115 u8 phy;
116 u32 ratekbps; 116 u32 ratekbps;
117 u32 user_ratekbps; 117 u32 user_ratekbps;
@@ -172,14 +172,13 @@ struct ath_rate_priv {
172 172
173#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0) 173#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0)
174#define ATH_TX_INFO_FRAME_TYPE_PAUSE (1 << 1) 174#define ATH_TX_INFO_FRAME_TYPE_PAUSE (1 << 1)
175#define ATH_TX_INFO_UPDATE_RC (1 << 2)
176#define ATH_TX_INFO_XRETRY (1 << 3) 175#define ATH_TX_INFO_XRETRY (1 << 3)
177#define ATH_TX_INFO_UNDERRUN (1 << 4) 176#define ATH_TX_INFO_UNDERRUN (1 << 4)
178 177
179enum ath9k_internal_frame_type { 178enum ath9k_internal_frame_type {
180 ATH9K_NOT_INTERNAL, 179 ATH9K_IFT_NOT_INTERNAL,
181 ATH9K_INT_PAUSE, 180 ATH9K_IFT_PAUSE,
182 ATH9K_INT_UNPAUSE 181 ATH9K_IFT_UNPAUSE
183}; 182};
184 183
185int ath_rate_control_register(void); 184int ath_rate_control_register(void);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 1ca42e5148c8..ba139132c85f 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -15,6 +15,9 @@
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "ath9k.h"
18#include "ar9003_mac.h"
19
20#define SKB_CB_ATHBUF(__skb) (*((struct ath_buf **)__skb->cb))
18 21
19static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc, 22static struct ieee80211_hw * ath_get_virt_hw(struct ath_softc *sc,
20 struct ieee80211_hdr *hdr) 23 struct ieee80211_hdr *hdr)
@@ -115,56 +118,244 @@ static void ath_opmode_init(struct ath_softc *sc)
115 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]); 118 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
116} 119}
117 120
118int ath_rx_init(struct ath_softc *sc, int nbufs) 121static bool ath_rx_edma_buf_link(struct ath_softc *sc,
122 enum ath9k_rx_qtype qtype)
119{ 123{
120 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 124 struct ath_hw *ah = sc->sc_ah;
125 struct ath_rx_edma *rx_edma;
121 struct sk_buff *skb; 126 struct sk_buff *skb;
122 struct ath_buf *bf; 127 struct ath_buf *bf;
123 int error = 0;
124 128
125 spin_lock_init(&sc->rx.rxflushlock); 129 rx_edma = &sc->rx.rx_edma[qtype];
126 sc->sc_flags &= ~SC_OP_RXFLUSH; 130 if (skb_queue_len(&rx_edma->rx_fifo) >= rx_edma->rx_fifo_hwsize)
127 spin_lock_init(&sc->rx.rxbuflock); 131 return false;
128 132
129 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN, 133 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
130 min(common->cachelsz, (u16)64)); 134 list_del_init(&bf->list);
131 135
132 ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", 136 skb = bf->bf_mpdu;
133 common->cachelsz, common->rx_bufsize); 137
138 ATH_RXBUF_RESET(bf);
139 memset(skb->data, 0, ah->caps.rx_status_len);
140 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
141 ah->caps.rx_status_len, DMA_TO_DEVICE);
134 142
135 /* Initialize rx descriptors */ 143 SKB_CB_ATHBUF(skb) = bf;
144 ath9k_hw_addrxbuf_edma(ah, bf->bf_buf_addr, qtype);
145 skb_queue_tail(&rx_edma->rx_fifo, skb);
136 146
137 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, 147 return true;
138 "rx", nbufs, 1); 148}
139 if (error != 0) { 149
140 ath_print(common, ATH_DBG_FATAL, 150static void ath_rx_addbuffer_edma(struct ath_softc *sc,
141 "failed to allocate rx descriptors: %d\n", error); 151 enum ath9k_rx_qtype qtype, int size)
142 goto err; 152{
153 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
154 u32 nbuf = 0;
155
156 if (list_empty(&sc->rx.rxbuf)) {
157 ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n");
158 return;
143 } 159 }
144 160
161 while (!list_empty(&sc->rx.rxbuf)) {
162 nbuf++;
163
164 if (!ath_rx_edma_buf_link(sc, qtype))
165 break;
166
167 if (nbuf >= size)
168 break;
169 }
170}
171
172static void ath_rx_remove_buffer(struct ath_softc *sc,
173 enum ath9k_rx_qtype qtype)
174{
175 struct ath_buf *bf;
176 struct ath_rx_edma *rx_edma;
177 struct sk_buff *skb;
178
179 rx_edma = &sc->rx.rx_edma[qtype];
180
181 while ((skb = skb_dequeue(&rx_edma->rx_fifo)) != NULL) {
182 bf = SKB_CB_ATHBUF(skb);
183 BUG_ON(!bf);
184 list_add_tail(&bf->list, &sc->rx.rxbuf);
185 }
186}
187
188static void ath_rx_edma_cleanup(struct ath_softc *sc)
189{
190 struct ath_buf *bf;
191
192 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
193 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
194
145 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 195 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
196 if (bf->bf_mpdu)
197 dev_kfree_skb_any(bf->bf_mpdu);
198 }
199
200 INIT_LIST_HEAD(&sc->rx.rxbuf);
201
202 kfree(sc->rx.rx_bufptr);
203 sc->rx.rx_bufptr = NULL;
204}
205
206static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
207{
208 skb_queue_head_init(&rx_edma->rx_fifo);
209 skb_queue_head_init(&rx_edma->rx_buffers);
210 rx_edma->rx_fifo_hwsize = size;
211}
212
213static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
214{
215 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
216 struct ath_hw *ah = sc->sc_ah;
217 struct sk_buff *skb;
218 struct ath_buf *bf;
219 int error = 0, i;
220 u32 size;
221
222
223 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN +
224 ah->caps.rx_status_len,
225 min(common->cachelsz, (u16)64));
226
227 ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
228 ah->caps.rx_status_len);
229
230 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_LP],
231 ah->caps.rx_lp_qdepth);
232 ath_rx_edma_init_queue(&sc->rx.rx_edma[ATH9K_RX_QUEUE_HP],
233 ah->caps.rx_hp_qdepth);
234
235 size = sizeof(struct ath_buf) * nbufs;
236 bf = kzalloc(size, GFP_KERNEL);
237 if (!bf)
238 return -ENOMEM;
239
240 INIT_LIST_HEAD(&sc->rx.rxbuf);
241 sc->rx.rx_bufptr = bf;
242
243 for (i = 0; i < nbufs; i++, bf++) {
146 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL); 244 skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_KERNEL);
147 if (skb == NULL) { 245 if (!skb) {
148 error = -ENOMEM; 246 error = -ENOMEM;
149 goto err; 247 goto rx_init_fail;
150 } 248 }
151 249
250 memset(skb->data, 0, common->rx_bufsize);
152 bf->bf_mpdu = skb; 251 bf->bf_mpdu = skb;
252
153 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 253 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
154 common->rx_bufsize, 254 common->rx_bufsize,
155 DMA_FROM_DEVICE); 255 DMA_BIDIRECTIONAL);
156 if (unlikely(dma_mapping_error(sc->dev, 256 if (unlikely(dma_mapping_error(sc->dev,
157 bf->bf_buf_addr))) { 257 bf->bf_buf_addr))) {
158 dev_kfree_skb_any(skb); 258 dev_kfree_skb_any(skb);
159 bf->bf_mpdu = NULL; 259 bf->bf_mpdu = NULL;
260 ath_print(common, ATH_DBG_FATAL,
261 "dma_mapping_error() on RX init\n");
262 error = -ENOMEM;
263 goto rx_init_fail;
264 }
265
266 list_add_tail(&bf->list, &sc->rx.rxbuf);
267 }
268
269 return 0;
270
271rx_init_fail:
272 ath_rx_edma_cleanup(sc);
273 return error;
274}
275
276static void ath_edma_start_recv(struct ath_softc *sc)
277{
278 spin_lock_bh(&sc->rx.rxbuflock);
279
280 ath9k_hw_rxena(sc->sc_ah);
281
282 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_HP,
283 sc->rx.rx_edma[ATH9K_RX_QUEUE_HP].rx_fifo_hwsize);
284
285 ath_rx_addbuffer_edma(sc, ATH9K_RX_QUEUE_LP,
286 sc->rx.rx_edma[ATH9K_RX_QUEUE_LP].rx_fifo_hwsize);
287
288 spin_unlock_bh(&sc->rx.rxbuflock);
289
290 ath_opmode_init(sc);
291
292 ath9k_hw_startpcureceive(sc->sc_ah);
293}
294
295static void ath_edma_stop_recv(struct ath_softc *sc)
296{
297 spin_lock_bh(&sc->rx.rxbuflock);
298 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP);
299 ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP);
300 spin_unlock_bh(&sc->rx.rxbuflock);
301}
302
303int ath_rx_init(struct ath_softc *sc, int nbufs)
304{
305 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
306 struct sk_buff *skb;
307 struct ath_buf *bf;
308 int error = 0;
309
310 spin_lock_init(&sc->rx.rxflushlock);
311 sc->sc_flags &= ~SC_OP_RXFLUSH;
312 spin_lock_init(&sc->rx.rxbuflock);
313
314 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
315 return ath_rx_edma_init(sc, nbufs);
316 } else {
317 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
318 min(common->cachelsz, (u16)64));
319
320 ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
321 common->cachelsz, common->rx_bufsize);
322
323 /* Initialize rx descriptors */
324
325 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
326 "rx", nbufs, 1, 0);
327 if (error != 0) {
160 ath_print(common, ATH_DBG_FATAL, 328 ath_print(common, ATH_DBG_FATAL,
161 "dma_mapping_error() on RX init\n"); 329 "failed to allocate rx descriptors: %d\n",
162 error = -ENOMEM; 330 error);
163 goto err; 331 goto err;
164 } 332 }
165 bf->bf_dmacontext = bf->bf_buf_addr; 333
334 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
335 skb = ath_rxbuf_alloc(common, common->rx_bufsize,
336 GFP_KERNEL);
337 if (skb == NULL) {
338 error = -ENOMEM;
339 goto err;
340 }
341
342 bf->bf_mpdu = skb;
343 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
344 common->rx_bufsize,
345 DMA_FROM_DEVICE);
346 if (unlikely(dma_mapping_error(sc->dev,
347 bf->bf_buf_addr))) {
348 dev_kfree_skb_any(skb);
349 bf->bf_mpdu = NULL;
350 ath_print(common, ATH_DBG_FATAL,
351 "dma_mapping_error() on RX init\n");
352 error = -ENOMEM;
353 goto err;
354 }
355 bf->bf_dmacontext = bf->bf_buf_addr;
356 }
357 sc->rx.rxlink = NULL;
166 } 358 }
167 sc->rx.rxlink = NULL;
168 359
169err: 360err:
170 if (error) 361 if (error)
@@ -180,17 +371,23 @@ void ath_rx_cleanup(struct ath_softc *sc)
180 struct sk_buff *skb; 371 struct sk_buff *skb;
181 struct ath_buf *bf; 372 struct ath_buf *bf;
182 373
183 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 374 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
184 skb = bf->bf_mpdu; 375 ath_rx_edma_cleanup(sc);
185 if (skb) { 376 return;
186 dma_unmap_single(sc->dev, bf->bf_buf_addr, 377 } else {
187 common->rx_bufsize, DMA_FROM_DEVICE); 378 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
188 dev_kfree_skb(skb); 379 skb = bf->bf_mpdu;
380 if (skb) {
381 dma_unmap_single(sc->dev, bf->bf_buf_addr,
382 common->rx_bufsize,
383 DMA_FROM_DEVICE);
384 dev_kfree_skb(skb);
385 }
189 } 386 }
190 }
191 387
192 if (sc->rx.rxdma.dd_desc_len != 0) 388 if (sc->rx.rxdma.dd_desc_len != 0)
193 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf); 389 ath_descdma_cleanup(sc, &sc->rx.rxdma, &sc->rx.rxbuf);
390 }
194} 391}
195 392
196/* 393/*
@@ -273,6 +470,11 @@ int ath_startrecv(struct ath_softc *sc)
273 struct ath_hw *ah = sc->sc_ah; 470 struct ath_hw *ah = sc->sc_ah;
274 struct ath_buf *bf, *tbf; 471 struct ath_buf *bf, *tbf;
275 472
473 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
474 ath_edma_start_recv(sc);
475 return 0;
476 }
477
276 spin_lock_bh(&sc->rx.rxbuflock); 478 spin_lock_bh(&sc->rx.rxbuflock);
277 if (list_empty(&sc->rx.rxbuf)) 479 if (list_empty(&sc->rx.rxbuf))
278 goto start_recv; 480 goto start_recv;
@@ -306,7 +508,11 @@ bool ath_stoprecv(struct ath_softc *sc)
306 ath9k_hw_stoppcurecv(ah); 508 ath9k_hw_stoppcurecv(ah);
307 ath9k_hw_setrxfilter(ah, 0); 509 ath9k_hw_setrxfilter(ah, 0);
308 stopped = ath9k_hw_stopdmarecv(ah); 510 stopped = ath9k_hw_stopdmarecv(ah);
309 sc->rx.rxlink = NULL; 511
512 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
513 ath_edma_stop_recv(sc);
514 else
515 sc->rx.rxlink = NULL;
310 516
311 return stopped; 517 return stopped;
312} 518}
@@ -315,7 +521,9 @@ void ath_flushrecv(struct ath_softc *sc)
315{ 521{
316 spin_lock_bh(&sc->rx.rxflushlock); 522 spin_lock_bh(&sc->rx.rxflushlock);
317 sc->sc_flags |= SC_OP_RXFLUSH; 523 sc->sc_flags |= SC_OP_RXFLUSH;
318 ath_rx_tasklet(sc, 1); 524 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
525 ath_rx_tasklet(sc, 1, true);
526 ath_rx_tasklet(sc, 1, false);
319 sc->sc_flags &= ~SC_OP_RXFLUSH; 527 sc->sc_flags &= ~SC_OP_RXFLUSH;
320 spin_unlock_bh(&sc->rx.rxflushlock); 528 spin_unlock_bh(&sc->rx.rxflushlock);
321} 529}
@@ -469,15 +677,148 @@ static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
469 ieee80211_rx(hw, skb); 677 ieee80211_rx(hw, skb);
470} 678}
471 679
472int ath_rx_tasklet(struct ath_softc *sc, int flush) 680static bool ath_edma_get_buffers(struct ath_softc *sc,
681 enum ath9k_rx_qtype qtype)
473{ 682{
474#define PA2DESC(_sc, _pa) \ 683 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
475 ((struct ath_desc *)((caddr_t)(_sc)->rx.rxdma.dd_desc + \ 684 struct ath_hw *ah = sc->sc_ah;
476 ((_pa) - (_sc)->rx.rxdma.dd_desc_paddr))) 685 struct ath_common *common = ath9k_hw_common(ah);
686 struct sk_buff *skb;
687 struct ath_buf *bf;
688 int ret;
689
690 skb = skb_peek(&rx_edma->rx_fifo);
691 if (!skb)
692 return false;
693
694 bf = SKB_CB_ATHBUF(skb);
695 BUG_ON(!bf);
696
697 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
698 common->rx_bufsize, DMA_FROM_DEVICE);
699
700 ret = ath9k_hw_process_rxdesc_edma(ah, NULL, skb->data);
701 if (ret == -EINPROGRESS)
702 return false;
703
704 __skb_unlink(skb, &rx_edma->rx_fifo);
705 if (ret == -EINVAL) {
706 /* corrupt descriptor, skip this one and the following one */
707 list_add_tail(&bf->list, &sc->rx.rxbuf);
708 ath_rx_edma_buf_link(sc, qtype);
709 skb = skb_peek(&rx_edma->rx_fifo);
710 if (!skb)
711 return true;
712
713 bf = SKB_CB_ATHBUF(skb);
714 BUG_ON(!bf);
715
716 __skb_unlink(skb, &rx_edma->rx_fifo);
717 list_add_tail(&bf->list, &sc->rx.rxbuf);
718 ath_rx_edma_buf_link(sc, qtype);
719 return true;
720 }
721 skb_queue_tail(&rx_edma->rx_buffers, skb);
722
723 return true;
724}
477 725
726static struct ath_buf *ath_edma_get_next_rx_buf(struct ath_softc *sc,
727 struct ath_rx_status *rs,
728 enum ath9k_rx_qtype qtype)
729{
730 struct ath_rx_edma *rx_edma = &sc->rx.rx_edma[qtype];
731 struct sk_buff *skb;
478 struct ath_buf *bf; 732 struct ath_buf *bf;
733
734 while (ath_edma_get_buffers(sc, qtype));
735 skb = __skb_dequeue(&rx_edma->rx_buffers);
736 if (!skb)
737 return NULL;
738
739 bf = SKB_CB_ATHBUF(skb);
740 ath9k_hw_process_rxdesc_edma(sc->sc_ah, rs, skb->data);
741 return bf;
742}
743
744static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
745 struct ath_rx_status *rs)
746{
747 struct ath_hw *ah = sc->sc_ah;
748 struct ath_common *common = ath9k_hw_common(ah);
479 struct ath_desc *ds; 749 struct ath_desc *ds;
480 struct ath_rx_status *rx_stats; 750 struct ath_buf *bf;
751 int ret;
752
753 if (list_empty(&sc->rx.rxbuf)) {
754 sc->rx.rxlink = NULL;
755 return NULL;
756 }
757
758 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
759 ds = bf->bf_desc;
760
761 /*
762 * Must provide the virtual address of the current
763 * descriptor, the physical address, and the virtual
764 * address of the next descriptor in the h/w chain.
765 * This allows the HAL to look ahead to see if the
766 * hardware is done with a descriptor by checking the
767 * done bit in the following descriptor and the address
768 * of the current descriptor the DMA engine is working
769 * on. All this is necessary because of our use of
770 * a self-linked list to avoid rx overruns.
771 */
772 ret = ath9k_hw_rxprocdesc(ah, ds, rs, 0);
773 if (ret == -EINPROGRESS) {
774 struct ath_rx_status trs;
775 struct ath_buf *tbf;
776 struct ath_desc *tds;
777
778 memset(&trs, 0, sizeof(trs));
779 if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
780 sc->rx.rxlink = NULL;
781 return NULL;
782 }
783
784 tbf = list_entry(bf->list.next, struct ath_buf, list);
785
786 /*
787 * On some hardware the descriptor status words could
788 * get corrupted, including the done bit. Because of
789 * this, check if the next descriptor's done bit is
790 * set or not.
791 *
792 * If the next descriptor's done bit is set, the current
793 * descriptor has been corrupted. Force s/w to discard
794 * this descriptor and continue...
795 */
796
797 tds = tbf->bf_desc;
798 ret = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
799 if (ret == -EINPROGRESS)
800 return NULL;
801 }
802
803 if (!bf->bf_mpdu)
804 return bf;
805
806 /*
807 * Synchronize the DMA transfer with CPU before
808 * 1. accessing the frame
809 * 2. requeueing the same buffer to h/w
810 */
811 dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
812 common->rx_bufsize,
813 DMA_FROM_DEVICE);
814
815 return bf;
816}
817
818
819int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
820{
821 struct ath_buf *bf;
481 struct sk_buff *skb = NULL, *requeue_skb; 822 struct sk_buff *skb = NULL, *requeue_skb;
482 struct ieee80211_rx_status *rxs; 823 struct ieee80211_rx_status *rxs;
483 struct ath_hw *ah = sc->sc_ah; 824 struct ath_hw *ah = sc->sc_ah;
@@ -491,7 +832,17 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
491 struct ieee80211_hdr *hdr; 832 struct ieee80211_hdr *hdr;
492 int retval; 833 int retval;
493 bool decrypt_error = false; 834 bool decrypt_error = false;
835 struct ath_rx_status rs;
836 enum ath9k_rx_qtype qtype;
837 bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
838 int dma_type;
494 839
840 if (edma)
841 dma_type = DMA_FROM_DEVICE;
842 else
843 dma_type = DMA_BIDIRECTIONAL;
844
845 qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
495 spin_lock_bh(&sc->rx.rxbuflock); 846 spin_lock_bh(&sc->rx.rxbuflock);
496 847
497 do { 848 do {
@@ -499,79 +850,25 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
499 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0)) 850 if ((sc->sc_flags & SC_OP_RXFLUSH) && (flush == 0))
500 break; 851 break;
501 852
502 if (list_empty(&sc->rx.rxbuf)) { 853 memset(&rs, 0, sizeof(rs));
503 sc->rx.rxlink = NULL; 854 if (edma)
504 break; 855 bf = ath_edma_get_next_rx_buf(sc, &rs, qtype);
505 } 856 else
506 857 bf = ath_get_next_rx_buf(sc, &rs);
507 bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
508 ds = bf->bf_desc;
509
510 /*
511 * Must provide the virtual address of the current
512 * descriptor, the physical address, and the virtual
513 * address of the next descriptor in the h/w chain.
514 * This allows the HAL to look ahead to see if the
515 * hardware is done with a descriptor by checking the
516 * done bit in the following descriptor and the address
517 * of the current descriptor the DMA engine is working
518 * on. All this is necessary because of our use of
519 * a self-linked list to avoid rx overruns.
520 */
521 retval = ath9k_hw_rxprocdesc(ah, ds,
522 bf->bf_daddr,
523 PA2DESC(sc, ds->ds_link),
524 0);
525 if (retval == -EINPROGRESS) {
526 struct ath_buf *tbf;
527 struct ath_desc *tds;
528
529 if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
530 sc->rx.rxlink = NULL;
531 break;
532 }
533 858
534 tbf = list_entry(bf->list.next, struct ath_buf, list); 859 if (!bf)
535 860 break;
536 /*
537 * On some hardware the descriptor status words could
538 * get corrupted, including the done bit. Because of
539 * this, check if the next descriptor's done bit is
540 * set or not.
541 *
542 * If the next descriptor's done bit is set, the current
543 * descriptor has been corrupted. Force s/w to discard
544 * this descriptor and continue...
545 */
546
547 tds = tbf->bf_desc;
548 retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr,
549 PA2DESC(sc, tds->ds_link), 0);
550 if (retval == -EINPROGRESS) {
551 break;
552 }
553 }
554 861
555 skb = bf->bf_mpdu; 862 skb = bf->bf_mpdu;
556 if (!skb) 863 if (!skb)
557 continue; 864 continue;
558 865
559 /*
560 * Synchronize the DMA transfer with CPU before
561 * 1. accessing the frame
562 * 2. requeueing the same buffer to h/w
563 */
564 dma_sync_single_for_cpu(sc->dev, bf->bf_buf_addr,
565 common->rx_bufsize,
566 DMA_FROM_DEVICE);
567
568 hdr = (struct ieee80211_hdr *) skb->data; 866 hdr = (struct ieee80211_hdr *) skb->data;
569 rxs = IEEE80211_SKB_RXCB(skb); 867 rxs = IEEE80211_SKB_RXCB(skb);
570 868
571 hw = ath_get_virt_hw(sc, hdr); 869 hw = ath_get_virt_hw(sc, hdr);
572 rx_stats = &ds->ds_rxstat;
573 870
574 ath_debug_stat_rx(sc, bf); 871 ath_debug_stat_rx(sc, &rs);
575 872
576 /* 873 /*
577 * If we're asked to flush receive queue, directly 874 * If we're asked to flush receive queue, directly
@@ -580,7 +877,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
580 if (flush) 877 if (flush)
581 goto requeue; 878 goto requeue;
582 879
583 retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats, 880 retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
584 rxs, &decrypt_error); 881 rxs, &decrypt_error);
585 if (retval) 882 if (retval)
586 goto requeue; 883 goto requeue;
@@ -599,18 +896,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
599 /* Unmap the frame */ 896 /* Unmap the frame */
600 dma_unmap_single(sc->dev, bf->bf_buf_addr, 897 dma_unmap_single(sc->dev, bf->bf_buf_addr,
601 common->rx_bufsize, 898 common->rx_bufsize,
602 DMA_FROM_DEVICE); 899 dma_type);
603 900
604 skb_put(skb, rx_stats->rs_datalen); 901 skb_put(skb, rs.rs_datalen + ah->caps.rx_status_len);
902 if (ah->caps.rx_status_len)
903 skb_pull(skb, ah->caps.rx_status_len);
605 904
606 ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats, 905 ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
607 rxs, decrypt_error); 906 rxs, decrypt_error);
608 907
609 /* We will now give hardware our shiny new allocated skb */ 908 /* We will now give hardware our shiny new allocated skb */
610 bf->bf_mpdu = requeue_skb; 909 bf->bf_mpdu = requeue_skb;
611 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data, 910 bf->bf_buf_addr = dma_map_single(sc->dev, requeue_skb->data,
612 common->rx_bufsize, 911 common->rx_bufsize,
613 DMA_FROM_DEVICE); 912 dma_type);
614 if (unlikely(dma_mapping_error(sc->dev, 913 if (unlikely(dma_mapping_error(sc->dev,
615 bf->bf_buf_addr))) { 914 bf->bf_buf_addr))) {
616 dev_kfree_skb_any(requeue_skb); 915 dev_kfree_skb_any(requeue_skb);
@@ -626,9 +925,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
626 * change the default rx antenna if rx diversity chooses the 925 * change the default rx antenna if rx diversity chooses the
627 * other antenna 3 times in a row. 926 * other antenna 3 times in a row.
628 */ 927 */
629 if (sc->rx.defant != ds->ds_rxstat.rs_antenna) { 928 if (sc->rx.defant != rs.rs_antenna) {
630 if (++sc->rx.rxotherant >= 3) 929 if (++sc->rx.rxotherant >= 3)
631 ath_setdefantenna(sc, rx_stats->rs_antenna); 930 ath_setdefantenna(sc, rs.rs_antenna);
632 } else { 931 } else {
633 sc->rx.rxotherant = 0; 932 sc->rx.rxotherant = 0;
634 } 933 }
@@ -641,12 +940,16 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
641 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 940 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
642 941
643requeue: 942requeue:
644 list_move_tail(&bf->list, &sc->rx.rxbuf); 943 if (edma) {
645 ath_rx_buf_link(sc, bf); 944 list_add_tail(&bf->list, &sc->rx.rxbuf);
945 ath_rx_edma_buf_link(sc, qtype);
946 } else {
947 list_move_tail(&bf->list, &sc->rx.rxbuf);
948 ath_rx_buf_link(sc, bf);
949 }
646 } while (1); 950 } while (1);
647 951
648 spin_unlock_bh(&sc->rx.rxbuflock); 952 spin_unlock_bh(&sc->rx.rxbuflock);
649 953
650 return 0; 954 return 0;
651#undef PA2DESC
652} 955}
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 72cfa8ebd9ae..d4371a43bdaa 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -20,7 +20,7 @@
20#include "../reg.h" 20#include "../reg.h"
21 21
22#define AR_CR 0x0008 22#define AR_CR 0x0008
23#define AR_CR_RXE 0x00000004 23#define AR_CR_RXE (AR_SREV_9300_20_OR_LATER(ah) ? 0x0000000c : 0x00000004)
24#define AR_CR_RXD 0x00000020 24#define AR_CR_RXD 0x00000020
25#define AR_CR_SWI 0x00000040 25#define AR_CR_SWI 0x00000040
26 26
@@ -39,6 +39,12 @@
39#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000 39#define AR_CFG_PCI_MASTER_REQ_Q_THRESH 0x00060000
40#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17 40#define AR_CFG_PCI_MASTER_REQ_Q_THRESH_S 17
41 41
42#define AR_RXBP_THRESH 0x0018
43#define AR_RXBP_THRESH_HP 0x0000000f
44#define AR_RXBP_THRESH_HP_S 0
45#define AR_RXBP_THRESH_LP 0x00003f00
46#define AR_RXBP_THRESH_LP_S 8
47
42#define AR_MIRT 0x0020 48#define AR_MIRT 0x0020
43#define AR_MIRT_VAL 0x0000ffff 49#define AR_MIRT_VAL 0x0000ffff
44#define AR_MIRT_VAL_S 16 50#define AR_MIRT_VAL_S 16
@@ -144,6 +150,9 @@
144#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15 150#define AR_MACMISC_MISC_OBS_BUS_MSB_S 15
145#define AR_MACMISC_MISC_OBS_BUS_1 1 151#define AR_MACMISC_MISC_OBS_BUS_1 1
146 152
153#define AR_DATABUF_SIZE 0x0060
154#define AR_DATABUF_SIZE_MASK 0x00000FFF
155
147#define AR_GTXTO 0x0064 156#define AR_GTXTO 0x0064
148#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF 157#define AR_GTXTO_TIMEOUT_COUNTER 0x0000FFFF
149#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000 158#define AR_GTXTO_TIMEOUT_LIMIT 0xFFFF0000
@@ -160,9 +169,14 @@
160#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000 169#define AR_CST_TIMEOUT_LIMIT 0xFFFF0000
161#define AR_CST_TIMEOUT_LIMIT_S 16 170#define AR_CST_TIMEOUT_LIMIT_S 16
162 171
172#define AR_HP_RXDP 0x0074
173#define AR_LP_RXDP 0x0078
174
163#define AR_ISR 0x0080 175#define AR_ISR 0x0080
164#define AR_ISR_RXOK 0x00000001 176#define AR_ISR_RXOK 0x00000001
165#define AR_ISR_RXDESC 0x00000002 177#define AR_ISR_RXDESC 0x00000002
178#define AR_ISR_HP_RXOK 0x00000001
179#define AR_ISR_LP_RXOK 0x00000002
166#define AR_ISR_RXERR 0x00000004 180#define AR_ISR_RXERR 0x00000004
167#define AR_ISR_RXNOPKT 0x00000008 181#define AR_ISR_RXNOPKT 0x00000008
168#define AR_ISR_RXEOL 0x00000010 182#define AR_ISR_RXEOL 0x00000010
@@ -232,7 +246,6 @@
232#define AR_ISR_S5_TIMER_THRESH 0x0007FE00 246#define AR_ISR_S5_TIMER_THRESH 0x0007FE00
233#define AR_ISR_S5_TIM_TIMER 0x00000010 247#define AR_ISR_S5_TIM_TIMER 0x00000010
234#define AR_ISR_S5_DTIM_TIMER 0x00000020 248#define AR_ISR_S5_DTIM_TIMER 0x00000020
235#define AR_ISR_S5_S 0x00d8
236#define AR_IMR_S5 0x00b8 249#define AR_IMR_S5 0x00b8
237#define AR_IMR_S5_TIM_TIMER 0x00000010 250#define AR_IMR_S5_TIM_TIMER 0x00000010
238#define AR_IMR_S5_DTIM_TIMER 0x00000020 251#define AR_IMR_S5_DTIM_TIMER 0x00000020
@@ -240,7 +253,6 @@
240#define AR_ISR_S5_GENTIMER_TRIG_S 0 253#define AR_ISR_S5_GENTIMER_TRIG_S 0
241#define AR_ISR_S5_GENTIMER_THRESH 0xFF800000 254#define AR_ISR_S5_GENTIMER_THRESH 0xFF800000
242#define AR_ISR_S5_GENTIMER_THRESH_S 16 255#define AR_ISR_S5_GENTIMER_THRESH_S 16
243#define AR_ISR_S5_S 0x00d8
244#define AR_IMR_S5_GENTIMER_TRIG 0x0000FF80 256#define AR_IMR_S5_GENTIMER_TRIG 0x0000FF80
245#define AR_IMR_S5_GENTIMER_TRIG_S 0 257#define AR_IMR_S5_GENTIMER_TRIG_S 0
246#define AR_IMR_S5_GENTIMER_THRESH 0xFF800000 258#define AR_IMR_S5_GENTIMER_THRESH 0xFF800000
@@ -249,6 +261,8 @@
249#define AR_IMR 0x00a0 261#define AR_IMR 0x00a0
250#define AR_IMR_RXOK 0x00000001 262#define AR_IMR_RXOK 0x00000001
251#define AR_IMR_RXDESC 0x00000002 263#define AR_IMR_RXDESC 0x00000002
264#define AR_IMR_RXOK_HP 0x00000001
265#define AR_IMR_RXOK_LP 0x00000002
252#define AR_IMR_RXERR 0x00000004 266#define AR_IMR_RXERR 0x00000004
253#define AR_IMR_RXNOPKT 0x00000008 267#define AR_IMR_RXNOPKT 0x00000008
254#define AR_IMR_RXEOL 0x00000010 268#define AR_IMR_RXEOL 0x00000010
@@ -332,10 +346,10 @@
332#define AR_ISR_S1_QCU_TXEOL 0x03FF0000 346#define AR_ISR_S1_QCU_TXEOL 0x03FF0000
333#define AR_ISR_S1_QCU_TXEOL_S 16 347#define AR_ISR_S1_QCU_TXEOL_S 16
334 348
335#define AR_ISR_S2_S 0x00cc 349#define AR_ISR_S2_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d0 : 0x00cc)
336#define AR_ISR_S3_S 0x00d0 350#define AR_ISR_S3_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d4 : 0x00d0)
337#define AR_ISR_S4_S 0x00d4 351#define AR_ISR_S4_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00d8 : 0x00d4)
338#define AR_ISR_S5_S 0x00d8 352#define AR_ISR_S5_S (AR_SREV_9300_20_OR_LATER(ah) ? 0x00dc : 0x00d8)
339#define AR_DMADBG_0 0x00e0 353#define AR_DMADBG_0 0x00e0
340#define AR_DMADBG_1 0x00e4 354#define AR_DMADBG_1 0x00e4
341#define AR_DMADBG_2 0x00e8 355#define AR_DMADBG_2 0x00e8
@@ -369,6 +383,9 @@
369#define AR_Q9_TXDP 0x0824 383#define AR_Q9_TXDP 0x0824
370#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2)) 384#define AR_QTXDP(_i) (AR_Q0_TXDP + ((_i)<<2))
371 385
386#define AR_Q_STATUS_RING_START 0x830
387#define AR_Q_STATUS_RING_END 0x834
388
372#define AR_Q_TXE 0x0840 389#define AR_Q_TXE 0x0840
373#define AR_Q_TXE_M 0x000003FF 390#define AR_Q_TXE_M 0x000003FF
374 391
@@ -461,6 +478,10 @@
461#define AR_Q_RDYTIMESHDN 0x0a40 478#define AR_Q_RDYTIMESHDN 0x0a40
462#define AR_Q_RDYTIMESHDN_M 0x000003FF 479#define AR_Q_RDYTIMESHDN_M 0x000003FF
463 480
481/* MAC Descriptor CRC check */
482#define AR_Q_DESC_CRCCHK 0xa44
483/* Enable CRC check on the descriptor fetched from host */
484#define AR_Q_DESC_CRCCHK_EN 1
464 485
465#define AR_NUM_DCU 10 486#define AR_NUM_DCU 10
466#define AR_DCU_0 0x0001 487#define AR_DCU_0 0x0001
@@ -679,7 +700,7 @@
679 700
680#define AR_WA 0x4004 701#define AR_WA 0x4004
681#define AR_WA_D3_L1_DISABLE (1 << 14) 702#define AR_WA_D3_L1_DISABLE (1 << 14)
682#define AR9285_WA_DEFAULT 0x004a05cb 703#define AR9285_WA_DEFAULT 0x004a050b
683#define AR9280_WA_DEFAULT 0x0040073b 704#define AR9280_WA_DEFAULT 0x0040073b
684#define AR_WA_DEFAULT 0x0000073f 705#define AR_WA_DEFAULT 0x0000073f
685 706
@@ -759,6 +780,8 @@
759#define AR_SREV_VERSION_9271 0x140 780#define AR_SREV_VERSION_9271 0x140
760#define AR_SREV_REVISION_9271_10 0 781#define AR_SREV_REVISION_9271_10 0
761#define AR_SREV_REVISION_9271_11 1 782#define AR_SREV_REVISION_9271_11 1
783#define AR_SREV_VERSION_9300 0x1c0
784#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
762 785
763#define AR_SREV_5416(_ah) \ 786#define AR_SREV_5416(_ah) \
764 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ 787 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -844,6 +867,19 @@
844#define AR_SREV_9271_11(_ah) \ 867#define AR_SREV_9271_11(_ah) \
845 (AR_SREV_9271(_ah) && \ 868 (AR_SREV_9271(_ah) && \
846 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11)) 869 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11))
870#define AR_SREV_9300(_ah) \
871 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300))
872#define AR_SREV_9300_20(_ah) \
873 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
874 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9300_20))
875#define AR_SREV_9300_20_OR_LATER(_ah) \
876 (((_ah)->hw_version.macVersion > AR_SREV_VERSION_9300) || \
877 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
878 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9300_20)))
879
880#define AR_SREV_9285E_20(_ah) \
881 (AR_SREV_9285_12_OR_LATER(_ah) && \
882 ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
847 883
848#define AR_RADIO_SREV_MAJOR 0xf0 884#define AR_RADIO_SREV_MAJOR 0xf0
849#define AR_RAD5133_SREV_MAJOR 0xc0 885#define AR_RAD5133_SREV_MAJOR 0xc0
@@ -940,6 +976,8 @@ enum {
940#define AR928X_NUM_GPIO 10 976#define AR928X_NUM_GPIO 10
941#define AR9285_NUM_GPIO 12 977#define AR9285_NUM_GPIO 12
942#define AR9287_NUM_GPIO 11 978#define AR9287_NUM_GPIO 11
979#define AR9271_NUM_GPIO 16
980#define AR9300_NUM_GPIO 17
943 981
944#define AR_GPIO_IN_OUT 0x4048 982#define AR_GPIO_IN_OUT 0x4048
945#define AR_GPIO_IN_VAL 0x0FFFC000 983#define AR_GPIO_IN_VAL 0x0FFFC000
@@ -950,19 +988,23 @@ enum {
950#define AR9285_GPIO_IN_VAL_S 12 988#define AR9285_GPIO_IN_VAL_S 12
951#define AR9287_GPIO_IN_VAL 0x003FF800 989#define AR9287_GPIO_IN_VAL 0x003FF800
952#define AR9287_GPIO_IN_VAL_S 11 990#define AR9287_GPIO_IN_VAL_S 11
991#define AR9271_GPIO_IN_VAL 0xFFFF0000
992#define AR9271_GPIO_IN_VAL_S 16
993#define AR9300_GPIO_IN_VAL 0x0001FFFF
994#define AR9300_GPIO_IN_VAL_S 0
953 995
954#define AR_GPIO_OE_OUT 0x404c 996#define AR_GPIO_OE_OUT (AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c)
955#define AR_GPIO_OE_OUT_DRV 0x3 997#define AR_GPIO_OE_OUT_DRV 0x3
956#define AR_GPIO_OE_OUT_DRV_NO 0x0 998#define AR_GPIO_OE_OUT_DRV_NO 0x0
957#define AR_GPIO_OE_OUT_DRV_LOW 0x1 999#define AR_GPIO_OE_OUT_DRV_LOW 0x1
958#define AR_GPIO_OE_OUT_DRV_HI 0x2 1000#define AR_GPIO_OE_OUT_DRV_HI 0x2
959#define AR_GPIO_OE_OUT_DRV_ALL 0x3 1001#define AR_GPIO_OE_OUT_DRV_ALL 0x3
960 1002
961#define AR_GPIO_INTR_POL 0x4050 1003#define AR_GPIO_INTR_POL (AR_SREV_9300_20_OR_LATER(ah) ? 0x4058 : 0x4050)
962#define AR_GPIO_INTR_POL_VAL 0x00001FFF 1004#define AR_GPIO_INTR_POL_VAL 0x0001FFFF
963#define AR_GPIO_INTR_POL_VAL_S 0 1005#define AR_GPIO_INTR_POL_VAL_S 0
964 1006
965#define AR_GPIO_INPUT_EN_VAL 0x4054 1007#define AR_GPIO_INPUT_EN_VAL (AR_SREV_9300_20_OR_LATER(ah) ? 0x405c : 0x4054)
966#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004 1008#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF 0x00000004
967#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2 1009#define AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_S 2
968#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008 1010#define AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF 0x00000008
@@ -980,13 +1022,13 @@ enum {
980#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000 1022#define AR_GPIO_RTC_RESET_OVERRIDE_ENABLE 0x00010000
981#define AR_GPIO_JTAG_DISABLE 0x00020000 1023#define AR_GPIO_JTAG_DISABLE 0x00020000
982 1024
983#define AR_GPIO_INPUT_MUX1 0x4058 1025#define AR_GPIO_INPUT_MUX1 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4060 : 0x4058)
984#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000 1026#define AR_GPIO_INPUT_MUX1_BT_ACTIVE 0x000f0000
985#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16 1027#define AR_GPIO_INPUT_MUX1_BT_ACTIVE_S 16
986#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00 1028#define AR_GPIO_INPUT_MUX1_BT_PRIORITY 0x00000f00
987#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8 1029#define AR_GPIO_INPUT_MUX1_BT_PRIORITY_S 8
988 1030
989#define AR_GPIO_INPUT_MUX2 0x405c 1031#define AR_GPIO_INPUT_MUX2 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4064 : 0x405c)
990#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f 1032#define AR_GPIO_INPUT_MUX2_CLK25 0x0000000f
991#define AR_GPIO_INPUT_MUX2_CLK25_S 0 1033#define AR_GPIO_INPUT_MUX2_CLK25_S 0
992#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0 1034#define AR_GPIO_INPUT_MUX2_RFSILENT 0x000000f0
@@ -994,13 +1036,13 @@ enum {
994#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00 1036#define AR_GPIO_INPUT_MUX2_RTC_RESET 0x00000f00
995#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8 1037#define AR_GPIO_INPUT_MUX2_RTC_RESET_S 8
996 1038
997#define AR_GPIO_OUTPUT_MUX1 0x4060 1039#define AR_GPIO_OUTPUT_MUX1 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4068 : 0x4060)
998#define AR_GPIO_OUTPUT_MUX2 0x4064 1040#define AR_GPIO_OUTPUT_MUX2 (AR_SREV_9300_20_OR_LATER(ah) ? 0x406c : 0x4064)
999#define AR_GPIO_OUTPUT_MUX3 0x4068 1041#define AR_GPIO_OUTPUT_MUX3 (AR_SREV_9300_20_OR_LATER(ah) ? 0x4070 : 0x4068)
1000 1042
1001#define AR_INPUT_STATE 0x406c 1043#define AR_INPUT_STATE (AR_SREV_9300_20_OR_LATER(ah) ? 0x4074 : 0x406c)
1002 1044
1003#define AR_EEPROM_STATUS_DATA 0x407c 1045#define AR_EEPROM_STATUS_DATA (AR_SREV_9300_20_OR_LATER(ah) ? 0x4084 : 0x407c)
1004#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff 1046#define AR_EEPROM_STATUS_DATA_VAL 0x0000ffff
1005#define AR_EEPROM_STATUS_DATA_VAL_S 0 1047#define AR_EEPROM_STATUS_DATA_VAL_S 0
1006#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000 1048#define AR_EEPROM_STATUS_DATA_BUSY 0x00010000
@@ -1008,13 +1050,24 @@ enum {
1008#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000 1050#define AR_EEPROM_STATUS_DATA_PROT_ACCESS 0x00040000
1009#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000 1051#define AR_EEPROM_STATUS_DATA_ABSENT_ACCESS 0x00080000
1010 1052
1011#define AR_OBS 0x4080 1053#define AR_OBS (AR_SREV_9300_20_OR_LATER(ah) ? 0x4088 : 0x4080)
1012 1054
1013#define AR_GPIO_PDPU 0x4088 1055#define AR_GPIO_PDPU (AR_SREV_9300_20_OR_LATER(ah) ? 0x4090 : 0x4088)
1014 1056
1015#define AR_PCIE_MSI 0x4094 1057#define AR_PCIE_MSI (AR_SREV_9300_20_OR_LATER(ah) ? 0x40a4 : 0x4094)
1016#define AR_PCIE_MSI_ENABLE 0x00000001 1058#define AR_PCIE_MSI_ENABLE 0x00000001
1017 1059
1060#define AR_INTR_PRIO_SYNC_ENABLE 0x40c4
1061#define AR_INTR_PRIO_ASYNC_MASK 0x40c8
1062#define AR_INTR_PRIO_SYNC_MASK 0x40cc
1063#define AR_INTR_PRIO_ASYNC_ENABLE 0x40d4
1064
1065#define AR_RTC_9300_PLL_DIV 0x000003ff
1066#define AR_RTC_9300_PLL_DIV_S 0
1067#define AR_RTC_9300_PLL_REFDIV 0x00003C00
1068#define AR_RTC_9300_PLL_REFDIV_S 10
1069#define AR_RTC_9300_PLL_CLKSEL 0x0000C000
1070#define AR_RTC_9300_PLL_CLKSEL_S 14
1018 1071
1019#define AR_RTC_9160_PLL_DIV 0x000003ff 1072#define AR_RTC_9160_PLL_DIV 0x000003ff
1020#define AR_RTC_9160_PLL_DIV_S 0 1073#define AR_RTC_9160_PLL_DIV_S 0
@@ -1032,6 +1085,16 @@ enum {
1032#define AR_RTC_RC_COLD_RESET 0x00000004 1085#define AR_RTC_RC_COLD_RESET 0x00000004
1033#define AR_RTC_RC_WARM_RESET 0x00000008 1086#define AR_RTC_RC_WARM_RESET 0x00000008
1034 1087
1088/* Crystal Control */
1089#define AR_RTC_XTAL_CONTROL 0x7004
1090
1091/* Reg Control 0 */
1092#define AR_RTC_REG_CONTROL0 0x7008
1093
1094/* Reg Control 1 */
1095#define AR_RTC_REG_CONTROL1 0x700c
1096#define AR_RTC_REG_CONTROL1_SWREG_PROGRAM 0x00000001
1097
1035#define AR_RTC_PLL_CONTROL \ 1098#define AR_RTC_PLL_CONTROL \
1036 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014) 1099 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
1037 1100
@@ -1062,6 +1125,7 @@ enum {
1062#define AR_RTC_SLEEP_CLK \ 1125#define AR_RTC_SLEEP_CLK \
1063 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048) 1126 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0048) : 0x7048)
1064#define AR_RTC_FORCE_DERIVED_CLK 0x2 1127#define AR_RTC_FORCE_DERIVED_CLK 0x2
1128#define AR_RTC_FORCE_SWREG_PRD 0x00000004
1065 1129
1066#define AR_RTC_FORCE_WAKE \ 1130#define AR_RTC_FORCE_WAKE \
1067 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c) 1131 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x004c) : 0x704c)
@@ -1178,6 +1242,13 @@ enum {
1178#define AR9285_AN_RF2G4_DB2_4 0x00003800 1242#define AR9285_AN_RF2G4_DB2_4 0x00003800
1179#define AR9285_AN_RF2G4_DB2_4_S 11 1243#define AR9285_AN_RF2G4_DB2_4_S 11
1180 1244
1245#define AR9285_RF2G5 0x7830
1246#define AR9285_RF2G5_IC50TX 0xfffff8ff
1247#define AR9285_RF2G5_IC50TX_SET 0x00000400
1248#define AR9285_RF2G5_IC50TX_XE_SET 0x00000500
1249#define AR9285_RF2G5_IC50TX_CLEAR 0x00000700
1250#define AR9285_RF2G5_IC50TX_CLEAR_S 8
1251
1181/* AR9271 : 0x7828, 0x782c different setting from AR9285 */ 1252/* AR9271 : 0x7828, 0x782c different setting from AR9285 */
1182#define AR9271_AN_RF2G3_OB_cck 0x001C0000 1253#define AR9271_AN_RF2G3_OB_cck 0x001C0000
1183#define AR9271_AN_RF2G3_OB_cck_S 18 1254#define AR9271_AN_RF2G3_OB_cck_S 18
@@ -1519,7 +1590,7 @@ enum {
1519#define AR_TSFOOR_THRESHOLD 0x813c 1590#define AR_TSFOOR_THRESHOLD 0x813c
1520#define AR_TSFOOR_THRESHOLD_VAL 0x0000FFFF 1591#define AR_TSFOOR_THRESHOLD_VAL 0x0000FFFF
1521 1592
1522#define AR_PHY_ERR_EIFS_MASK 8144 1593#define AR_PHY_ERR_EIFS_MASK 0x8144
1523 1594
1524#define AR_PHY_ERR_3 0x8168 1595#define AR_PHY_ERR_3 0x8168
1525#define AR_PHY_ERR_3_COUNT 0x00FFFFFF 1596#define AR_PHY_ERR_3_COUNT 0x00FFFFFF
@@ -1585,24 +1656,26 @@ enum {
1585#define AR_FIRST_NDP_TIMER 7 1656#define AR_FIRST_NDP_TIMER 7
1586#define AR_NDP2_PERIOD 0x81a0 1657#define AR_NDP2_PERIOD 0x81a0
1587#define AR_NDP2_TIMER_MODE 0x81c0 1658#define AR_NDP2_TIMER_MODE 0x81c0
1588#define AR_NEXT_TBTT_TIMER 0x8200 1659
1589#define AR_NEXT_DMA_BEACON_ALERT 0x8204 1660#define AR_GEN_TIMERS(_i) (0x8200 + ((_i) << 2))
1590#define AR_NEXT_SWBA 0x8208 1661#define AR_NEXT_TBTT_TIMER AR_GEN_TIMERS(0)
1591#define AR_NEXT_CFP 0x8208 1662#define AR_NEXT_DMA_BEACON_ALERT AR_GEN_TIMERS(1)
1592#define AR_NEXT_HCF 0x820C 1663#define AR_NEXT_SWBA AR_GEN_TIMERS(2)
1593#define AR_NEXT_TIM 0x8210 1664#define AR_NEXT_CFP AR_GEN_TIMERS(2)
1594#define AR_NEXT_DTIM 0x8214 1665#define AR_NEXT_HCF AR_GEN_TIMERS(3)
1595#define AR_NEXT_QUIET_TIMER 0x8218 1666#define AR_NEXT_TIM AR_GEN_TIMERS(4)
1596#define AR_NEXT_NDP_TIMER 0x821C 1667#define AR_NEXT_DTIM AR_GEN_TIMERS(5)
1597 1668#define AR_NEXT_QUIET_TIMER AR_GEN_TIMERS(6)
1598#define AR_BEACON_PERIOD 0x8220 1669#define AR_NEXT_NDP_TIMER AR_GEN_TIMERS(7)
1599#define AR_DMA_BEACON_PERIOD 0x8224 1670
1600#define AR_SWBA_PERIOD 0x8228 1671#define AR_BEACON_PERIOD AR_GEN_TIMERS(8)
1601#define AR_HCF_PERIOD 0x822C 1672#define AR_DMA_BEACON_PERIOD AR_GEN_TIMERS(9)
1602#define AR_TIM_PERIOD 0x8230 1673#define AR_SWBA_PERIOD AR_GEN_TIMERS(10)
1603#define AR_DTIM_PERIOD 0x8234 1674#define AR_HCF_PERIOD AR_GEN_TIMERS(11)
1604#define AR_QUIET_PERIOD 0x8238 1675#define AR_TIM_PERIOD AR_GEN_TIMERS(12)
1605#define AR_NDP_PERIOD 0x823C 1676#define AR_DTIM_PERIOD AR_GEN_TIMERS(13)
1677#define AR_QUIET_PERIOD AR_GEN_TIMERS(14)
1678#define AR_NDP_PERIOD AR_GEN_TIMERS(15)
1606 1679
1607#define AR_TIMER_MODE 0x8240 1680#define AR_TIMER_MODE 0x8240
1608#define AR_TBTT_TIMER_EN 0x00000001 1681#define AR_TBTT_TIMER_EN 0x00000001
@@ -1716,4 +1789,32 @@ enum {
1716#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */ 1789#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
1717#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */ 1790#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
1718 1791
1792#define AR_AGG_WEP_ENABLE_FIX 0x00000008 /* This allows the use of AR_AGG_WEP_ENABLE */
1793#define AR_ADHOC_MCAST_KEYID_ENABLE 0x00000040 /* This bit enables the Multicast search
1794 * based on both MAC Address and Key ID.
1795 * If bit is 0, then Multicast search is
1796 * based on MAC address only.
1797 * For Merlin and above only.
1798 */
1799#define AR_AGG_WEP_ENABLE 0x00020000 /* This field enables AGG_WEP feature,
1800 * when it is enable, AGG_WEP would takes
1801 * charge of the encryption interface of
1802 * pcu_txsm.
1803 */
1804
1805#define AR9300_SM_BASE 0xa200
1806#define AR9002_PHY_AGC_CONTROL 0x9860
1807#define AR9003_PHY_AGC_CONTROL AR9300_SM_BASE + 0xc4
1808#define AR_PHY_AGC_CONTROL (AR_SREV_9300_20_OR_LATER(ah) ? AR9003_PHY_AGC_CONTROL : AR9002_PHY_AGC_CONTROL)
1809#define AR_PHY_AGC_CONTROL_CAL 0x00000001 /* do internal calibration */
1810#define AR_PHY_AGC_CONTROL_NF 0x00000002 /* do noise-floor calibration */
1811#define AR_PHY_AGC_CONTROL_OFFSET_CAL 0x00000800 /* allow offset calibration */
1812#define AR_PHY_AGC_CONTROL_ENABLE_NF 0x00008000 /* enable noise floor calibration to happen */
1813#define AR_PHY_AGC_CONTROL_FLTR_CAL 0x00010000 /* allow tx filter calibration */
1814#define AR_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000 /* don't update noise floor automatically */
1815#define AR_PHY_AGC_CONTROL_EXT_NF_PWR_MEAS 0x00040000 /* extend noise floor power measurement */
1816#define AR_PHY_AGC_CONTROL_CLC_SUCCESS 0x00080000 /* carrier leak calibration done */
1817#define AR_PHY_AGC_CONTROL_YCOK_MAX 0x000003c0
1818#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
1819
1719#endif 1820#endif
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 00c0e21a4af7..105ad40968f6 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -220,7 +220,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
220 220
221 memset(&txctl, 0, sizeof(struct ath_tx_control)); 221 memset(&txctl, 0, sizeof(struct ath_tx_control));
222 txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]]; 222 txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
223 txctl.frame_type = ps ? ATH9K_INT_PAUSE : ATH9K_INT_UNPAUSE; 223 txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
224 224
225 if (ath_tx_start(aphy->hw, skb, &txctl) != 0) 225 if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
226 goto exit; 226 goto exit;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
new file mode 100644
index 000000000000..e23172c9caaf
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -0,0 +1,336 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
20{
21 switch (wmi_cmd) {
22 case WMI_ECHO_CMDID:
23 return "WMI_ECHO_CMDID";
24 case WMI_ACCESS_MEMORY_CMDID:
25 return "WMI_ACCESS_MEMORY_CMDID";
26 case WMI_DISABLE_INTR_CMDID:
27 return "WMI_DISABLE_INTR_CMDID";
28 case WMI_ENABLE_INTR_CMDID:
29 return "WMI_ENABLE_INTR_CMDID";
30 case WMI_RX_LINK_CMDID:
31 return "WMI_RX_LINK_CMDID";
32 case WMI_ATH_INIT_CMDID:
33 return "WMI_ATH_INIT_CMDID";
34 case WMI_ABORT_TXQ_CMDID:
35 return "WMI_ABORT_TXQ_CMDID";
36 case WMI_STOP_TX_DMA_CMDID:
37 return "WMI_STOP_TX_DMA_CMDID";
38 case WMI_STOP_DMA_RECV_CMDID:
39 return "WMI_STOP_DMA_RECV_CMDID";
40 case WMI_ABORT_TX_DMA_CMDID:
41 return "WMI_ABORT_TX_DMA_CMDID";
42 case WMI_DRAIN_TXQ_CMDID:
43 return "WMI_DRAIN_TXQ_CMDID";
44 case WMI_DRAIN_TXQ_ALL_CMDID:
45 return "WMI_DRAIN_TXQ_ALL_CMDID";
46 case WMI_START_RECV_CMDID:
47 return "WMI_START_RECV_CMDID";
48 case WMI_STOP_RECV_CMDID:
49 return "WMI_STOP_RECV_CMDID";
50 case WMI_FLUSH_RECV_CMDID:
51 return "WMI_FLUSH_RECV_CMDID";
52 case WMI_SET_MODE_CMDID:
53 return "WMI_SET_MODE_CMDID";
54 case WMI_RESET_CMDID:
55 return "WMI_RESET_CMDID";
56 case WMI_NODE_CREATE_CMDID:
57 return "WMI_NODE_CREATE_CMDID";
58 case WMI_NODE_REMOVE_CMDID:
59 return "WMI_NODE_REMOVE_CMDID";
60 case WMI_VAP_REMOVE_CMDID:
61 return "WMI_VAP_REMOVE_CMDID";
62 case WMI_VAP_CREATE_CMDID:
63 return "WMI_VAP_CREATE_CMDID";
64 case WMI_BEACON_UPDATE_CMDID:
65 return "WMI_BEACON_UPDATE_CMDID";
66 case WMI_REG_READ_CMDID:
67 return "WMI_REG_READ_CMDID";
68 case WMI_REG_WRITE_CMDID:
69 return "WMI_REG_WRITE_CMDID";
70 case WMI_RC_STATE_CHANGE_CMDID:
71 return "WMI_RC_STATE_CHANGE_CMDID";
72 case WMI_RC_RATE_UPDATE_CMDID:
73 return "WMI_RC_RATE_UPDATE_CMDID";
74 case WMI_DEBUG_INFO_CMDID:
75 return "WMI_DEBUG_INFO_CMDID";
76 case WMI_HOST_ATTACH:
77 return "WMI_HOST_ATTACH";
78 case WMI_TARGET_IC_UPDATE_CMDID:
79 return "WMI_TARGET_IC_UPDATE_CMDID";
80 case WMI_TGT_STATS_CMDID:
81 return "WMI_TGT_STATS_CMDID";
82 case WMI_TX_AGGR_ENABLE_CMDID:
83 return "WMI_TX_AGGR_ENABLE_CMDID";
84 case WMI_TGT_DETACH_CMDID:
85 return "WMI_TGT_DETACH_CMDID";
86 case WMI_TGT_TXQ_ENABLE_CMDID:
87 return "WMI_TGT_TXQ_ENABLE_CMDID";
88 }
89
90 return "Bogus";
91}
92
93struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
94{
95 struct wmi *wmi;
96
97 wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
98 if (!wmi)
99 return NULL;
100
101 wmi->drv_priv = priv;
102 wmi->stopped = false;
103 mutex_init(&wmi->op_mutex);
104 mutex_init(&wmi->multi_write_mutex);
105 init_completion(&wmi->cmd_wait);
106
107 return wmi;
108}
109
110void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
111{
112 struct wmi *wmi = priv->wmi;
113
114 mutex_lock(&wmi->op_mutex);
115 wmi->stopped = true;
116 mutex_unlock(&wmi->op_mutex);
117
118 kfree(priv->wmi);
119}
120
121void ath9k_wmi_tasklet(unsigned long data)
122{
123 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
124 struct ath_common *common = ath9k_hw_common(priv->ah);
125 struct wmi_cmd_hdr *hdr;
126 struct wmi_swba *swba_hdr;
127 enum wmi_event_id event;
128 struct sk_buff *skb;
129 void *wmi_event;
130 unsigned long flags;
131#ifdef CONFIG_ATH9K_HTC_DEBUGFS
132 __be32 txrate;
133#endif
134
135 spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
136 skb = priv->wmi->wmi_skb;
137 spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
138
139 hdr = (struct wmi_cmd_hdr *) skb->data;
140 event = be16_to_cpu(hdr->command_id);
141 wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
142
143 ath_print(common, ATH_DBG_WMI,
144 "WMI Event: 0x%x\n", event);
145
146 switch (event) {
147 case WMI_TGT_RDY_EVENTID:
148 break;
149 case WMI_SWBA_EVENTID:
150 swba_hdr = (struct wmi_swba *) wmi_event;
151 ath9k_htc_swba(priv, swba_hdr->beacon_pending);
152 break;
153 case WMI_FATAL_EVENTID:
154 break;
155 case WMI_TXTO_EVENTID:
156 break;
157 case WMI_BMISS_EVENTID:
158 break;
159 case WMI_WLAN_TXCOMP_EVENTID:
160 break;
161 case WMI_DELBA_EVENTID:
162 break;
163 case WMI_TXRATE_EVENTID:
164#ifdef CONFIG_ATH9K_HTC_DEBUGFS
165 txrate = ((struct wmi_event_txrate *)wmi_event)->txrate;
166 priv->debug.txrate = be32_to_cpu(txrate);
167#endif
168 break;
169 default:
170 break;
171 }
172
173 kfree_skb(skb);
174}
175
176static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
177{
178 skb_pull(skb, sizeof(struct wmi_cmd_hdr));
179
180 if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
181 memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);
182
183 complete(&wmi->cmd_wait);
184}
185
186static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
187 enum htc_endpoint_id epid)
188{
189 struct wmi *wmi = (struct wmi *) priv;
190 struct wmi_cmd_hdr *hdr;
191 u16 cmd_id;
192
193 if (unlikely(wmi->stopped))
194 goto free_skb;
195
196 hdr = (struct wmi_cmd_hdr *) skb->data;
197 cmd_id = be16_to_cpu(hdr->command_id);
198
199 if (cmd_id & 0x1000) {
200 spin_lock(&wmi->wmi_lock);
201 wmi->wmi_skb = skb;
202 spin_unlock(&wmi->wmi_lock);
203 tasklet_schedule(&wmi->drv_priv->wmi_tasklet);
204 return;
205 }
206
207 /* Check if there has been a timeout. */
208 spin_lock(&wmi->wmi_lock);
209 if (cmd_id != wmi->last_cmd_id) {
210 spin_unlock(&wmi->wmi_lock);
211 goto free_skb;
212 }
213 spin_unlock(&wmi->wmi_lock);
214
215 /* WMI command response */
216 ath9k_wmi_rsp_callback(wmi, skb);
217
218free_skb:
219 kfree_skb(skb);
220}
221
222static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb,
223 enum htc_endpoint_id epid, bool txok)
224{
225 kfree_skb(skb);
226}
227
228int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
229 enum htc_endpoint_id *wmi_ctrl_epid)
230{
231 struct htc_service_connreq connect;
232 int ret;
233
234 wmi->htc = htc;
235
236 memset(&connect, 0, sizeof(connect));
237
238 connect.ep_callbacks.priv = wmi;
239 connect.ep_callbacks.tx = ath9k_wmi_ctrl_tx;
240 connect.ep_callbacks.rx = ath9k_wmi_ctrl_rx;
241 connect.service_id = WMI_CONTROL_SVC;
242
243 ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
244 if (ret)
245 return ret;
246
247 *wmi_ctrl_epid = wmi->ctrl_epid;
248
249 return 0;
250}
251
252static int ath9k_wmi_cmd_issue(struct wmi *wmi,
253 struct sk_buff *skb,
254 enum wmi_cmd_id cmd, u16 len)
255{
256 struct wmi_cmd_hdr *hdr;
257
258 hdr = (struct wmi_cmd_hdr *) skb_push(skb, sizeof(struct wmi_cmd_hdr));
259 hdr->command_id = cpu_to_be16(cmd);
260 hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
261
262 return htc_send(wmi->htc, skb, wmi->ctrl_epid, NULL);
263}
264
265int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
266 u8 *cmd_buf, u32 cmd_len,
267 u8 *rsp_buf, u32 rsp_len,
268 u32 timeout)
269{
270 struct ath_hw *ah = wmi->drv_priv->ah;
271 struct ath_common *common = ath9k_hw_common(ah);
272 u16 headroom = sizeof(struct htc_frame_hdr) +
273 sizeof(struct wmi_cmd_hdr);
274 struct sk_buff *skb;
275 u8 *data;
276 int time_left, ret = 0;
277 unsigned long flags;
278
279 if (wmi->drv_priv->op_flags & OP_UNPLUGGED)
280 return 0;
281
282 if (!wmi)
283 return -EINVAL;
284
285 skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
286 if (!skb)
287 return -ENOMEM;
288
289 skb_reserve(skb, headroom);
290
291 if (cmd_len != 0 && cmd_buf != NULL) {
292 data = (u8 *) skb_put(skb, cmd_len);
293 memcpy(data, cmd_buf, cmd_len);
294 }
295
296 mutex_lock(&wmi->op_mutex);
297
298 /* check if wmi stopped flag is set */
299 if (unlikely(wmi->stopped)) {
300 ret = -EPROTO;
301 goto out;
302 }
303
304 /* record the rsp buffer and length */
305 wmi->cmd_rsp_buf = rsp_buf;
306 wmi->cmd_rsp_len = rsp_len;
307
308 spin_lock_irqsave(&wmi->wmi_lock, flags);
309 wmi->last_cmd_id = cmd_id;
310 spin_unlock_irqrestore(&wmi->wmi_lock, flags);
311
312 ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
313 if (ret)
314 goto out;
315
316 time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
317 if (!time_left) {
318 ath_print(common, ATH_DBG_WMI,
319 "Timeout waiting for WMI command: %s\n",
320 wmi_cmd_to_name(cmd_id));
321 mutex_unlock(&wmi->op_mutex);
322 return -ETIMEDOUT;
323 }
324
325 mutex_unlock(&wmi->op_mutex);
326
327 return 0;
328
329out:
330 ath_print(common, ATH_DBG_WMI,
331 "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
332 mutex_unlock(&wmi->op_mutex);
333 kfree_skb(skb);
334
335 return ret;
336}
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
new file mode 100644
index 000000000000..765db5faa2d3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -0,0 +1,139 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef WMI_H
18#define WMI_H
19
20
/*
 * Payload of WMI_TXRATE_EVENTID: the target's current TX rate plus a
 * small rate-control statistics blob.  Wire format: big-endian, packed.
 */
21struct wmi_event_txrate {
22 __be32 txrate; /* current transmit rate (big-endian) */
23 struct {
24 u8 rssi_thresh; /* NOTE(review): presumably an RSSI threshold from target RC — confirm */
25 u8 per; /* NOTE(review): presumably packet error rate — confirm */
26 } rc_stats;
27} __packed;
28
/*
 * Header prepended to every WMI command/response; accounted for in the
 * skb headroom reserved by ath9k_wmi_cmd().  Big-endian on the wire.
 */
29struct wmi_cmd_hdr {
30 __be16 command_id; /* enum wmi_cmd_id value */
31 __be16 seq_no; /* sequence number matching commands to replies */
32} __packed;
33
/* Payload of WMI_SWBA_EVENTID (software beacon alert). */
34struct wmi_swba {
35 u8 beacon_pending; /* NOTE(review): non-zero appears to mean a beacon awaits transmission — confirm */
36} __packed;
37
/*
 * WMI command identifiers sent from host to target, starting at 0x0001.
 * The value is carried big-endian in wmi_cmd_hdr.command_id.
 */
38enum wmi_cmd_id {
39 WMI_ECHO_CMDID = 0x0001,
40 WMI_ACCESS_MEMORY_CMDID,
41
42 /* Commands to Target */
43 WMI_DISABLE_INTR_CMDID,
44 WMI_ENABLE_INTR_CMDID,
45 WMI_RX_LINK_CMDID,
46 WMI_ATH_INIT_CMDID,
47 WMI_ABORT_TXQ_CMDID,
48 WMI_STOP_TX_DMA_CMDID,
49 WMI_STOP_DMA_RECV_CMDID,
50 WMI_ABORT_TX_DMA_CMDID,
51 WMI_DRAIN_TXQ_CMDID,
52 WMI_DRAIN_TXQ_ALL_CMDID,
53 WMI_START_RECV_CMDID,
54 WMI_STOP_RECV_CMDID,
55 WMI_FLUSH_RECV_CMDID,
56 WMI_SET_MODE_CMDID,
57 WMI_RESET_CMDID,
58 WMI_NODE_CREATE_CMDID,
59 WMI_NODE_REMOVE_CMDID,
60 WMI_VAP_REMOVE_CMDID,
61 WMI_VAP_CREATE_CMDID,
62 WMI_BEACON_UPDATE_CMDID,
63 WMI_REG_READ_CMDID,
64 WMI_REG_WRITE_CMDID,
65 WMI_RC_STATE_CHANGE_CMDID,
66 WMI_RC_RATE_UPDATE_CMDID,
67 WMI_DEBUG_INFO_CMDID,
68 WMI_HOST_ATTACH, /* note: only ID in this enum without the _CMDID suffix */
69 WMI_TARGET_IC_UPDATE_CMDID,
70 WMI_TGT_STATS_CMDID,
71 WMI_TX_AGGR_ENABLE_CMDID,
72 WMI_TGT_DETACH_CMDID,
73 WMI_TGT_TXQ_ENABLE_CMDID,
74};
75
/*
 * WMI event identifiers sent from target to host, starting at 0x1001
 * (disjoint from the 0x0001-based command ID space).
 */
76enum wmi_event_id {
77 WMI_TGT_RDY_EVENTID = 0x1001,
78 WMI_SWBA_EVENTID, /* payload: struct wmi_swba */
79 WMI_FATAL_EVENTID,
80 WMI_TXTO_EVENTID,
81 WMI_BMISS_EVENTID,
82 WMI_WLAN_TXCOMP_EVENTID,
83 WMI_DELBA_EVENTID,
84 WMI_TXRATE_EVENTID, /* payload: struct wmi_event_txrate */
85};
86
/* Capacity of the multi_write[] register-write batch in struct wmi. */
87#define MAX_CMD_NUMBER 62

/* One queued register write; big-endian, as sent over WMI. */
89struct register_write {
90 __be32 reg; /* register address */
91 __be32 val; /* value to write */
92};
93
/*
 * Per-device WMI state.  Command issue is serialized by op_mutex; the
 * issuer records its response buffer and cmd_id, then blocks on
 * cmd_wait until the completion path signals the reply (see
 * ath9k_wmi_cmd() in wmi.c).
 */
94struct wmi {
95 struct ath9k_htc_priv *drv_priv; /* back-pointer to owning driver instance */
96 struct htc_target *htc; /* HTC transport used to send WMI skbs */
97 enum htc_endpoint_id ctrl_epid; /* control endpoint WMI commands go out on */
98 struct mutex op_mutex; /* serializes command issue/response cycles */
99 struct completion cmd_wait; /* completed when the target's reply arrives */
100 enum wmi_cmd_id last_cmd_id; /* in-flight command; guarded by wmi_lock */
101 u16 tx_seq_id;
102 u8 *cmd_rsp_buf; /* caller's response buffer, valid while op_mutex held */
103 u32 cmd_rsp_len; /* size of cmd_rsp_buf */
104 bool stopped; /* set on teardown; makes ath9k_wmi_cmd() fail with -EPROTO */
105
106 struct sk_buff *wmi_skb;
107 spinlock_t wmi_lock; /* protects last_cmd_id */
108
109 atomic_t mwrite_cnt;
110 struct register_write multi_write[MAX_CMD_NUMBER]; /* batched register writes */
111 u32 multi_write_idx; /* next free slot in multi_write[] */
112 struct mutex multi_write_mutex; /* protects multi_write[]/multi_write_idx */
113};
114
/* Allocate and initialize WMI state for @priv; NULL on failure. */
115struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
/* Tear down the WMI state created by ath9k_init_wmi(). */
116void ath9k_deinit_wmi(struct ath9k_htc_priv *priv);
/* Connect the WMI control service on @htc; returns the endpoint in @wmi_ctrl_epid. */
117int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
118 enum htc_endpoint_id *wmi_ctrl_epid);
/* Synchronously issue @cmd_id and wait up to @timeout jiffies for the reply. */
119int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
120 u8 *cmd_buf, u32 cmd_len,
121 u8 *rsp_buf, u32 rsp_len,
122 u32 timeout);
/* Tasklet entry point for deferred WMI event processing. */
123void ath9k_wmi_tasklet(unsigned long data);
124
/*
 * Issue a payload-less WMI command with a 2-second timeout.  Relies on
 * 'priv', 'cmd_rsp' and 'ret' being in scope at the expansion site.
 */
125#define WMI_CMD(_wmi_cmd) \
126 do { \
127 ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, NULL, 0, \
128 (u8 *) &cmd_rsp, \
129 sizeof(cmd_rsp), HZ*2); \
130 } while (0)
131
/*
 * Issue a WMI command carrying *_buf as payload, 2-second timeout.
 * Relies on 'priv', 'cmd_rsp' and 'ret' in scope at the expansion site.
 * NOTE(review): passes '&cmd_rsp' where WMI_CMD casts to '(u8 *)' —
 * likely relies on implicit conversion; confirm against callers.
 */
132#define WMI_CMD_BUF(_wmi_cmd, _buf) \
133 do { \
134 ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, \
135 (u8 *) _buf, sizeof(*_buf), \
136 &cmd_rsp, sizeof(cmd_rsp), HZ*2); \
137 } while (0)
138
139#endif /* WMI_H */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 294b486bc3ed..3db19172b43b 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -15,10 +15,11 @@
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "ath9k.h"
18#include "ar9003_mac.h"
18 19
19#define BITS_PER_BYTE 8 20#define BITS_PER_BYTE 8
20#define OFDM_PLCP_BITS 22 21#define OFDM_PLCP_BITS 22
21#define HT_RC_2_MCS(_rc) ((_rc) & 0x0f) 22#define HT_RC_2_MCS(_rc) ((_rc) & 0x1f)
22#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1) 23#define HT_RC_2_STREAMS(_rc) ((((_rc) & 0x78) >> 3) + 1)
23#define L_STF 8 24#define L_STF 8
24#define L_LTF 8 25#define L_LTF 8
@@ -33,7 +34,7 @@
33 34
34#define OFDM_SIFS_TIME 16 35#define OFDM_SIFS_TIME 16
35 36
36static u32 bits_per_symbol[][2] = { 37static u16 bits_per_symbol[][2] = {
37 /* 20MHz 40MHz */ 38 /* 20MHz 40MHz */
38 { 26, 54 }, /* 0: BPSK */ 39 { 26, 54 }, /* 0: BPSK */
39 { 52, 108 }, /* 1: QPSK 1/2 */ 40 { 52, 108 }, /* 1: QPSK 1/2 */
@@ -43,14 +44,6 @@ static u32 bits_per_symbol[][2] = {
43 { 208, 432 }, /* 5: 64-QAM 2/3 */ 44 { 208, 432 }, /* 5: 64-QAM 2/3 */
44 { 234, 486 }, /* 6: 64-QAM 3/4 */ 45 { 234, 486 }, /* 6: 64-QAM 3/4 */
45 { 260, 540 }, /* 7: 64-QAM 5/6 */ 46 { 260, 540 }, /* 7: 64-QAM 5/6 */
46 { 52, 108 }, /* 8: BPSK */
47 { 104, 216 }, /* 9: QPSK 1/2 */
48 { 156, 324 }, /* 10: QPSK 3/4 */
49 { 208, 432 }, /* 11: 16-QAM 1/2 */
50 { 312, 648 }, /* 12: 16-QAM 3/4 */
51 { 416, 864 }, /* 13: 64-QAM 2/3 */
52 { 468, 972 }, /* 14: 64-QAM 3/4 */
53 { 520, 1080 }, /* 15: 64-QAM 5/6 */
54}; 47};
55 48
56#define IS_HT_RATE(_rate) ((_rate) & 0x80) 49#define IS_HT_RATE(_rate) ((_rate) & 0x80)
@@ -59,40 +52,50 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
59 struct ath_atx_tid *tid, 52 struct ath_atx_tid *tid,
60 struct list_head *bf_head); 53 struct list_head *bf_head);
61static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 54static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
62 struct ath_txq *txq, 55 struct ath_txq *txq, struct list_head *bf_q,
63 struct list_head *bf_q, 56 struct ath_tx_status *ts, int txok, int sendbar);
64 int txok, int sendbar);
65static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 57static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
66 struct list_head *head); 58 struct list_head *head);
67static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf); 59static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
68static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, 60static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
69 int txok); 61 struct ath_tx_status *ts, int txok);
70static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, 62static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
71 int nbad, int txok, bool update_rc); 63 int nbad, int txok, bool update_rc);
72 64
73enum { 65enum {
74 MCS_DEFAULT, 66 MCS_HT20,
67 MCS_HT20_SGI,
75 MCS_HT40, 68 MCS_HT40,
76 MCS_HT40_SGI, 69 MCS_HT40_SGI,
77}; 70};
78 71
79static int ath_max_4ms_framelen[3][16] = { 72static int ath_max_4ms_framelen[4][32] = {
80 [MCS_DEFAULT] = { 73 [MCS_HT20] = {
81 3216, 6434, 9650, 12868, 19304, 25740, 28956, 32180, 74 3212, 6432, 9648, 12864, 19300, 25736, 28952, 32172,
82 6430, 12860, 19300, 25736, 38600, 51472, 57890, 64320, 75 6424, 12852, 19280, 25708, 38568, 51424, 57852, 64280,
76 9628, 19260, 28896, 38528, 57792, 65532, 65532, 65532,
77 12828, 25656, 38488, 51320, 65532, 65532, 65532, 65532,
78 },
79 [MCS_HT20_SGI] = {
80 3572, 7144, 10720, 14296, 21444, 28596, 32172, 35744,
81 7140, 14284, 21428, 28568, 42856, 57144, 64288, 65532,
82 10700, 21408, 32112, 42816, 64228, 65532, 65532, 65532,
83 14256, 28516, 42780, 57040, 65532, 65532, 65532, 65532,
83 }, 84 },
84 [MCS_HT40] = { 85 [MCS_HT40] = {
85 6684, 13368, 20052, 26738, 40104, 53476, 60156, 66840, 86 6680, 13360, 20044, 26724, 40092, 53456, 60140, 65532,
86 13360, 26720, 40080, 53440, 80160, 106880, 120240, 133600, 87 13348, 26700, 40052, 53400, 65532, 65532, 65532, 65532,
88 20004, 40008, 60016, 65532, 65532, 65532, 65532, 65532,
89 26644, 53292, 65532, 65532, 65532, 65532, 65532, 65532,
87 }, 90 },
88 [MCS_HT40_SGI] = { 91 [MCS_HT40_SGI] = {
89 /* TODO: Only MCS 7 and 15 updated, recalculate the rest */ 92 7420, 14844, 22272, 29696, 44544, 59396, 65532, 65532,
90 6684, 13368, 20052, 26738, 40104, 53476, 60156, 74200, 93 14832, 29668, 44504, 59340, 65532, 65532, 65532, 65532,
91 13360, 26720, 40080, 53440, 80160, 106880, 120240, 148400, 94 22232, 44464, 65532, 65532, 65532, 65532, 65532, 65532,
95 29616, 59232, 65532, 65532, 65532, 65532, 65532, 65532,
92 } 96 }
93}; 97};
94 98
95
96/*********************/ 99/*********************/
97/* Aggregation logic */ 100/* Aggregation logic */
98/*********************/ 101/*********************/
@@ -223,6 +226,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
223{ 226{
224 struct ath_buf *bf; 227 struct ath_buf *bf;
225 struct list_head bf_head; 228 struct list_head bf_head;
229 struct ath_tx_status ts;
230
231 memset(&ts, 0, sizeof(ts));
226 INIT_LIST_HEAD(&bf_head); 232 INIT_LIST_HEAD(&bf_head);
227 233
228 for (;;) { 234 for (;;) {
@@ -236,7 +242,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
236 ath_tx_update_baw(sc, tid, bf->bf_seqno); 242 ath_tx_update_baw(sc, tid, bf->bf_seqno);
237 243
238 spin_unlock(&txq->axq_lock); 244 spin_unlock(&txq->axq_lock);
239 ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0); 245 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
240 spin_lock(&txq->axq_lock); 246 spin_lock(&txq->axq_lock);
241 } 247 }
242 248
@@ -259,25 +265,46 @@ static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
259 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); 265 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
260} 266}
261 267
262static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf) 268static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc)
263{ 269{
264 struct ath_buf *tbf; 270 struct ath_buf *bf = NULL;
265 271
266 spin_lock_bh(&sc->tx.txbuflock); 272 spin_lock_bh(&sc->tx.txbuflock);
267 if (WARN_ON(list_empty(&sc->tx.txbuf))) { 273
274 if (unlikely(list_empty(&sc->tx.txbuf))) {
268 spin_unlock_bh(&sc->tx.txbuflock); 275 spin_unlock_bh(&sc->tx.txbuflock);
269 return NULL; 276 return NULL;
270 } 277 }
271 tbf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); 278
272 list_del(&tbf->list); 279 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
280 list_del(&bf->list);
281
273 spin_unlock_bh(&sc->tx.txbuflock); 282 spin_unlock_bh(&sc->tx.txbuflock);
274 283
284 return bf;
285}
286
287static void ath_tx_return_buffer(struct ath_softc *sc, struct ath_buf *bf)
288{
289 spin_lock_bh(&sc->tx.txbuflock);
290 list_add_tail(&bf->list, &sc->tx.txbuf);
291 spin_unlock_bh(&sc->tx.txbuflock);
292}
293
294static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
295{
296 struct ath_buf *tbf;
297
298 tbf = ath_tx_get_buffer(sc);
299 if (WARN_ON(!tbf))
300 return NULL;
301
275 ATH_TXBUF_RESET(tbf); 302 ATH_TXBUF_RESET(tbf);
276 303
277 tbf->aphy = bf->aphy; 304 tbf->aphy = bf->aphy;
278 tbf->bf_mpdu = bf->bf_mpdu; 305 tbf->bf_mpdu = bf->bf_mpdu;
279 tbf->bf_buf_addr = bf->bf_buf_addr; 306 tbf->bf_buf_addr = bf->bf_buf_addr;
280 *(tbf->bf_desc) = *(bf->bf_desc); 307 memcpy(tbf->bf_desc, bf->bf_desc, sc->sc_ah->caps.tx_desc_len);
281 tbf->bf_state = bf->bf_state; 308 tbf->bf_state = bf->bf_state;
282 tbf->bf_dmacontext = bf->bf_dmacontext; 309 tbf->bf_dmacontext = bf->bf_dmacontext;
283 310
@@ -286,7 +313,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
286 313
287static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, 314static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
288 struct ath_buf *bf, struct list_head *bf_q, 315 struct ath_buf *bf, struct list_head *bf_q,
289 int txok) 316 struct ath_tx_status *ts, int txok)
290{ 317{
291 struct ath_node *an = NULL; 318 struct ath_node *an = NULL;
292 struct sk_buff *skb; 319 struct sk_buff *skb;
@@ -296,7 +323,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
296 struct ieee80211_tx_info *tx_info; 323 struct ieee80211_tx_info *tx_info;
297 struct ath_atx_tid *tid = NULL; 324 struct ath_atx_tid *tid = NULL;
298 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; 325 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
299 struct ath_desc *ds = bf_last->bf_desc;
300 struct list_head bf_head, bf_pending; 326 struct list_head bf_head, bf_pending;
301 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0; 327 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
302 u32 ba[WME_BA_BMP_SIZE >> 5]; 328 u32 ba[WME_BA_BMP_SIZE >> 5];
@@ -325,10 +351,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
325 memset(ba, 0, WME_BA_BMP_SIZE >> 3); 351 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
326 352
327 if (isaggr && txok) { 353 if (isaggr && txok) {
328 if (ATH_DS_TX_BA(ds)) { 354 if (ts->ts_flags & ATH9K_TX_BA) {
329 seq_st = ATH_DS_BA_SEQ(ds); 355 seq_st = ts->ts_seqnum;
330 memcpy(ba, ATH_DS_BA_BITMAP(ds), 356 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
331 WME_BA_BMP_SIZE >> 3);
332 } else { 357 } else {
333 /* 358 /*
334 * AR5416 can become deaf/mute when BA 359 * AR5416 can become deaf/mute when BA
@@ -345,7 +370,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
345 INIT_LIST_HEAD(&bf_pending); 370 INIT_LIST_HEAD(&bf_pending);
346 INIT_LIST_HEAD(&bf_head); 371 INIT_LIST_HEAD(&bf_head);
347 372
348 nbad = ath_tx_num_badfrms(sc, bf, txok); 373 nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
349 while (bf) { 374 while (bf) {
350 txfail = txpending = 0; 375 txfail = txpending = 0;
351 bf_next = bf->bf_next; 376 bf_next = bf->bf_next;
@@ -359,7 +384,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
359 acked_cnt++; 384 acked_cnt++;
360 } else { 385 } else {
361 if (!(tid->state & AGGR_CLEANUP) && 386 if (!(tid->state & AGGR_CLEANUP) &&
362 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { 387 !bf_last->bf_tx_aborted) {
363 if (bf->bf_retries < ATH_MAX_SW_RETRIES) { 388 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
364 ath_tx_set_retry(sc, txq, bf); 389 ath_tx_set_retry(sc, txq, bf);
365 txpending = 1; 390 txpending = 1;
@@ -378,7 +403,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
378 } 403 }
379 } 404 }
380 405
381 if (bf_next == NULL) { 406 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
407 bf_next == NULL) {
382 /* 408 /*
383 * Make sure the last desc is reclaimed if it 409 * Make sure the last desc is reclaimed if it
384 * not a holding desc. 410 * not a holding desc.
@@ -402,45 +428,53 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
402 spin_unlock_bh(&txq->axq_lock); 428 spin_unlock_bh(&txq->axq_lock);
403 429
404 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 430 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
405 ath_tx_rc_status(bf, ds, nbad, txok, true); 431 ath_tx_rc_status(bf, ts, nbad, txok, true);
406 rc_update = false; 432 rc_update = false;
407 } else { 433 } else {
408 ath_tx_rc_status(bf, ds, nbad, txok, false); 434 ath_tx_rc_status(bf, ts, nbad, txok, false);
409 } 435 }
410 436
411 ath_tx_complete_buf(sc, bf, txq, &bf_head, !txfail, sendbar); 437 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
438 !txfail, sendbar);
412 } else { 439 } else {
413 /* retry the un-acked ones */ 440 /* retry the un-acked ones */
414 if (bf->bf_next == NULL && bf_last->bf_stale) { 441 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
415 struct ath_buf *tbf; 442 if (bf->bf_next == NULL && bf_last->bf_stale) {
416 443 struct ath_buf *tbf;
417 tbf = ath_clone_txbuf(sc, bf_last); 444
418 /* 445 tbf = ath_clone_txbuf(sc, bf_last);
419 * Update tx baw and complete the frame with 446 /*
420 * failed status if we run out of tx buf 447 * Update tx baw and complete the
421 */ 448 * frame with failed status if we
422 if (!tbf) { 449 * run out of tx buf.
423 spin_lock_bh(&txq->axq_lock); 450 */
424 ath_tx_update_baw(sc, tid, 451 if (!tbf) {
425 bf->bf_seqno); 452 spin_lock_bh(&txq->axq_lock);
426 spin_unlock_bh(&txq->axq_lock); 453 ath_tx_update_baw(sc, tid,
427 454 bf->bf_seqno);
428 bf->bf_state.bf_type |= BUF_XRETRY; 455 spin_unlock_bh(&txq->axq_lock);
429 ath_tx_rc_status(bf, ds, nbad, 456
430 0, false); 457 bf->bf_state.bf_type |=
431 ath_tx_complete_buf(sc, bf, txq, 458 BUF_XRETRY;
432 &bf_head, 0, 0); 459 ath_tx_rc_status(bf, ts, nbad,
433 break; 460 0, false);
461 ath_tx_complete_buf(sc, bf, txq,
462 &bf_head,
463 ts, 0, 0);
464 break;
465 }
466
467 ath9k_hw_cleartxdesc(sc->sc_ah,
468 tbf->bf_desc);
469 list_add_tail(&tbf->list, &bf_head);
470 } else {
471 /*
472 * Clear descriptor status words for
473 * software retry
474 */
475 ath9k_hw_cleartxdesc(sc->sc_ah,
476 bf->bf_desc);
434 } 477 }
435
436 ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
437 list_add_tail(&tbf->list, &bf_head);
438 } else {
439 /*
440 * Clear descriptor status words for
441 * software retry
442 */
443 ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
444 } 478 }
445 479
446 /* 480 /*
@@ -508,12 +542,13 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
508 break; 542 break;
509 } 543 }
510 544
511 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI) 545 if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
512 modeidx = MCS_HT40_SGI;
513 else if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
514 modeidx = MCS_HT40; 546 modeidx = MCS_HT40;
515 else 547 else
516 modeidx = MCS_DEFAULT; 548 modeidx = MCS_HT20;
549
550 if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
551 modeidx++;
517 552
518 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx]; 553 frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
519 max_4ms_framelen = min(max_4ms_framelen, frmlen); 554 max_4ms_framelen = min(max_4ms_framelen, frmlen);
@@ -558,7 +593,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
558 u32 nsymbits, nsymbols; 593 u32 nsymbits, nsymbols;
559 u16 minlen; 594 u16 minlen;
560 u8 flags, rix; 595 u8 flags, rix;
561 int width, half_gi, ndelim, mindelim; 596 int width, streams, half_gi, ndelim, mindelim;
562 597
563 /* Select standard number of delimiters based on frame length alone */ 598 /* Select standard number of delimiters based on frame length alone */
564 ndelim = ATH_AGGR_GET_NDELIM(frmlen); 599 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
@@ -598,7 +633,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
598 if (nsymbols == 0) 633 if (nsymbols == 0)
599 nsymbols = 1; 634 nsymbols = 1;
600 635
601 nsymbits = bits_per_symbol[rix][width]; 636 streams = HT_RC_2_STREAMS(rix);
637 nsymbits = bits_per_symbol[rix % 8][width] * streams;
602 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE; 638 minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
603 639
604 if (frmlen < minlen) { 640 if (frmlen < minlen) {
@@ -664,7 +700,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
664 bpad = PADBYTES(al_delta) + (ndelim << 2); 700 bpad = PADBYTES(al_delta) + (ndelim << 2);
665 701
666 bf->bf_next = NULL; 702 bf->bf_next = NULL;
667 bf->bf_desc->ds_link = 0; 703 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
668 704
669 /* link buffers of this frame to the aggregate */ 705 /* link buffers of this frame to the aggregate */
670 ath_tx_addto_baw(sc, tid, bf); 706 ath_tx_addto_baw(sc, tid, bf);
@@ -672,7 +708,8 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
672 list_move_tail(&bf->list, bf_q); 708 list_move_tail(&bf->list, bf_q);
673 if (bf_prev) { 709 if (bf_prev) {
674 bf_prev->bf_next = bf; 710 bf_prev->bf_next = bf;
675 bf_prev->bf_desc->ds_link = bf->bf_daddr; 711 ath9k_hw_set_desc_link(sc->sc_ah, bf_prev->bf_desc,
712 bf->bf_daddr);
676 } 713 }
677 bf_prev = bf; 714 bf_prev = bf;
678 715
@@ -752,8 +789,11 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
752 struct ath_node *an = (struct ath_node *)sta->drv_priv; 789 struct ath_node *an = (struct ath_node *)sta->drv_priv;
753 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 790 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
754 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; 791 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
792 struct ath_tx_status ts;
755 struct ath_buf *bf; 793 struct ath_buf *bf;
756 struct list_head bf_head; 794 struct list_head bf_head;
795
796 memset(&ts, 0, sizeof(ts));
757 INIT_LIST_HEAD(&bf_head); 797 INIT_LIST_HEAD(&bf_head);
758 798
759 if (txtid->state & AGGR_CLEANUP) 799 if (txtid->state & AGGR_CLEANUP)
@@ -780,7 +820,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
780 } 820 }
781 list_move_tail(&bf->list, &bf_head); 821 list_move_tail(&bf->list, &bf_head);
782 ath_tx_update_baw(sc, txtid, bf->bf_seqno); 822 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
783 ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0); 823 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
784 } 824 }
785 spin_unlock_bh(&txq->axq_lock); 825 spin_unlock_bh(&txq->axq_lock);
786 826
@@ -849,7 +889,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
849 struct ath_hw *ah = sc->sc_ah; 889 struct ath_hw *ah = sc->sc_ah;
850 struct ath_common *common = ath9k_hw_common(ah); 890 struct ath_common *common = ath9k_hw_common(ah);
851 struct ath9k_tx_queue_info qi; 891 struct ath9k_tx_queue_info qi;
852 int qnum; 892 int qnum, i;
853 893
854 memset(&qi, 0, sizeof(qi)); 894 memset(&qi, 0, sizeof(qi));
855 qi.tqi_subtype = subtype; 895 qi.tqi_subtype = subtype;
@@ -873,11 +913,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
873 * The UAPSD queue is an exception, since we take a desc- 913 * The UAPSD queue is an exception, since we take a desc-
874 * based intr on the EOSP frames. 914 * based intr on the EOSP frames.
875 */ 915 */
876 if (qtype == ATH9K_TX_QUEUE_UAPSD) 916 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
877 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE; 917 qi.tqi_qflags = TXQ_FLAG_TXOKINT_ENABLE |
878 else 918 TXQ_FLAG_TXERRINT_ENABLE;
879 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | 919 } else {
880 TXQ_FLAG_TXDESCINT_ENABLE; 920 if (qtype == ATH9K_TX_QUEUE_UAPSD)
921 qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
922 else
923 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
924 TXQ_FLAG_TXDESCINT_ENABLE;
925 }
881 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi); 926 qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
882 if (qnum == -1) { 927 if (qnum == -1) {
883 /* 928 /*
@@ -904,6 +949,11 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
904 txq->axq_depth = 0; 949 txq->axq_depth = 0;
905 txq->axq_tx_inprogress = false; 950 txq->axq_tx_inprogress = false;
906 sc->tx.txqsetup |= 1<<qnum; 951 sc->tx.txqsetup |= 1<<qnum;
952
953 txq->txq_headidx = txq->txq_tailidx = 0;
954 for (i = 0; i < ATH_TXFIFO_DEPTH; i++)
955 INIT_LIST_HEAD(&txq->txq_fifo[i]);
956 INIT_LIST_HEAD(&txq->txq_fifo_pending);
907 } 957 }
908 return &sc->tx.txq[qnum]; 958 return &sc->tx.txq[qnum];
909} 959}
@@ -1028,45 +1078,63 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1028{ 1078{
1029 struct ath_buf *bf, *lastbf; 1079 struct ath_buf *bf, *lastbf;
1030 struct list_head bf_head; 1080 struct list_head bf_head;
1081 struct ath_tx_status ts;
1031 1082
1083 memset(&ts, 0, sizeof(ts));
1032 INIT_LIST_HEAD(&bf_head); 1084 INIT_LIST_HEAD(&bf_head);
1033 1085
1034 for (;;) { 1086 for (;;) {
1035 spin_lock_bh(&txq->axq_lock); 1087 spin_lock_bh(&txq->axq_lock);
1036 1088
1037 if (list_empty(&txq->axq_q)) { 1089 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1038 txq->axq_link = NULL; 1090 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
1039 spin_unlock_bh(&txq->axq_lock); 1091 txq->txq_headidx = txq->txq_tailidx = 0;
1040 break; 1092 spin_unlock_bh(&txq->axq_lock);
1041 } 1093 break;
1042 1094 } else {
1043 bf = list_first_entry(&txq->axq_q, struct ath_buf, list); 1095 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
1096 struct ath_buf, list);
1097 }
1098 } else {
1099 if (list_empty(&txq->axq_q)) {
1100 txq->axq_link = NULL;
1101 spin_unlock_bh(&txq->axq_lock);
1102 break;
1103 }
1104 bf = list_first_entry(&txq->axq_q, struct ath_buf,
1105 list);
1044 1106
1045 if (bf->bf_stale) { 1107 if (bf->bf_stale) {
1046 list_del(&bf->list); 1108 list_del(&bf->list);
1047 spin_unlock_bh(&txq->axq_lock); 1109 spin_unlock_bh(&txq->axq_lock);
1048 1110
1049 spin_lock_bh(&sc->tx.txbuflock); 1111 ath_tx_return_buffer(sc, bf);
1050 list_add_tail(&bf->list, &sc->tx.txbuf); 1112 continue;
1051 spin_unlock_bh(&sc->tx.txbuflock); 1113 }
1052 continue;
1053 } 1114 }
1054 1115
1055 lastbf = bf->bf_lastbf; 1116 lastbf = bf->bf_lastbf;
1056 if (!retry_tx) 1117 if (!retry_tx)
1057 lastbf->bf_desc->ds_txstat.ts_flags = 1118 lastbf->bf_tx_aborted = true;
1058 ATH9K_TX_SW_ABORTED; 1119
1120 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1121 list_cut_position(&bf_head,
1122 &txq->txq_fifo[txq->txq_tailidx],
1123 &lastbf->list);
1124 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
1125 } else {
1126 /* remove ath_buf's of the same mpdu from txq */
1127 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1128 }
1059 1129
1060 /* remove ath_buf's of the same mpdu from txq */
1061 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
1062 txq->axq_depth--; 1130 txq->axq_depth--;
1063 1131
1064 spin_unlock_bh(&txq->axq_lock); 1132 spin_unlock_bh(&txq->axq_lock);
1065 1133
1066 if (bf_isampdu(bf)) 1134 if (bf_isampdu(bf))
1067 ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0); 1135 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
1068 else 1136 else
1069 ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0); 1137 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
1070 } 1138 }
1071 1139
1072 spin_lock_bh(&txq->axq_lock); 1140 spin_lock_bh(&txq->axq_lock);
@@ -1081,6 +1149,27 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1081 spin_unlock_bh(&txq->axq_lock); 1149 spin_unlock_bh(&txq->axq_lock);
1082 } 1150 }
1083 } 1151 }
1152
1153 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1154 spin_lock_bh(&txq->axq_lock);
1155 while (!list_empty(&txq->txq_fifo_pending)) {
1156 bf = list_first_entry(&txq->txq_fifo_pending,
1157 struct ath_buf, list);
1158 list_cut_position(&bf_head,
1159 &txq->txq_fifo_pending,
1160 &bf->bf_lastbf->list);
1161 spin_unlock_bh(&txq->axq_lock);
1162
1163 if (bf_isampdu(bf))
1164 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
1165 &ts, 0);
1166 else
1167 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1168 &ts, 0, 0);
1169 spin_lock_bh(&txq->axq_lock);
1170 }
1171 spin_unlock_bh(&txq->axq_lock);
1172 }
1084} 1173}
1085 1174
1086void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) 1175void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1218,44 +1307,47 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1218 1307
1219 bf = list_first_entry(head, struct ath_buf, list); 1308 bf = list_first_entry(head, struct ath_buf, list);
1220 1309
1221 list_splice_tail_init(head, &txq->axq_q);
1222 txq->axq_depth++;
1223
1224 ath_print(common, ATH_DBG_QUEUE, 1310 ath_print(common, ATH_DBG_QUEUE,
1225 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); 1311 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
1226 1312
1227 if (txq->axq_link == NULL) { 1313 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1314 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
1315 list_splice_tail_init(head, &txq->txq_fifo_pending);
1316 return;
1317 }
1318 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
1319 ath_print(common, ATH_DBG_XMIT,
1320 "Initializing tx fifo %d which "
1321 "is non-empty\n",
1322 txq->txq_headidx);
1323 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1324 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1325 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
1228 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1326 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1229 ath_print(common, ATH_DBG_XMIT, 1327 ath_print(common, ATH_DBG_XMIT,
1230 "TXDP[%u] = %llx (%p)\n", 1328 "TXDP[%u] = %llx (%p)\n",
1231 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1329 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1232 } else { 1330 } else {
1233 *txq->axq_link = bf->bf_daddr; 1331 list_splice_tail_init(head, &txq->axq_q);
1234 ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
1235 txq->axq_qnum, txq->axq_link,
1236 ito64(bf->bf_daddr), bf->bf_desc);
1237 }
1238 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
1239 ath9k_hw_txstart(ah, txq->axq_qnum);
1240}
1241 1332
1242static struct ath_buf *ath_tx_get_buffer(struct ath_softc *sc) 1333 if (txq->axq_link == NULL) {
1243{ 1334 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1244 struct ath_buf *bf = NULL; 1335 ath_print(common, ATH_DBG_XMIT,
1245 1336 "TXDP[%u] = %llx (%p)\n",
1246 spin_lock_bh(&sc->tx.txbuflock); 1337 txq->axq_qnum, ito64(bf->bf_daddr),
1247 1338 bf->bf_desc);
1248 if (unlikely(list_empty(&sc->tx.txbuf))) { 1339 } else {
1249 spin_unlock_bh(&sc->tx.txbuflock); 1340 *txq->axq_link = bf->bf_daddr;
1250 return NULL; 1341 ath_print(common, ATH_DBG_XMIT,
1342 "link[%u] (%p)=%llx (%p)\n",
1343 txq->axq_qnum, txq->axq_link,
1344 ito64(bf->bf_daddr), bf->bf_desc);
1345 }
1346 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1347 &txq->axq_link);
1348 ath9k_hw_txstart(ah, txq->axq_qnum);
1251 } 1349 }
1252 1350 txq->axq_depth++;
1253 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list);
1254 list_del(&bf->list);
1255
1256 spin_unlock_bh(&sc->tx.txbuflock);
1257
1258 return bf;
1259} 1351}
1260 1352
1261static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, 1353static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -1402,8 +1494,7 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
1402 INCR(tid->seq_next, IEEE80211_SEQ_MAX); 1494 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1403} 1495}
1404 1496
1405static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb, 1497static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
1406 struct ath_txq *txq)
1407{ 1498{
1408 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1499 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1409 int flags = 0; 1500 int flags = 0;
@@ -1414,6 +1505,9 @@ static int setup_tx_flags(struct ath_softc *sc, struct sk_buff *skb,
1414 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 1505 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1415 flags |= ATH9K_TXDESC_NOACK; 1506 flags |= ATH9K_TXDESC_NOACK;
1416 1507
1508 if (use_ldpc)
1509 flags |= ATH9K_TXDESC_LDPC;
1510
1417 return flags; 1511 return flags;
1418} 1512}
1419 1513
@@ -1432,8 +1526,9 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1432 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen; 1526 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1433 1527
1434 /* find number of symbols: PLCP + data */ 1528 /* find number of symbols: PLCP + data */
1529 streams = HT_RC_2_STREAMS(rix);
1435 nbits = (pktlen << 3) + OFDM_PLCP_BITS; 1530 nbits = (pktlen << 3) + OFDM_PLCP_BITS;
1436 nsymbits = bits_per_symbol[rix][width]; 1531 nsymbits = bits_per_symbol[rix % 8][width] * streams;
1437 nsymbols = (nbits + nsymbits - 1) / nsymbits; 1532 nsymbols = (nbits + nsymbits - 1) / nsymbits;
1438 1533
1439 if (!half_gi) 1534 if (!half_gi)
@@ -1442,7 +1537,6 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1442 duration = SYMBOL_TIME_HALFGI(nsymbols); 1537 duration = SYMBOL_TIME_HALFGI(nsymbols);
1443 1538
1444 /* addup duration for legacy/ht training and signal fields */ 1539 /* addup duration for legacy/ht training and signal fields */
1445 streams = HT_RC_2_STREAMS(rix);
1446 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams); 1540 duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1447 1541
1448 return duration; 1542 return duration;
@@ -1513,6 +1607,8 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1513 series[i].Rate = rix | 0x80; 1607 series[i].Rate = rix | 0x80;
1514 series[i].PktDuration = ath_pkt_duration(sc, rix, bf, 1608 series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1515 is_40, is_sgi, is_sp); 1609 is_40, is_sgi, is_sp);
1610 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1611 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
1516 continue; 1612 continue;
1517 } 1613 }
1518 1614
@@ -1565,15 +1661,16 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1565 int hdrlen; 1661 int hdrlen;
1566 __le16 fc; 1662 __le16 fc;
1567 int padpos, padsize; 1663 int padpos, padsize;
1664 bool use_ldpc = false;
1568 1665
1569 tx_info->pad[0] = 0; 1666 tx_info->pad[0] = 0;
1570 switch (txctl->frame_type) { 1667 switch (txctl->frame_type) {
1571 case ATH9K_NOT_INTERNAL: 1668 case ATH9K_IFT_NOT_INTERNAL:
1572 break; 1669 break;
1573 case ATH9K_INT_PAUSE: 1670 case ATH9K_IFT_PAUSE:
1574 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE; 1671 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1575 /* fall through */ 1672 /* fall through */
1576 case ATH9K_INT_UNPAUSE: 1673 case ATH9K_IFT_UNPAUSE:
1577 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL; 1674 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1578 break; 1675 break;
1579 } 1676 }
@@ -1591,10 +1688,13 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1591 bf->bf_frmlen -= padsize; 1688 bf->bf_frmlen -= padsize;
1592 } 1689 }
1593 1690
1594 if (conf_is_ht(&hw->conf)) 1691 if (conf_is_ht(&hw->conf)) {
1595 bf->bf_state.bf_type |= BUF_HT; 1692 bf->bf_state.bf_type |= BUF_HT;
1693 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1694 use_ldpc = true;
1695 }
1596 1696
1597 bf->bf_flags = setup_tx_flags(sc, skb, txctl->txq); 1697 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
1598 1698
1599 bf->bf_keytype = get_hw_crypto_keytype(skb); 1699 bf->bf_keytype = get_hw_crypto_keytype(skb);
1600 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) { 1700 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
@@ -1653,8 +1753,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1653 list_add_tail(&bf->list, &bf_head); 1753 list_add_tail(&bf->list, &bf_head);
1654 1754
1655 ds = bf->bf_desc; 1755 ds = bf->bf_desc;
1656 ds->ds_link = 0; 1756 ath9k_hw_set_desc_link(ah, ds, 0);
1657 ds->ds_data = bf->bf_buf_addr;
1658 1757
1659 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER, 1758 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER,
1660 bf->bf_keyix, bf->bf_keytype, bf->bf_flags); 1759 bf->bf_keyix, bf->bf_keytype, bf->bf_flags);
@@ -1663,7 +1762,9 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1663 skb->len, /* segment length */ 1762 skb->len, /* segment length */
1664 true, /* first segment */ 1763 true, /* first segment */
1665 true, /* last segment */ 1764 true, /* last segment */
1666 ds); /* first descriptor */ 1765 ds, /* first descriptor */
1766 bf->bf_buf_addr,
1767 txctl->txq->axq_qnum);
1667 1768
1668 spin_lock_bh(&txctl->txq->axq_lock); 1769 spin_lock_bh(&txctl->txq->axq_lock);
1669 1770
@@ -1732,9 +1833,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1732 } 1833 }
1733 spin_unlock_bh(&txq->axq_lock); 1834 spin_unlock_bh(&txq->axq_lock);
1734 1835
1735 spin_lock_bh(&sc->tx.txbuflock); 1836 ath_tx_return_buffer(sc, bf);
1736 list_add_tail(&bf->list, &sc->tx.txbuf);
1737 spin_unlock_bh(&sc->tx.txbuflock);
1738 1837
1739 return r; 1838 return r;
1740 } 1839 }
@@ -1852,9 +1951,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1852} 1951}
1853 1952
1854static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 1953static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1855 struct ath_txq *txq, 1954 struct ath_txq *txq, struct list_head *bf_q,
1856 struct list_head *bf_q, 1955 struct ath_tx_status *ts, int txok, int sendbar)
1857 int txok, int sendbar)
1858{ 1956{
1859 struct sk_buff *skb = bf->bf_mpdu; 1957 struct sk_buff *skb = bf->bf_mpdu;
1860 unsigned long flags; 1958 unsigned long flags;
@@ -1872,7 +1970,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1872 1970
1873 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); 1971 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
1874 ath_tx_complete(sc, skb, bf->aphy, tx_flags); 1972 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1875 ath_debug_stat_tx(sc, txq, bf); 1973 ath_debug_stat_tx(sc, txq, bf, ts);
1876 1974
1877 /* 1975 /*
1878 * Return the list of ath_buf of this mpdu to free queue 1976 * Return the list of ath_buf of this mpdu to free queue
@@ -1883,23 +1981,21 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1883} 1981}
1884 1982
1885static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, 1983static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
1886 int txok) 1984 struct ath_tx_status *ts, int txok)
1887{ 1985{
1888 struct ath_buf *bf_last = bf->bf_lastbf;
1889 struct ath_desc *ds = bf_last->bf_desc;
1890 u16 seq_st = 0; 1986 u16 seq_st = 0;
1891 u32 ba[WME_BA_BMP_SIZE >> 5]; 1987 u32 ba[WME_BA_BMP_SIZE >> 5];
1892 int ba_index; 1988 int ba_index;
1893 int nbad = 0; 1989 int nbad = 0;
1894 int isaggr = 0; 1990 int isaggr = 0;
1895 1991
1896 if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED) 1992 if (bf->bf_tx_aborted)
1897 return 0; 1993 return 0;
1898 1994
1899 isaggr = bf_isaggr(bf); 1995 isaggr = bf_isaggr(bf);
1900 if (isaggr) { 1996 if (isaggr) {
1901 seq_st = ATH_DS_BA_SEQ(ds); 1997 seq_st = ts->ts_seqnum;
1902 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3); 1998 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
1903 } 1999 }
1904 2000
1905 while (bf) { 2001 while (bf) {
@@ -1913,7 +2009,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
1913 return nbad; 2009 return nbad;
1914} 2010}
1915 2011
1916static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, 2012static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
1917 int nbad, int txok, bool update_rc) 2013 int nbad, int txok, bool update_rc)
1918{ 2014{
1919 struct sk_buff *skb = bf->bf_mpdu; 2015 struct sk_buff *skb = bf->bf_mpdu;
@@ -1923,24 +2019,24 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
1923 u8 i, tx_rateindex; 2019 u8 i, tx_rateindex;
1924 2020
1925 if (txok) 2021 if (txok)
1926 tx_info->status.ack_signal = ds->ds_txstat.ts_rssi; 2022 tx_info->status.ack_signal = ts->ts_rssi;
1927 2023
1928 tx_rateindex = ds->ds_txstat.ts_rateindex; 2024 tx_rateindex = ts->ts_rateindex;
1929 WARN_ON(tx_rateindex >= hw->max_rates); 2025 WARN_ON(tx_rateindex >= hw->max_rates);
1930 2026
1931 if (update_rc) 2027 if (ts->ts_status & ATH9K_TXERR_FILT)
1932 tx_info->pad[0] |= ATH_TX_INFO_UPDATE_RC;
1933 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
1934 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 2028 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2029 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
2030 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
1935 2031
1936 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 && 2032 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
1937 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { 2033 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
1938 if (ieee80211_is_data(hdr->frame_control)) { 2034 if (ieee80211_is_data(hdr->frame_control)) {
1939 if (ds->ds_txstat.ts_flags & 2035 if (ts->ts_flags &
1940 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN)) 2036 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
1941 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN; 2037 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
1942 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) || 2038 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
1943 (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO)) 2039 (ts->ts_status & ATH9K_TXERR_FIFO))
1944 tx_info->pad[0] |= ATH_TX_INFO_XRETRY; 2040 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
1945 tx_info->status.ampdu_len = bf->bf_nframes; 2041 tx_info->status.ampdu_len = bf->bf_nframes;
1946 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad; 2042 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
@@ -1978,6 +2074,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1978 struct ath_buf *bf, *lastbf, *bf_held = NULL; 2074 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1979 struct list_head bf_head; 2075 struct list_head bf_head;
1980 struct ath_desc *ds; 2076 struct ath_desc *ds;
2077 struct ath_tx_status ts;
1981 int txok; 2078 int txok;
1982 int status; 2079 int status;
1983 2080
@@ -2017,7 +2114,8 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2017 lastbf = bf->bf_lastbf; 2114 lastbf = bf->bf_lastbf;
2018 ds = lastbf->bf_desc; 2115 ds = lastbf->bf_desc;
2019 2116
2020 status = ath9k_hw_txprocdesc(ah, ds); 2117 memset(&ts, 0, sizeof(ts));
2118 status = ath9k_hw_txprocdesc(ah, ds, &ts);
2021 if (status == -EINPROGRESS) { 2119 if (status == -EINPROGRESS) {
2022 spin_unlock_bh(&txq->axq_lock); 2120 spin_unlock_bh(&txq->axq_lock);
2023 break; 2121 break;
@@ -2028,7 +2126,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2028 * can disable RX. 2126 * can disable RX.
2029 */ 2127 */
2030 if (bf->bf_isnullfunc && 2128 if (bf->bf_isnullfunc &&
2031 (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) { 2129 (ts.ts_status & ATH9K_TX_ACKED)) {
2032 if ((sc->ps_flags & PS_ENABLED)) 2130 if ((sc->ps_flags & PS_ENABLED))
2033 ath9k_enable_ps(sc); 2131 ath9k_enable_ps(sc);
2034 else 2132 else
@@ -2047,31 +2145,30 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2047 &txq->axq_q, lastbf->list.prev); 2145 &txq->axq_q, lastbf->list.prev);
2048 2146
2049 txq->axq_depth--; 2147 txq->axq_depth--;
2050 txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK); 2148 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
2051 txq->axq_tx_inprogress = false; 2149 txq->axq_tx_inprogress = false;
2150 if (bf_held)
2151 list_del(&bf_held->list);
2052 spin_unlock_bh(&txq->axq_lock); 2152 spin_unlock_bh(&txq->axq_lock);
2053 2153
2054 if (bf_held) { 2154 if (bf_held)
2055 spin_lock_bh(&sc->tx.txbuflock); 2155 ath_tx_return_buffer(sc, bf_held);
2056 list_move_tail(&bf_held->list, &sc->tx.txbuf);
2057 spin_unlock_bh(&sc->tx.txbuflock);
2058 }
2059 2156
2060 if (!bf_isampdu(bf)) { 2157 if (!bf_isampdu(bf)) {
2061 /* 2158 /*
2062 * This frame is sent out as a single frame. 2159 * This frame is sent out as a single frame.
2063 * Use hardware retry status for this frame. 2160 * Use hardware retry status for this frame.
2064 */ 2161 */
2065 bf->bf_retries = ds->ds_txstat.ts_longretry; 2162 bf->bf_retries = ts.ts_longretry;
2066 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) 2163 if (ts.ts_status & ATH9K_TXERR_XRETRY)
2067 bf->bf_state.bf_type |= BUF_XRETRY; 2164 bf->bf_state.bf_type |= BUF_XRETRY;
2068 ath_tx_rc_status(bf, ds, 0, txok, true); 2165 ath_tx_rc_status(bf, &ts, 0, txok, true);
2069 } 2166 }
2070 2167
2071 if (bf_isampdu(bf)) 2168 if (bf_isampdu(bf))
2072 ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok); 2169 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
2073 else 2170 else
2074 ath_tx_complete_buf(sc, bf, txq, &bf_head, txok, 0); 2171 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
2075 2172
2076 ath_wake_mac80211_queue(sc, txq); 2173 ath_wake_mac80211_queue(sc, txq);
2077 2174
@@ -2133,10 +2230,121 @@ void ath_tx_tasklet(struct ath_softc *sc)
2133 } 2230 }
2134} 2231}
2135 2232
2233void ath_tx_edma_tasklet(struct ath_softc *sc)
2234{
2235 struct ath_tx_status txs;
2236 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2237 struct ath_hw *ah = sc->sc_ah;
2238 struct ath_txq *txq;
2239 struct ath_buf *bf, *lastbf;
2240 struct list_head bf_head;
2241 int status;
2242 int txok;
2243
2244 for (;;) {
2245 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2246 if (status == -EINPROGRESS)
2247 break;
2248 if (status == -EIO) {
2249 ath_print(common, ATH_DBG_XMIT,
2250 "Error processing tx status\n");
2251 break;
2252 }
2253
2254 /* Skip beacon completions */
2255 if (txs.qid == sc->beacon.beaconq)
2256 continue;
2257
2258 txq = &sc->tx.txq[txs.qid];
2259
2260 spin_lock_bh(&txq->axq_lock);
2261 if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
2262 spin_unlock_bh(&txq->axq_lock);
2263 return;
2264 }
2265
2266 bf = list_first_entry(&txq->txq_fifo[txq->txq_tailidx],
2267 struct ath_buf, list);
2268 lastbf = bf->bf_lastbf;
2269
2270 INIT_LIST_HEAD(&bf_head);
2271 list_cut_position(&bf_head, &txq->txq_fifo[txq->txq_tailidx],
2272 &lastbf->list);
2273 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2274 txq->axq_depth--;
2275 txq->axq_tx_inprogress = false;
2276 spin_unlock_bh(&txq->axq_lock);
2277
2278 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
2279
2280 if (!bf_isampdu(bf)) {
2281 bf->bf_retries = txs.ts_longretry;
2282 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2283 bf->bf_state.bf_type |= BUF_XRETRY;
2284 ath_tx_rc_status(bf, &txs, 0, txok, true);
2285 }
2286
2287 if (bf_isampdu(bf))
2288 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok);
2289 else
2290 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2291 &txs, txok, 0);
2292
2293 ath_wake_mac80211_queue(sc, txq);
2294
2295 spin_lock_bh(&txq->axq_lock);
2296 if (!list_empty(&txq->txq_fifo_pending)) {
2297 INIT_LIST_HEAD(&bf_head);
2298 bf = list_first_entry(&txq->txq_fifo_pending,
2299 struct ath_buf, list);
2300 list_cut_position(&bf_head, &txq->txq_fifo_pending,
2301 &bf->bf_lastbf->list);
2302 ath_tx_txqaddbuf(sc, txq, &bf_head);
2303 } else if (sc->sc_flags & SC_OP_TXAGGR)
2304 ath_txq_schedule(sc, txq);
2305 spin_unlock_bh(&txq->axq_lock);
2306 }
2307}
2308
2136/*****************/ 2309/*****************/
2137/* Init, Cleanup */ 2310/* Init, Cleanup */
2138/*****************/ 2311/*****************/
2139 2312
2313static int ath_txstatus_setup(struct ath_softc *sc, int size)
2314{
2315 struct ath_descdma *dd = &sc->txsdma;
2316 u8 txs_len = sc->sc_ah->caps.txs_len;
2317
2318 dd->dd_desc_len = size * txs_len;
2319 dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
2320 &dd->dd_desc_paddr, GFP_KERNEL);
2321 if (!dd->dd_desc)
2322 return -ENOMEM;
2323
2324 return 0;
2325}
2326
2327static int ath_tx_edma_init(struct ath_softc *sc)
2328{
2329 int err;
2330
2331 err = ath_txstatus_setup(sc, ATH_TXSTATUS_RING_SIZE);
2332 if (!err)
2333 ath9k_hw_setup_statusring(sc->sc_ah, sc->txsdma.dd_desc,
2334 sc->txsdma.dd_desc_paddr,
2335 ATH_TXSTATUS_RING_SIZE);
2336
2337 return err;
2338}
2339
2340static void ath_tx_edma_cleanup(struct ath_softc *sc)
2341{
2342 struct ath_descdma *dd = &sc->txsdma;
2343
2344 dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
2345 dd->dd_desc_paddr);
2346}
2347
2140int ath_tx_init(struct ath_softc *sc, int nbufs) 2348int ath_tx_init(struct ath_softc *sc, int nbufs)
2141{ 2349{
2142 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 2350 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -2145,7 +2353,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2145 spin_lock_init(&sc->tx.txbuflock); 2353 spin_lock_init(&sc->tx.txbuflock);
2146 2354
2147 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, 2355 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2148 "tx", nbufs, 1); 2356 "tx", nbufs, 1, 1);
2149 if (error != 0) { 2357 if (error != 0) {
2150 ath_print(common, ATH_DBG_FATAL, 2358 ath_print(common, ATH_DBG_FATAL,
2151 "Failed to allocate tx descriptors: %d\n", error); 2359 "Failed to allocate tx descriptors: %d\n", error);
@@ -2153,7 +2361,7 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2153 } 2361 }
2154 2362
2155 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf, 2363 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2156 "beacon", ATH_BCBUF, 1); 2364 "beacon", ATH_BCBUF, 1, 1);
2157 if (error != 0) { 2365 if (error != 0) {
2158 ath_print(common, ATH_DBG_FATAL, 2366 ath_print(common, ATH_DBG_FATAL,
2159 "Failed to allocate beacon descriptors: %d\n", error); 2367 "Failed to allocate beacon descriptors: %d\n", error);
@@ -2162,6 +2370,12 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2162 2370
2163 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work); 2371 INIT_DELAYED_WORK(&sc->tx_complete_work, ath_tx_complete_poll_work);
2164 2372
2373 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
2374 error = ath_tx_edma_init(sc);
2375 if (error)
2376 goto err;
2377 }
2378
2165err: 2379err:
2166 if (error != 0) 2380 if (error != 0)
2167 ath_tx_cleanup(sc); 2381 ath_tx_cleanup(sc);
@@ -2176,6 +2390,9 @@ void ath_tx_cleanup(struct ath_softc *sc)
2176 2390
2177 if (sc->tx.txdma.dd_desc_len != 0) 2391 if (sc->tx.txdma.dd_desc_len != 0)
2178 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf); 2392 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
2393
2394 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
2395 ath_tx_edma_cleanup(sc);
2179} 2396}
2180 2397
2181void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) 2398void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index 8263633c003c..873bf526e11f 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -59,6 +59,7 @@ enum ATH_DEBUG {
59 ATH_DBG_PS = 0x00000800, 59 ATH_DBG_PS = 0x00000800,
60 ATH_DBG_HWTIMER = 0x00001000, 60 ATH_DBG_HWTIMER = 0x00001000,
61 ATH_DBG_BTCOEX = 0x00002000, 61 ATH_DBG_BTCOEX = 0x00002000,
62 ATH_DBG_WMI = 0x00004000,
62 ATH_DBG_ANY = 0xffffffff 63 ATH_DBG_ANY = 0xffffffff
63}; 64};
64 65
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index ecc9eb01f4fa..a8f81ea09f14 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -19,8 +19,8 @@
19#include "ath.h" 19#include "ath.h"
20#include "reg.h" 20#include "reg.h"
21 21
22#define REG_READ common->ops->read 22#define REG_READ (common->ops->read)
23#define REG_WRITE common->ops->write 23#define REG_WRITE (common->ops->write)
24 24
25/** 25/**
26 * ath_hw_set_bssid_mask - filter out bssids we listen 26 * ath_hw_set_bssid_mask - filter out bssids we listen
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 00489c40be0c..3f4244f56ce5 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -50,6 +50,7 @@
50 50
51#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \ 51#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \
52 ATH9K_5GHZ_5470_5850 52 ATH9K_5GHZ_5470_5850
53
53/* This one skips what we call "mid band" */ 54/* This one skips what we call "mid band" */
54#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \ 55#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \
55 ATH9K_5GHZ_5725_5850 56 ATH9K_5GHZ_5725_5850
@@ -332,7 +333,6 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
332 ath_reg_apply_active_scan_flags(wiphy, initiator); 333 ath_reg_apply_active_scan_flags(wiphy, initiator);
333 break; 334 break;
334 } 335 }
335 return;
336} 336}
337 337
338int ath_reg_notifier_apply(struct wiphy *wiphy, 338int ath_reg_notifier_apply(struct wiphy *wiphy,
@@ -360,7 +360,7 @@ EXPORT_SYMBOL(ath_reg_notifier_apply);
360 360
361static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg) 361static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
362{ 362{
363 u16 rd = ath_regd_get_eepromRD(reg); 363 u16 rd = ath_regd_get_eepromRD(reg);
364 int i; 364 int i;
365 365
366 if (rd & COUNTRY_ERD_FLAG) { 366 if (rd & COUNTRY_ERD_FLAG) {
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index 3edbbcf0f548..c8f7090b27d3 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -865,7 +865,6 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
865 865
866 /* low bit of first byte of destination tells us if broadcast */ 866 /* low bit of first byte of destination tells us if broadcast */
867 tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA); 867 tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA);
868 dev->trans_start = jiffies;
869 dev->stats.tx_bytes += len; 868 dev->stats.tx_bytes += len;
870 869
871 spin_unlock_irqrestore(&priv->irqlock, flags); 870 spin_unlock_irqrestore(&priv->irqlock, flags);
diff --git a/drivers/net/wireless/atmel_cs.c b/drivers/net/wireless/atmel_cs.c
index 32407911842f..c2746fc7f2be 100644
--- a/drivers/net/wireless/atmel_cs.c
+++ b/drivers/net/wireless/atmel_cs.c
@@ -85,41 +85,7 @@ static void atmel_release(struct pcmcia_device *link);
85 85
86static void atmel_detach(struct pcmcia_device *p_dev); 86static void atmel_detach(struct pcmcia_device *p_dev);
87 87
88/*
89 You'll also need to prototype all the functions that will actually
90 be used to talk to your device. See 'pcmem_cs' for a good example
91 of a fully self-sufficient driver; the other drivers rely more or
92 less on other parts of the kernel.
93*/
94
95/*
96 A linked list of "instances" of the atmelnet device. Each actual
97 PCMCIA card corresponds to one device instance, and is described
98 by one struct pcmcia_device structure (defined in ds.h).
99
100 You may not want to use a linked list for this -- for example, the
101 memory card driver uses an array of struct pcmcia_device pointers, where minor
102 device numbers are used to derive the corresponding array index.
103*/
104
105/*
106 A driver needs to provide a dev_node_t structure for each device
107 on a card. In some cases, there is only one device per card (for
108 example, ethernet cards, modems). In other cases, there may be
109 many actual or logical devices (SCSI adapters, memory cards with
110 multiple partitions). The dev_node_t structures need to be kept
111 in a linked list starting at the 'dev' field of a struct pcmcia_device
112 structure. We allocate them in the card's private data structure,
113 because they generally shouldn't be allocated dynamically.
114
115 In this case, we also provide a flag to indicate if a device is
116 "stopped" due to a power management event, or card ejection. The
117 device IO routines can use a flag like this to throttle IO to a
118 card that is not ready to accept it.
119*/
120
121typedef struct local_info_t { 88typedef struct local_info_t {
122 dev_node_t node;
123 struct net_device *eth_dev; 89 struct net_device *eth_dev;
124} local_info_t; 90} local_info_t;
125 91
@@ -141,10 +107,6 @@ static int atmel_probe(struct pcmcia_device *p_dev)
141 107
142 dev_dbg(&p_dev->dev, "atmel_attach()\n"); 108 dev_dbg(&p_dev->dev, "atmel_attach()\n");
143 109
144 /* Interrupt setup */
145 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
146 p_dev->irq.Handler = NULL;
147
148 /* 110 /*
149 General socket configuration defaults can go here. In this 111 General socket configuration defaults can go here. In this
150 client, we assume very little, and rely on the CIS for almost 112 client, we assume very little, and rely on the CIS for almost
@@ -226,9 +188,7 @@ static int atmel_config_check(struct pcmcia_device *p_dev,
226 else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM)) 188 else if (dflt->vpp1.present & (1<<CISTPL_POWER_VNOM))
227 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000; 189 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM]/10000;
228 190
229 /* Do we need to allocate an interrupt? */ 191 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
230 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1)
231 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
232 192
233 /* IO window settings */ 193 /* IO window settings */
234 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0; 194 p_dev->io.NumPorts1 = p_dev->io.NumPorts2 = 0;
@@ -278,15 +238,9 @@ static int atmel_config(struct pcmcia_device *link)
278 if (pcmcia_loop_config(link, atmel_config_check, NULL)) 238 if (pcmcia_loop_config(link, atmel_config_check, NULL))
279 goto failed; 239 goto failed;
280 240
281 /* 241 if (!link->irq) {
282 Allocate an interrupt line. Note that this does not assign a 242 dev_err(&link->dev, "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
283 handler to the interrupt, unless the 'Handler' member of the 243 goto failed;
284 irq structure is initialized.
285 */
286 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
287 ret = pcmcia_request_irq(link, &link->irq);
288 if (ret)
289 goto failed;
290 } 244 }
291 245
292 /* 246 /*
@@ -298,14 +252,8 @@ static int atmel_config(struct pcmcia_device *link)
298 if (ret) 252 if (ret)
299 goto failed; 253 goto failed;
300 254
301 if (link->irq.AssignedIRQ == 0) {
302 printk(KERN_ALERT
303 "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.");
304 goto failed;
305 }
306
307 ((local_info_t*)link->priv)->eth_dev = 255 ((local_info_t*)link->priv)->eth_dev =
308 init_atmel_card(link->irq.AssignedIRQ, 256 init_atmel_card(link->irq,
309 link->io.BasePort1, 257 link->io.BasePort1,
310 did ? did->driver_info : ATMEL_FW_TYPE_NONE, 258 did ? did->driver_info : ATMEL_FW_TYPE_NONE,
311 &link->dev, 259 &link->dev,
@@ -315,14 +263,6 @@ static int atmel_config(struct pcmcia_device *link)
315 goto failed; 263 goto failed;
316 264
317 265
318 /*
319 At this point, the dev_node_t structure(s) need to be
320 initialized and arranged in a linked list at link->dev_node.
321 */
322 strcpy(dev->node.dev_name, ((local_info_t*)link->priv)->eth_dev->name );
323 dev->node.major = dev->node.minor = 0;
324 link->dev_node = &dev->node;
325
326 return 0; 266 return 0;
327 267
328 failed: 268 failed:
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index b8807fb12c92..3a003e6803a5 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -104,6 +104,7 @@
104#define B43_MMIO_MACFILTER_CONTROL 0x420 104#define B43_MMIO_MACFILTER_CONTROL 0x420
105#define B43_MMIO_MACFILTER_DATA 0x422 105#define B43_MMIO_MACFILTER_DATA 0x422
106#define B43_MMIO_RCMTA_COUNT 0x43C 106#define B43_MMIO_RCMTA_COUNT 0x43C
107#define B43_MMIO_PSM_PHY_HDR 0x492
107#define B43_MMIO_RADIO_HWENABLED_LO 0x49A 108#define B43_MMIO_RADIO_HWENABLED_LO 0x49A
108#define B43_MMIO_GPIO_CONTROL 0x49C 109#define B43_MMIO_GPIO_CONTROL 0x49C
109#define B43_MMIO_GPIO_MASK 0x49E 110#define B43_MMIO_GPIO_MASK 0x49E
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 9a374ef83a22..7965b70efbab 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4349,11 +4349,10 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4349 b43_set_phytxctl_defaults(dev); 4349 b43_set_phytxctl_defaults(dev);
4350 4350
4351 /* Minimum Contention Window */ 4351 /* Minimum Contention Window */
4352 if (phy->type == B43_PHYTYPE_B) { 4352 if (phy->type == B43_PHYTYPE_B)
4353 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0x1F); 4353 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0x1F);
4354 } else { 4354 else
4355 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0xF); 4355 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0xF);
4356 }
4357 /* Maximum Contention Window */ 4356 /* Maximum Contention Window */
4358 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); 4357 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
4359 4358
@@ -4572,6 +4571,23 @@ static void b43_op_sw_scan_complete_notifier(struct ieee80211_hw *hw)
4572 mutex_unlock(&wl->mutex); 4571 mutex_unlock(&wl->mutex);
4573} 4572}
4574 4573
4574static int b43_op_get_survey(struct ieee80211_hw *hw, int idx,
4575 struct survey_info *survey)
4576{
4577 struct b43_wl *wl = hw_to_b43_wl(hw);
4578 struct b43_wldev *dev = wl->current_dev;
4579 struct ieee80211_conf *conf = &hw->conf;
4580
4581 if (idx != 0)
4582 return -ENOENT;
4583
4584 survey->channel = conf->channel;
4585 survey->filled = SURVEY_INFO_NOISE_DBM;
4586 survey->noise = dev->stats.link_noise;
4587
4588 return 0;
4589}
4590
4575static const struct ieee80211_ops b43_hw_ops = { 4591static const struct ieee80211_ops b43_hw_ops = {
4576 .tx = b43_op_tx, 4592 .tx = b43_op_tx,
4577 .conf_tx = b43_op_conf_tx, 4593 .conf_tx = b43_op_conf_tx,
@@ -4591,6 +4607,7 @@ static const struct ieee80211_ops b43_hw_ops = {
4591 .sta_notify = b43_op_sta_notify, 4607 .sta_notify = b43_op_sta_notify,
4592 .sw_scan_start = b43_op_sw_scan_start_notifier, 4608 .sw_scan_start = b43_op_sw_scan_start_notifier,
4593 .sw_scan_complete = b43_op_sw_scan_complete_notifier, 4609 .sw_scan_complete = b43_op_sw_scan_complete_notifier,
4610 .get_survey = b43_op_get_survey,
4594 .rfkill_poll = b43_rfkill_poll, 4611 .rfkill_poll = b43_rfkill_poll,
4595}; 4612};
4596 4613
@@ -4906,8 +4923,7 @@ static int b43_wireless_init(struct ssb_device *dev)
4906 4923
4907 /* fill hw info */ 4924 /* fill hw info */
4908 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 4925 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
4909 IEEE80211_HW_SIGNAL_DBM | 4926 IEEE80211_HW_SIGNAL_DBM;
4910 IEEE80211_HW_NOISE_DBM;
4911 4927
4912 hw->wiphy->interface_modes = 4928 hw->wiphy->interface_modes =
4913 BIT(NL80211_IFTYPE_AP) | 4929 BIT(NL80211_IFTYPE_AP) |
diff --git a/drivers/net/wireless/b43/pcmcia.c b/drivers/net/wireless/b43/pcmcia.c
index 609e7051e018..0e99b634267c 100644
--- a/drivers/net/wireless/b43/pcmcia.c
+++ b/drivers/net/wireless/b43/pcmcia.c
@@ -98,10 +98,7 @@ static int __devinit b43_pcmcia_probe(struct pcmcia_device *dev)
98 if (res != 0) 98 if (res != 0)
99 goto err_disable; 99 goto err_disable;
100 100
101 dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING; 101 if (!dev->irq)
102 dev->irq.Handler = NULL; /* The handler is registered later. */
103 res = pcmcia_request_irq(dev, &dev->irq);
104 if (res != 0)
105 goto err_disable; 102 goto err_disable;
106 103
107 res = pcmcia_request_configuration(dev, &dev->conf); 104 res = pcmcia_request_configuration(dev, &dev->conf);
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 9c7cd282e46c..3d6b33775964 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -73,6 +73,22 @@ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
73 u16 value, u8 core, bool off); 73 u16 value, u8 core, bool off);
74static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, 74static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
75 u16 value, u8 core); 75 u16 value, u8 core);
76static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel);
77
78static inline bool b43_empty_chanspec(struct b43_chanspec *chanspec)
79{
80 return !chanspec->channel && !chanspec->sideband &&
81 !chanspec->b_width && !chanspec->b_freq;
82}
83
84static inline bool b43_eq_chanspecs(struct b43_chanspec *chanspec1,
85 struct b43_chanspec *chanspec2)
86{
87 return (chanspec1->channel == chanspec2->channel &&
88 chanspec1->sideband == chanspec2->sideband &&
89 chanspec1->b_width == chanspec2->b_width &&
90 chanspec1->b_freq == chanspec2->b_freq);
91}
76 92
77void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) 93void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
78{//TODO 94{//TODO
@@ -89,34 +105,44 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
89} 105}
90 106
91static void b43_chantab_radio_upload(struct b43_wldev *dev, 107static void b43_chantab_radio_upload(struct b43_wldev *dev,
92 const struct b43_nphy_channeltab_entry *e) 108 const struct b43_nphy_channeltab_entry_rev2 *e)
93{ 109{
94 b43_radio_write16(dev, B2055_PLL_REF, e->radio_pll_ref); 110 b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
95 b43_radio_write16(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0); 111 b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
96 b43_radio_write16(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1); 112 b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
97 b43_radio_write16(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail); 113 b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
98 b43_radio_write16(dev, B2055_VCO_CAL1, e->radio_vco_cal1); 114 b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
99 b43_radio_write16(dev, B2055_VCO_CAL2, e->radio_vco_cal2); 115
100 b43_radio_write16(dev, B2055_PLL_LFC1, e->radio_pll_lfc1); 116 b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
101 b43_radio_write16(dev, B2055_PLL_LFR1, e->radio_pll_lfr1); 117 b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
102 b43_radio_write16(dev, B2055_PLL_LFC2, e->radio_pll_lfc2); 118 b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
103 b43_radio_write16(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf); 119 b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
104 b43_radio_write16(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1); 120 b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
105 b43_radio_write16(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2); 121
106 b43_radio_write16(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune); 122 b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
107 b43_radio_write16(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune); 123 b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
108 b43_radio_write16(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1); 124 b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
109 b43_radio_write16(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn); 125 b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
110 b43_radio_write16(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim); 126 b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
111 b43_radio_write16(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune); 127
112 b43_radio_write16(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune); 128 b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
113 b43_radio_write16(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1); 129 b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
114 b43_radio_write16(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn); 130 b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
115 b43_radio_write16(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim); 131 b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
132 b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
133
134 b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
135 b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
136 b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
137 b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
138 b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
139
140 b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
141 b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
116} 142}
117 143
118static void b43_chantab_phy_upload(struct b43_wldev *dev, 144static void b43_chantab_phy_upload(struct b43_wldev *dev,
119 const struct b43_nphy_channeltab_entry *e) 145 const struct b43_phy_n_sfo_cfg *e)
120{ 146{
121 b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a); 147 b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
122 b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2); 148 b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
@@ -131,34 +157,20 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
131 //TODO 157 //TODO
132} 158}
133 159
134/* Tune the hardware to a new channel. */
135static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
136{
137 const struct b43_nphy_channeltab_entry *tabent;
138 160
139 tabent = b43_nphy_get_chantabent(dev, channel); 161/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
140 if (!tabent) 162static void b43_radio_2055_setup(struct b43_wldev *dev,
141 return -ESRCH; 163 const struct b43_nphy_channeltab_entry_rev2 *e)
164{
165 B43_WARN_ON(dev->phy.rev >= 3);
142 166
143 //FIXME enable/disable band select upper20 in RXCTL 167 b43_chantab_radio_upload(dev, e);
144 if (0 /*FIXME 5Ghz*/)
145 b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, 0x20);
146 else
147 b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, 0x50);
148 b43_chantab_radio_upload(dev, tabent);
149 udelay(50); 168 udelay(50);
150 b43_radio_write16(dev, B2055_VCO_CAL10, 5); 169 b43_radio_write(dev, B2055_VCO_CAL10, 0x05);
151 b43_radio_write16(dev, B2055_VCO_CAL10, 45); 170 b43_radio_write(dev, B2055_VCO_CAL10, 0x45);
152 b43_radio_write16(dev, B2055_VCO_CAL10, 65); 171 b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
172 b43_radio_write(dev, B2055_VCO_CAL10, 0x65);
153 udelay(300); 173 udelay(300);
154 if (0 /*FIXME 5Ghz*/)
155 b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
156 else
157 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
158 b43_chantab_phy_upload(dev, tabent);
159 b43_nphy_tx_power_fix(dev);
160
161 return 0;
162} 174}
163 175
164static void b43_radio_init2055_pre(struct b43_wldev *dev) 176static void b43_radio_init2055_pre(struct b43_wldev *dev)
@@ -174,52 +186,64 @@ static void b43_radio_init2055_pre(struct b43_wldev *dev)
174 186
175static void b43_radio_init2055_post(struct b43_wldev *dev) 187static void b43_radio_init2055_post(struct b43_wldev *dev)
176{ 188{
189 struct b43_phy_n *nphy = dev->phy.n;
177 struct ssb_sprom *sprom = &(dev->dev->bus->sprom); 190 struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
178 struct ssb_boardinfo *binfo = &(dev->dev->bus->boardinfo); 191 struct ssb_boardinfo *binfo = &(dev->dev->bus->boardinfo);
179 int i; 192 int i;
180 u16 val; 193 u16 val;
194 bool workaround = false;
195
196 if (sprom->revision < 4)
197 workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM ||
198 binfo->type != 0x46D ||
199 binfo->rev < 0x41);
200 else
201 workaround = ((sprom->boardflags_hi & B43_BFH_NOPA) == 0);
181 202
182 b43_radio_mask(dev, B2055_MASTER1, 0xFFF3); 203 b43_radio_mask(dev, B2055_MASTER1, 0xFFF3);
183 msleep(1); 204 if (workaround) {
184 if ((sprom->revision != 4) || 205 b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
185 !(sprom->boardflags_hi & B43_BFH_RSSIINV)) { 206 b43_radio_mask(dev, B2055_C2_RX_BB_REG, 0x7F);
186 if ((binfo->vendor != PCI_VENDOR_ID_BROADCOM) ||
187 (binfo->type != 0x46D) ||
188 (binfo->rev < 0x41)) {
189 b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
190 b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
191 msleep(1);
192 }
193 } 207 }
194 b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0x3F, 0x2C); 208 b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0xFFC0, 0x2C);
195 msleep(1); 209 b43_radio_write(dev, B2055_CAL_MISC, 0x3C);
196 b43_radio_write16(dev, B2055_CAL_MISC, 0x3C);
197 msleep(1);
198 b43_radio_mask(dev, B2055_CAL_MISC, 0xFFBE); 210 b43_radio_mask(dev, B2055_CAL_MISC, 0xFFBE);
199 msleep(1);
200 b43_radio_set(dev, B2055_CAL_LPOCTL, 0x80); 211 b43_radio_set(dev, B2055_CAL_LPOCTL, 0x80);
201 msleep(1);
202 b43_radio_set(dev, B2055_CAL_MISC, 0x1); 212 b43_radio_set(dev, B2055_CAL_MISC, 0x1);
203 msleep(1); 213 msleep(1);
204 b43_radio_set(dev, B2055_CAL_MISC, 0x40); 214 b43_radio_set(dev, B2055_CAL_MISC, 0x40);
205 msleep(1); 215 for (i = 0; i < 200; i++) {
206 for (i = 0; i < 100; i++) { 216 val = b43_radio_read(dev, B2055_CAL_COUT2);
207 val = b43_radio_read16(dev, B2055_CAL_COUT2); 217 if (val & 0x80) {
208 if (val & 0x80) 218 i = 0;
209 break; 219 break;
220 }
210 udelay(10); 221 udelay(10);
211 } 222 }
212 msleep(1); 223 if (i)
224 b43err(dev->wl, "radio post init timeout\n");
213 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F); 225 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
214 msleep(1);
215 nphy_channel_switch(dev, dev->phy.channel); 226 nphy_channel_switch(dev, dev->phy.channel);
216 b43_radio_write16(dev, B2055_C1_RX_BB_LPF, 0x9); 227 b43_radio_write(dev, B2055_C1_RX_BB_LPF, 0x9);
217 b43_radio_write16(dev, B2055_C2_RX_BB_LPF, 0x9); 228 b43_radio_write(dev, B2055_C2_RX_BB_LPF, 0x9);
218 b43_radio_write16(dev, B2055_C1_RX_BB_MIDACHP, 0x83); 229 b43_radio_write(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
219 b43_radio_write16(dev, B2055_C2_RX_BB_MIDACHP, 0x83); 230 b43_radio_write(dev, B2055_C2_RX_BB_MIDACHP, 0x83);
231 b43_radio_maskset(dev, B2055_C1_LNA_GAINBST, 0xFFF8, 0x6);
232 b43_radio_maskset(dev, B2055_C2_LNA_GAINBST, 0xFFF8, 0x6);
233 if (!nphy->gain_boost) {
234 b43_radio_set(dev, B2055_C1_RX_RFSPC1, 0x2);
235 b43_radio_set(dev, B2055_C2_RX_RFSPC1, 0x2);
236 } else {
237 b43_radio_mask(dev, B2055_C1_RX_RFSPC1, 0xFFFD);
238 b43_radio_mask(dev, B2055_C2_RX_RFSPC1, 0xFFFD);
239 }
240 udelay(2);
220} 241}
221 242
222/* Initialize a Broadcom 2055 N-radio */ 243/*
244 * Initialize a Broadcom 2055 N-radio
245 * http://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init
246 */
223static void b43_radio_init2055(struct b43_wldev *dev) 247static void b43_radio_init2055(struct b43_wldev *dev)
224{ 248{
225 b43_radio_init2055_pre(dev); 249 b43_radio_init2055_pre(dev);
@@ -230,16 +254,15 @@ static void b43_radio_init2055(struct b43_wldev *dev)
230 b43_radio_init2055_post(dev); 254 b43_radio_init2055_post(dev);
231} 255}
232 256
233void b43_nphy_radio_turn_on(struct b43_wldev *dev) 257/*
258 * Initialize a Broadcom 2056 N-radio
259 * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
260 */
261static void b43_radio_init2056(struct b43_wldev *dev)
234{ 262{
235 b43_radio_init2055(dev); 263 /* TODO */
236} 264}
237 265
238void b43_nphy_radio_turn_off(struct b43_wldev *dev)
239{
240 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
241 ~B43_NPHY_RFCTL_CMD_EN);
242}
243 266
244/* 267/*
245 * Upload the N-PHY tables. 268 * Upload the N-PHY tables.
@@ -647,6 +670,41 @@ static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
647 clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES); 670 clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
648} 671}
649 672
673/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
674static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
675{
676 if (dev->phy.rev >= 3) {
677 if (!init)
678 return;
679 if (0 /* FIXME */) {
680 b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
681 b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
682 b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
683 b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
684 }
685 } else {
686 b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
687 b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
688
689 ssb_chipco_gpio_control(&dev->dev->bus->chipco, 0xFC00,
690 0xFC00);
691 b43_write32(dev, B43_MMIO_MACCTL,
692 b43_read32(dev, B43_MMIO_MACCTL) &
693 ~B43_MACCTL_GPOUTSMSK);
694 b43_write16(dev, B43_MMIO_GPIO_MASK,
695 b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
696 b43_write16(dev, B43_MMIO_GPIO_CONTROL,
697 b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
698
699 if (init) {
700 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
701 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
702 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
703 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
704 }
705 }
706}
707
650/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */ 708/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
651static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) 709static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
652{ 710{
@@ -723,7 +781,7 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
723{ 781{
724 struct b43_phy_n *nphy = dev->phy.n; 782 struct b43_phy_n *nphy = dev->phy.n;
725 783
726 unsigned int channel; 784 u8 channel = nphy->radio_chanspec.channel;
727 int tone[2] = { 57, 58 }; 785 int tone[2] = { 57, 58 };
728 u32 noise[2] = { 0x3FF, 0x3FF }; 786 u32 noise[2] = { 0x3FF, 0x3FF };
729 787
@@ -732,8 +790,6 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
732 if (nphy->hang_avoid) 790 if (nphy->hang_avoid)
733 b43_nphy_stay_in_carrier_search(dev, 1); 791 b43_nphy_stay_in_carrier_search(dev, 1);
734 792
735 /* FIXME: channel = radio_chanspec */
736
737 if (nphy->gband_spurwar_en) { 793 if (nphy->gband_spurwar_en) {
738 /* TODO: N PHY Adjust Analog Pfbw (7) */ 794 /* TODO: N PHY Adjust Analog Pfbw (7) */
739 if (channel == 11 && dev->phy.is_40mhz) 795 if (channel == 11 && dev->phy.is_40mhz)
@@ -779,6 +835,62 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
779 b43_nphy_stay_in_carrier_search(dev, 0); 835 b43_nphy_stay_in_carrier_search(dev, 0);
780} 836}
781 837
838/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
839static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
840{
841 struct b43_phy_n *nphy = dev->phy.n;
842
843 u8 i;
844 s16 tmp;
845 u16 data[4];
846 s16 gain[2];
847 u16 minmax[2];
848 u16 lna_gain[4] = { -2, 10, 19, 25 };
849
850 if (nphy->hang_avoid)
851 b43_nphy_stay_in_carrier_search(dev, 1);
852
853 if (nphy->gain_boost) {
854 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
855 gain[0] = 6;
856 gain[1] = 6;
857 } else {
858 tmp = 40370 - 315 * nphy->radio_chanspec.channel;
859 gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
860 tmp = 23242 - 224 * nphy->radio_chanspec.channel;
861 gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
862 }
863 } else {
864 gain[0] = 0;
865 gain[1] = 0;
866 }
867
868 for (i = 0; i < 2; i++) {
869 if (nphy->elna_gain_config) {
870 data[0] = 19 + gain[i];
871 data[1] = 25 + gain[i];
872 data[2] = 25 + gain[i];
873 data[3] = 25 + gain[i];
874 } else {
875 data[0] = lna_gain[0] + gain[i];
876 data[1] = lna_gain[1] + gain[i];
877 data[2] = lna_gain[2] + gain[i];
878 data[3] = lna_gain[3] + gain[i];
879 }
880 b43_ntab_write_bulk(dev, B43_NTAB16(10, 8), 4, data);
881
882 minmax[i] = 23 + gain[i];
883 }
884
885 b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
886 minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
887 b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
888 minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
889
890 if (nphy->hang_avoid)
891 b43_nphy_stay_in_carrier_search(dev, 0);
892}
893
782/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ 894/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
783static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev) 895static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
784{ 896{
@@ -863,7 +975,7 @@ static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
863 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 975 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
864 (code << 8 | 0x7C)); 976 (code << 8 | 0x7C));
865 977
866 /* TODO: b43_nphy_adjust_lna_gain_table(dev); */ 978 b43_nphy_adjust_lna_gain_table(dev);
867 979
868 if (nphy->elna_gain_config) { 980 if (nphy->elna_gain_config) {
869 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808); 981 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
@@ -1970,12 +2082,12 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
1970 u16 *rssical_phy_regs = NULL; 2082 u16 *rssical_phy_regs = NULL;
1971 2083
1972 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 2084 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
1973 if (!nphy->rssical_chanspec_2G) 2085 if (b43_empty_chanspec(&nphy->rssical_chanspec_2G))
1974 return; 2086 return;
1975 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G; 2087 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
1976 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G; 2088 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
1977 } else { 2089 } else {
1978 if (!nphy->rssical_chanspec_5G) 2090 if (b43_empty_chanspec(&nphy->rssical_chanspec_5G))
1979 return; 2091 return;
1980 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G; 2092 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
1981 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G; 2093 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
@@ -2395,7 +2507,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
2395 2507
2396 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL; 2508 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
2397 u16 *txcal_radio_regs = NULL; 2509 u16 *txcal_radio_regs = NULL;
2398 u8 *iqcal_chanspec; 2510 struct b43_chanspec *iqcal_chanspec;
2399 u16 *table = NULL; 2511 u16 *table = NULL;
2400 2512
2401 if (nphy->hang_avoid) 2513 if (nphy->hang_avoid)
@@ -2451,12 +2563,12 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
2451 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL; 2563 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
2452 2564
2453 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 2565 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
2454 if (nphy->iqcal_chanspec_2G == 0) 2566 if (b43_empty_chanspec(&nphy->iqcal_chanspec_2G))
2455 return; 2567 return;
2456 table = nphy->cal_cache.txcal_coeffs_2G; 2568 table = nphy->cal_cache.txcal_coeffs_2G;
2457 loft = &nphy->cal_cache.txcal_coeffs_2G[5]; 2569 loft = &nphy->cal_cache.txcal_coeffs_2G[5];
2458 } else { 2570 } else {
2459 if (nphy->iqcal_chanspec_5G == 0) 2571 if (b43_empty_chanspec(&nphy->iqcal_chanspec_5G))
2460 return; 2572 return;
2461 table = nphy->cal_cache.txcal_coeffs_5G; 2573 table = nphy->cal_cache.txcal_coeffs_5G;
2462 loft = &nphy->cal_cache.txcal_coeffs_5G[5]; 2574 loft = &nphy->cal_cache.txcal_coeffs_5G[5];
@@ -2689,7 +2801,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
2689 } 2801 }
2690 b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, 2802 b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
2691 buffer); 2803 buffer);
2692 b43_ntab_write_bulk(dev, B43_NTAB16(15, 101), 2, 2804 b43_ntab_read_bulk(dev, B43_NTAB16(15, 101), 2,
2693 buffer); 2805 buffer);
2694 b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2, 2806 b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
2695 buffer); 2807 buffer);
@@ -2701,8 +2813,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
2701 b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length, 2813 b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
2702 nphy->txiqlocal_bestc); 2814 nphy->txiqlocal_bestc);
2703 nphy->txiqlocal_coeffsvalid = true; 2815 nphy->txiqlocal_coeffsvalid = true;
2704 /* TODO: Set nphy->txiqlocal_chanspec to 2816 nphy->txiqlocal_chanspec = nphy->radio_chanspec;
2705 the current channel */
2706 } else { 2817 } else {
2707 length = 11; 2818 length = 11;
2708 if (dev->phy.rev < 3) 2819 if (dev->phy.rev < 3)
@@ -2737,7 +2848,8 @@ static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
2737 u16 buffer[7]; 2848 u16 buffer[7];
2738 bool equal = true; 2849 bool equal = true;
2739 2850
2740 if (!nphy->txiqlocal_coeffsvalid || 1 /* FIXME */) 2851 if (!nphy->txiqlocal_coeffsvalid ||
2852 b43_eq_chanspecs(&nphy->txiqlocal_chanspec, &nphy->radio_chanspec))
2741 return; 2853 return;
2742 2854
2743 b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer); 2855 b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
@@ -3092,9 +3204,11 @@ int b43_phy_initn(struct b43_wldev *dev)
3092 do_rssi_cal = false; 3204 do_rssi_cal = false;
3093 if (phy->rev >= 3) { 3205 if (phy->rev >= 3) {
3094 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 3206 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
3095 do_rssi_cal = (nphy->rssical_chanspec_2G == 0); 3207 do_rssi_cal =
3208 b43_empty_chanspec(&nphy->rssical_chanspec_2G);
3096 else 3209 else
3097 do_rssi_cal = (nphy->rssical_chanspec_5G == 0); 3210 do_rssi_cal =
3211 b43_empty_chanspec(&nphy->rssical_chanspec_5G);
3098 3212
3099 if (do_rssi_cal) 3213 if (do_rssi_cal)
3100 b43_nphy_rssi_cal(dev); 3214 b43_nphy_rssi_cal(dev);
@@ -3106,9 +3220,9 @@ int b43_phy_initn(struct b43_wldev *dev)
3106 3220
3107 if (!((nphy->measure_hold & 0x6) != 0)) { 3221 if (!((nphy->measure_hold & 0x6) != 0)) {
3108 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 3222 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
3109 do_cal = (nphy->iqcal_chanspec_2G == 0); 3223 do_cal = b43_empty_chanspec(&nphy->iqcal_chanspec_2G);
3110 else 3224 else
3111 do_cal = (nphy->iqcal_chanspec_5G == 0); 3225 do_cal = b43_empty_chanspec(&nphy->iqcal_chanspec_5G);
3112 3226
3113 if (nphy->mute) 3227 if (nphy->mute)
3114 do_cal = false; 3228 do_cal = false;
@@ -3117,7 +3231,7 @@ int b43_phy_initn(struct b43_wldev *dev)
3117 target = b43_nphy_get_tx_gains(dev); 3231 target = b43_nphy_get_tx_gains(dev);
3118 3232
3119 if (nphy->antsel_type == 2) 3233 if (nphy->antsel_type == 2)
3120 ;/*TODO NPHY Superswitch Init with argument 1*/ 3234 b43_nphy_superswitch_init(dev, true);
3121 if (nphy->perical != 2) { 3235 if (nphy->perical != 2) {
3122 b43_nphy_rssi_cal(dev); 3236 b43_nphy_rssi_cal(dev);
3123 if (phy->rev >= 3) { 3237 if (phy->rev >= 3) {
@@ -3155,6 +3269,133 @@ int b43_phy_initn(struct b43_wldev *dev)
3155 return 0; 3269 return 0;
3156} 3270}
3157 3271
3272/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
3273static void b43_nphy_chanspec_setup(struct b43_wldev *dev,
3274 const struct b43_phy_n_sfo_cfg *e,
3275 struct b43_chanspec chanspec)
3276{
3277 struct b43_phy *phy = &dev->phy;
3278 struct b43_phy_n *nphy = dev->phy.n;
3279
3280 u16 tmp;
3281 u32 tmp32;
3282
3283 tmp = b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
3284 if (chanspec.b_freq == 1 && tmp == 0) {
3285 tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
3286 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
3287 b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
3288 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
3289 b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
3290 } else if (chanspec.b_freq == 1) {
3291 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
3292 tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
3293 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
3294 b43_phy_mask(dev, B43_PHY_B_BBCFG, (u16)~0xC000);
3295 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
3296 }
3297
3298 b43_chantab_phy_upload(dev, e);
3299
3300 tmp = chanspec.channel;
3301 if (chanspec.b_freq == 1)
3302 tmp |= 0x0100;
3303 if (chanspec.b_width == 3)
3304 tmp |= 0x0200;
3305 b43_shm_write16(dev, B43_SHM_SHARED, 0xA0, tmp);
3306
3307 if (nphy->radio_chanspec.channel == 14) {
3308 b43_nphy_classifier(dev, 2, 0);
3309 b43_phy_set(dev, B43_PHY_B_TEST, 0x0800);
3310 } else {
3311 b43_nphy_classifier(dev, 2, 2);
3312 if (chanspec.b_freq == 2)
3313 b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
3314 }
3315
3316 if (nphy->txpwrctrl)
3317 b43_nphy_tx_power_fix(dev);
3318
3319 if (dev->phy.rev < 3)
3320 b43_nphy_adjust_lna_gain_table(dev);
3321
3322 b43_nphy_tx_lp_fbw(dev);
3323
3324 if (dev->phy.rev >= 3 && 0) {
3325 /* TODO */
3326 }
3327
3328 b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
3329
3330 if (phy->rev >= 3)
3331 b43_nphy_spur_workaround(dev);
3332}
3333
3334/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */
3335static int b43_nphy_set_chanspec(struct b43_wldev *dev,
3336 struct b43_chanspec chanspec)
3337{
3338 struct b43_phy_n *nphy = dev->phy.n;
3339
3340 const struct b43_nphy_channeltab_entry_rev2 *tabent_r2;
3341 const struct b43_nphy_channeltab_entry_rev3 *tabent_r3;
3342
3343 u8 tmp;
3344 u8 channel = chanspec.channel;
3345
3346 if (dev->phy.rev >= 3) {
3347 /* TODO */
3348 tabent_r3 = NULL;
3349 if (!tabent_r3)
3350 return -ESRCH;
3351 } else {
3352 tabent_r2 = b43_nphy_get_chantabent_rev2(dev, channel);
3353 if (!tabent_r2)
3354 return -ESRCH;
3355 }
3356
3357 nphy->radio_chanspec = chanspec;
3358
3359 if (chanspec.b_width != nphy->b_width)
3360 ; /* TODO: BMAC BW Set (chanspec.b_width) */
3361
3362 /* TODO: use defines */
3363 if (chanspec.b_width == 3) {
3364 if (chanspec.sideband == 2)
3365 b43_phy_set(dev, B43_NPHY_RXCTL,
3366 B43_NPHY_RXCTL_BSELU20);
3367 else
3368 b43_phy_mask(dev, B43_NPHY_RXCTL,
3369 ~B43_NPHY_RXCTL_BSELU20);
3370 }
3371
3372 if (dev->phy.rev >= 3) {
3373 tmp = (chanspec.b_freq == 1) ? 4 : 0;
3374 b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
3375 /* TODO: PHY Radio2056 Setup (dev, tabent_r3); */
3376 b43_nphy_chanspec_setup(dev, &(tabent_r3->phy_regs), chanspec);
3377 } else {
3378 tmp = (chanspec.b_freq == 1) ? 0x0020 : 0x0050;
3379 b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp);
3380 b43_radio_2055_setup(dev, tabent_r2);
3381 b43_nphy_chanspec_setup(dev, &(tabent_r2->phy_regs), chanspec);
3382 }
3383
3384 return 0;
3385}
3386
3387/* Tune the hardware to a new channel */
3388static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
3389{
3390 struct b43_phy_n *nphy = dev->phy.n;
3391
3392 struct b43_chanspec chanspec;
3393 chanspec = nphy->radio_chanspec;
3394 chanspec.channel = channel;
3395
3396 return b43_nphy_set_chanspec(dev, chanspec);
3397}
3398
3158static int b43_nphy_op_allocate(struct b43_wldev *dev) 3399static int b43_nphy_op_allocate(struct b43_wldev *dev)
3159{ 3400{
3160 struct b43_phy_n *nphy; 3401 struct b43_phy_n *nphy;
@@ -3243,9 +3484,43 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
3243 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); 3484 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
3244} 3485}
3245 3486
3487/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
3246static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, 3488static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
3247 bool blocked) 3489 bool blocked)
3248{//TODO 3490{
3491 struct b43_phy_n *nphy = dev->phy.n;
3492
3493 if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
3494 b43err(dev->wl, "MAC not suspended\n");
3495
3496 if (blocked) {
3497 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
3498 ~B43_NPHY_RFCTL_CMD_CHIP0PU);
3499 if (dev->phy.rev >= 3) {
3500 b43_radio_mask(dev, 0x09, ~0x2);
3501
3502 b43_radio_write(dev, 0x204D, 0);
3503 b43_radio_write(dev, 0x2053, 0);
3504 b43_radio_write(dev, 0x2058, 0);
3505 b43_radio_write(dev, 0x205E, 0);
3506 b43_radio_mask(dev, 0x2062, ~0xF0);
3507 b43_radio_write(dev, 0x2064, 0);
3508
3509 b43_radio_write(dev, 0x304D, 0);
3510 b43_radio_write(dev, 0x3053, 0);
3511 b43_radio_write(dev, 0x3058, 0);
3512 b43_radio_write(dev, 0x305E, 0);
3513 b43_radio_mask(dev, 0x3062, ~0xF0);
3514 b43_radio_write(dev, 0x3064, 0);
3515 }
3516 } else {
3517 if (dev->phy.rev >= 3) {
3518 b43_radio_init2056(dev);
3519 b43_nphy_set_chanspec(dev, nphy->radio_chanspec);
3520 } else {
3521 b43_radio_init2055(dev);
3522 }
3523 }
3249} 3524}
3250 3525
3251static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on) 3526static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 403aad3f894f..8b6d570dd0aa 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -711,6 +711,8 @@
711#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */ 711#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
712#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */ 712#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
713 713
714#define B43_PHY_B_BBCFG B43_PHY_N_BMODE(0x001) /* BB config */
715#define B43_PHY_B_TEST B43_PHY_N_BMODE(0x00A)
714 716
715 717
716/* Broadcom 2055 radio registers */ 718/* Broadcom 2055 radio registers */
@@ -924,6 +926,13 @@
924 926
925struct b43_wldev; 927struct b43_wldev;
926 928
929struct b43_chanspec {
930 u8 channel;
931 u8 sideband;
932 u8 b_width;
933 u8 b_freq;
934};
935
927struct b43_phy_n_iq_comp { 936struct b43_phy_n_iq_comp {
928 s16 a0; 937 s16 a0;
929 s16 b0; 938 s16 b0;
@@ -975,7 +984,8 @@ struct b43_phy_n {
975 u16 papd_epsilon_offset[2]; 984 u16 papd_epsilon_offset[2];
976 s32 preamble_override; 985 s32 preamble_override;
977 u32 bb_mult_save; 986 u32 bb_mult_save;
978 u16 radio_chanspec; 987 u8 b_width;
988 struct b43_chanspec radio_chanspec;
979 989
980 bool gain_boost; 990 bool gain_boost;
981 bool elna_gain_config; 991 bool elna_gain_config;
@@ -991,6 +1001,7 @@ struct b43_phy_n {
991 u16 txiqlocal_bestc[11]; 1001 u16 txiqlocal_bestc[11];
992 bool txiqlocal_coeffsvalid; 1002 bool txiqlocal_coeffsvalid;
993 struct b43_phy_n_txpwrindex txpwrindex[2]; 1003 struct b43_phy_n_txpwrindex txpwrindex[2];
1004 struct b43_chanspec txiqlocal_chanspec;
994 1005
995 u8 txrx_chain; 1006 u8 txrx_chain;
996 u16 tx_rx_cal_phy_saveregs[11]; 1007 u16 tx_rx_cal_phy_saveregs[11];
@@ -1006,12 +1017,12 @@ struct b43_phy_n {
1006 bool gband_spurwar_en; 1017 bool gband_spurwar_en;
1007 1018
1008 bool ipa2g_on; 1019 bool ipa2g_on;
1009 u8 iqcal_chanspec_2G; 1020 struct b43_chanspec iqcal_chanspec_2G;
1010 u8 rssical_chanspec_2G; 1021 struct b43_chanspec rssical_chanspec_2G;
1011 1022
1012 bool ipa5g_on; 1023 bool ipa5g_on;
1013 u8 iqcal_chanspec_5G; 1024 struct b43_chanspec iqcal_chanspec_5G;
1014 u8 rssical_chanspec_5G; 1025 struct b43_chanspec rssical_chanspec_5G;
1015 1026
1016 struct b43_phy_n_rssical_cache rssical_cache; 1027 struct b43_phy_n_rssical_cache rssical_cache;
1017 struct b43_phy_n_cal_cache cal_cache; 1028 struct b43_phy_n_cal_cache cal_cache;
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index a00d509150f7..d96e870ab8fe 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -318,14 +318,14 @@ void b2055_upload_inittab(struct b43_wldev *dev,
318 .radio_c2_tx_mxbgtrim = r21 318 .radio_c2_tx_mxbgtrim = r21
319 319
320#define PHYREGS(r0, r1, r2, r3, r4, r5) \ 320#define PHYREGS(r0, r1, r2, r3, r4, r5) \
321 .phy_bw1a = r0, \ 321 .phy_regs.phy_bw1a = r0, \
322 .phy_bw2 = r1, \ 322 .phy_regs.phy_bw2 = r1, \
323 .phy_bw3 = r2, \ 323 .phy_regs.phy_bw3 = r2, \
324 .phy_bw4 = r3, \ 324 .phy_regs.phy_bw4 = r3, \
325 .phy_bw5 = r4, \ 325 .phy_regs.phy_bw5 = r4, \
326 .phy_bw6 = r5 326 .phy_regs.phy_bw6 = r5
327 327
328static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = { 328static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab[] = {
329 { .channel = 184, 329 { .channel = 184,
330 .freq = 4920, /* MHz */ 330 .freq = 4920, /* MHz */
331 .unk2 = 3280, 331 .unk2 = 3280,
@@ -1320,10 +1320,10 @@ static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = {
1320 }, 1320 },
1321}; 1321};
1322 1322
1323const struct b43_nphy_channeltab_entry * 1323const struct b43_nphy_channeltab_entry_rev2 *
1324b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel) 1324b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel)
1325{ 1325{
1326 const struct b43_nphy_channeltab_entry *e; 1326 const struct b43_nphy_channeltab_entry_rev2 *e;
1327 unsigned int i; 1327 unsigned int i;
1328 1328
1329 for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab); i++) { 1329 for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab); i++) {
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 9c1c6ecd3672..8fc1da9f8fe5 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -4,9 +4,22 @@
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6 6
7struct b43_nphy_channeltab_entry { 7struct b43_phy_n_sfo_cfg {
8 u16 phy_bw1a;
9 u16 phy_bw2;
10 u16 phy_bw3;
11 u16 phy_bw4;
12 u16 phy_bw5;
13 u16 phy_bw6;
14};
15
16struct b43_nphy_channeltab_entry_rev2 {
8 /* The channel number */ 17 /* The channel number */
9 u8 channel; 18 u8 channel;
19 /* The channel frequency in MHz */
20 u16 freq;
21 /* An unknown value */
22 u16 unk2;
10 /* Radio register values on channelswitch */ 23 /* Radio register values on channelswitch */
11 u8 radio_pll_ref; 24 u8 radio_pll_ref;
12 u8 radio_rf_pllmod0; 25 u8 radio_rf_pllmod0;
@@ -31,16 +44,18 @@ struct b43_nphy_channeltab_entry {
31 u8 radio_c2_tx_pgapadtn; 44 u8 radio_c2_tx_pgapadtn;
32 u8 radio_c2_tx_mxbgtrim; 45 u8 radio_c2_tx_mxbgtrim;
33 /* PHY register values on channelswitch */ 46 /* PHY register values on channelswitch */
34 u16 phy_bw1a; 47 struct b43_phy_n_sfo_cfg phy_regs;
35 u16 phy_bw2; 48};
36 u16 phy_bw3; 49
37 u16 phy_bw4; 50struct b43_nphy_channeltab_entry_rev3 {
38 u16 phy_bw5; 51 /* The channel number */
39 u16 phy_bw6; 52 u8 channel;
40 /* The channel frequency in MHz */ 53 /* The channel frequency in MHz */
41 u16 freq; 54 u16 freq;
42 /* An unknown value */ 55 /* Radio register values on channelswitch */
43 u16 unk2; 56 /* TODO */
57 /* PHY register values on channelswitch */
58 struct b43_phy_n_sfo_cfg phy_regs;
44}; 59};
45 60
46 61
@@ -77,8 +92,8 @@ void b2055_upload_inittab(struct b43_wldev *dev,
77 92
78/* Get the NPHY Channel Switch Table entry for a channel number. 93/* Get the NPHY Channel Switch Table entry for a channel number.
79 * Returns NULL on failure to find an entry. */ 94 * Returns NULL on failure to find an entry. */
80const struct b43_nphy_channeltab_entry * 95const struct b43_nphy_channeltab_entry_rev2 *
81b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel); 96b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel);
82 97
83 98
84/* The N-PHY tables. */ 99/* The N-PHY tables. */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index eda06529ef5f..e6b0528f3b52 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -610,7 +610,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
610 } 610 }
611 611
612 /* Link quality statistics */ 612 /* Link quality statistics */
613 status.noise = dev->stats.link_noise;
614 if ((chanstat & B43_RX_CHAN_PHYTYPE) == B43_PHYTYPE_N) { 613 if ((chanstat & B43_RX_CHAN_PHYTYPE) == B43_PHYTYPE_N) {
615// s8 rssi = max(rxhdr->power0, rxhdr->power1); 614// s8 rssi = max(rxhdr->power0, rxhdr->power1);
616 //TODO: Find out what the rssi value is (dBm or percentage?) 615 //TODO: Find out what the rssi value is (dBm or percentage?)
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index bb2dd9329aa0..1713f5f7a58b 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3482,6 +3482,23 @@ static int b43legacy_op_beacon_set_tim(struct ieee80211_hw *hw,
3482 return 0; 3482 return 0;
3483} 3483}
3484 3484
3485static int b43legacy_op_get_survey(struct ieee80211_hw *hw, int idx,
3486 struct survey_info *survey)
3487{
3488 struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
3489 struct b43legacy_wldev *dev = wl->current_dev;
3490 struct ieee80211_conf *conf = &hw->conf;
3491
3492 if (idx != 0)
3493 return -ENOENT;
3494
3495 survey->channel = conf->channel;
3496 survey->filled = SURVEY_INFO_NOISE_DBM;
3497 survey->noise = dev->stats.link_noise;
3498
3499 return 0;
3500}
3501
3485static const struct ieee80211_ops b43legacy_hw_ops = { 3502static const struct ieee80211_ops b43legacy_hw_ops = {
3486 .tx = b43legacy_op_tx, 3503 .tx = b43legacy_op_tx,
3487 .conf_tx = b43legacy_op_conf_tx, 3504 .conf_tx = b43legacy_op_conf_tx,
@@ -3494,6 +3511,7 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
3494 .start = b43legacy_op_start, 3511 .start = b43legacy_op_start,
3495 .stop = b43legacy_op_stop, 3512 .stop = b43legacy_op_stop,
3496 .set_tim = b43legacy_op_beacon_set_tim, 3513 .set_tim = b43legacy_op_beacon_set_tim,
3514 .get_survey = b43legacy_op_get_survey,
3497 .rfkill_poll = b43legacy_rfkill_poll, 3515 .rfkill_poll = b43legacy_rfkill_poll,
3498}; 3516};
3499 3517
@@ -3769,8 +3787,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
3769 3787
3770 /* fill hw info */ 3788 /* fill hw info */
3771 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS | 3789 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
3772 IEEE80211_HW_SIGNAL_DBM | 3790 IEEE80211_HW_SIGNAL_DBM;
3773 IEEE80211_HW_NOISE_DBM;
3774 hw->wiphy->interface_modes = 3791 hw->wiphy->interface_modes =
3775 BIT(NL80211_IFTYPE_AP) | 3792 BIT(NL80211_IFTYPE_AP) |
3776 BIT(NL80211_IFTYPE_STATION) | 3793 BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 9c8882d9275e..7d177d97f1f7 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -548,7 +548,6 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
548 (phystat0 & B43legacy_RX_PHYST0_OFDM), 548 (phystat0 & B43legacy_RX_PHYST0_OFDM),
549 (phystat0 & B43legacy_RX_PHYST0_GAINCTL), 549 (phystat0 & B43legacy_RX_PHYST0_GAINCTL),
550 (phystat3 & B43legacy_RX_PHYST3_TRSTATE)); 550 (phystat3 & B43legacy_RX_PHYST3_TRSTATE));
551 status.noise = dev->stats.link_noise;
552 /* change to support A PHY */ 551 /* change to support A PHY */
553 if (phystat0 & B43legacy_RX_PHYST0_OFDM) 552 if (phystat0 & B43legacy_RX_PHYST0_OFDM)
554 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false); 553 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
diff --git a/drivers/net/wireless/hostap/hostap_80211_rx.c b/drivers/net/wireless/hostap/hostap_80211_rx.c
index f4c56121d387..e0b3e8d406b3 100644
--- a/drivers/net/wireless/hostap/hostap_80211_rx.c
+++ b/drivers/net/wireless/hostap/hostap_80211_rx.c
@@ -355,8 +355,7 @@ static struct hostap_bss_info *__hostap_add_bss(local_info_t *local, u8 *bssid,
355 list_del(&bss->list); 355 list_del(&bss->list);
356 local->num_bss_info--; 356 local->num_bss_info--;
357 } else { 357 } else {
358 bss = (struct hostap_bss_info *) 358 bss = kmalloc(sizeof(*bss), GFP_ATOMIC);
359 kmalloc(sizeof(*bss), GFP_ATOMIC);
360 if (bss == NULL) 359 if (bss == NULL)
361 return NULL; 360 return NULL;
362 } 361 }
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 7e72ac1de49b..231dbd77f5f5 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -349,7 +349,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
349 default: 349 default:
350 policy_txt = "unknown"; 350 policy_txt = "unknown";
351 break; 351 break;
352 }; 352 }
353 p += sprintf(p, "MAC policy: %s\n", policy_txt); 353 p += sprintf(p, "MAC policy: %s\n", policy_txt);
354 p += sprintf(p, "MAC entries: %u\n", ap->mac_restrictions.entries); 354 p += sprintf(p, "MAC entries: %u\n", ap->mac_restrictions.entries);
355 p += sprintf(p, "MAC list:\n"); 355 p += sprintf(p, "MAC list:\n");
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index a36501dbbe02..db72461c486b 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -39,7 +39,6 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Ignore broken CIS VCC entry");
39 39
40/* struct local_info::hw_priv */ 40/* struct local_info::hw_priv */
41struct hostap_cs_priv { 41struct hostap_cs_priv {
42 dev_node_t node;
43 struct pcmcia_device *link; 42 struct pcmcia_device *link;
44 int sandisk_connectplus; 43 int sandisk_connectplus;
45}; 44};
@@ -556,15 +555,7 @@ static int prism2_config_check(struct pcmcia_device *p_dev,
556 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000; 555 p_dev->conf.Vpp = dflt->vpp1.param[CISTPL_POWER_VNOM] / 10000;
557 556
558 /* Do we need to allocate an interrupt? */ 557 /* Do we need to allocate an interrupt? */
559 if (cfg->irq.IRQInfo1 || dflt->irq.IRQInfo1) 558 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
560 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
561 else if (!(p_dev->conf.Attributes & CONF_ENABLE_IRQ)) {
562 /* At least Compaq WL200 does not have IRQInfo1 set,
563 * but it does not work without interrupts.. */
564 printk(KERN_WARNING "Config has no IRQ info, but trying to "
565 "enable IRQ anyway..\n");
566 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
567 }
568 559
569 /* IO window settings */ 560 /* IO window settings */
570 PDEBUG(DEBUG_EXTRA, "IO window settings: cfg->io.nwin=%d " 561 PDEBUG(DEBUG_EXTRA, "IO window settings: cfg->io.nwin=%d "
@@ -633,21 +624,10 @@ static int prism2_config(struct pcmcia_device *link)
633 local = iface->local; 624 local = iface->local;
634 local->hw_priv = hw_priv; 625 local->hw_priv = hw_priv;
635 hw_priv->link = link; 626 hw_priv->link = link;
636 strcpy(hw_priv->node.dev_name, dev->name);
637 link->dev_node = &hw_priv->node;
638 627
639 /* 628 ret = pcmcia_request_irq(link, prism2_interrupt);
640 * Allocate an interrupt line. Note that this does not assign a 629 if (ret)
641 * handler to the interrupt, unless the 'Handler' member of the 630 goto failed;
642 * irq structure is initialized.
643 */
644 if (link->conf.Attributes & CONF_ENABLE_IRQ) {
645 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
646 link->irq.Handler = prism2_interrupt;
647 ret = pcmcia_request_irq(link, &link->irq);
648 if (ret)
649 goto failed;
650 }
651 631
652 /* 632 /*
653 * This actually configures the PCMCIA socket -- setting up 633 * This actually configures the PCMCIA socket -- setting up
@@ -658,7 +638,7 @@ static int prism2_config(struct pcmcia_device *link)
658 if (ret) 638 if (ret)
659 goto failed; 639 goto failed;
660 640
661 dev->irq = link->irq.AssignedIRQ; 641 dev->irq = link->irq;
662 dev->base_addr = link->io.BasePort1; 642 dev->base_addr = link->io.BasePort1;
663 643
664 /* Finally, report what we've done */ 644 /* Finally, report what we've done */
@@ -668,7 +648,7 @@ static int prism2_config(struct pcmcia_device *link)
668 printk(", Vpp %d.%d", link->conf.Vpp / 10, 648 printk(", Vpp %d.%d", link->conf.Vpp / 10,
669 link->conf.Vpp % 10); 649 link->conf.Vpp % 10);
670 if (link->conf.Attributes & CONF_ENABLE_IRQ) 650 if (link->conf.Attributes & CONF_ENABLE_IRQ)
671 printk(", irq %d", link->irq.AssignedIRQ); 651 printk(", irq %d", link->irq);
672 if (link->io.NumPorts1) 652 if (link->io.NumPorts1)
673 printk(", io 0x%04x-0x%04x", link->io.BasePort1, 653 printk(", io 0x%04x-0x%04x", link->io.BasePort1,
674 link->io.BasePort1+link->io.NumPorts1-1); 654 link->io.BasePort1+link->io.NumPorts1-1);
@@ -682,11 +662,9 @@ static int prism2_config(struct pcmcia_device *link)
682 sandisk_enable_wireless(dev); 662 sandisk_enable_wireless(dev);
683 663
684 ret = prism2_hw_config(dev, 1); 664 ret = prism2_hw_config(dev, 1);
685 if (!ret) { 665 if (!ret)
686 ret = hostap_hw_ready(dev); 666 ret = hostap_hw_ready(dev);
687 if (ret == 0 && local->ddev) 667
688 strcpy(hw_priv->node.dev_name, local->ddev->name);
689 }
690 return ret; 668 return ret;
691 669
692 failed: 670 failed:
diff --git a/drivers/net/wireless/hostap/hostap_download.c b/drivers/net/wireless/hostap/hostap_download.c
index 89d3849abfe0..e73bf739fd9b 100644
--- a/drivers/net/wireless/hostap/hostap_download.c
+++ b/drivers/net/wireless/hostap/hostap_download.c
@@ -744,7 +744,7 @@ static int prism2_download(local_info_t *local,
744 local->dev->name, param->dl_cmd); 744 local->dev->name, param->dl_cmd);
745 ret = -EINVAL; 745 ret = -EINVAL;
746 break; 746 break;
747 }; 747 }
748 748
749 out: 749 out:
750 if (ret == 0 && dl && 750 if (ret == 0 && dl &&
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 9a082308a9d4..a85e43a8d758 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3039,8 +3039,7 @@ static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p)
3039 p->length > 1024 || !p->pointer) 3039 p->length > 1024 || !p->pointer)
3040 return -EINVAL; 3040 return -EINVAL;
3041 3041
3042 param = (struct prism2_download_param *) 3042 param = kmalloc(p->length, GFP_KERNEL);
3043 kmalloc(p->length, GFP_KERNEL);
3044 if (param == NULL) 3043 if (param == NULL)
3045 return -ENOMEM; 3044 return -ENOMEM;
3046 3045
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9b72c45a7748..0bd4dfa59a8a 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -174,6 +174,8 @@ that only one external action is invoked at a time.
174#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" 174#define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver"
175#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" 175#define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation"
176 176
177struct pm_qos_request_list *ipw2100_pm_qos_req;
178
177/* Debugging stuff */ 179/* Debugging stuff */
178#ifdef CONFIG_IPW2100_DEBUG 180#ifdef CONFIG_IPW2100_DEBUG
179#define IPW2100_RX_DEBUG /* Reception debugging */ 181#define IPW2100_RX_DEBUG /* Reception debugging */
@@ -1739,7 +1741,7 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
1739 /* the ipw2100 hardware really doesn't want power management delays 1741 /* the ipw2100 hardware really doesn't want power management delays
1740 * longer than 175usec 1742 * longer than 175usec
1741 */ 1743 */
1742 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 175); 1744 pm_qos_update_request(ipw2100_pm_qos_req, 175);
1743 1745
1744 /* If the interrupt is enabled, turn it off... */ 1746 /* If the interrupt is enabled, turn it off... */
1745 spin_lock_irqsave(&priv->low_lock, flags); 1747 spin_lock_irqsave(&priv->low_lock, flags);
@@ -1887,8 +1889,7 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1887 ipw2100_disable_interrupts(priv); 1889 ipw2100_disable_interrupts(priv);
1888 spin_unlock_irqrestore(&priv->low_lock, flags); 1890 spin_unlock_irqrestore(&priv->low_lock, flags);
1889 1891
1890 pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 1892 pm_qos_update_request(ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);
1891 PM_QOS_DEFAULT_VALUE);
1892 1893
1893 /* We have to signal any supplicant if we are disassociating */ 1894 /* We have to signal any supplicant if we are disassociating */
1894 if (associated) 1895 if (associated)
@@ -2140,7 +2141,7 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
2140 DECLARE_SSID_BUF(ssid); 2141 DECLARE_SSID_BUF(ssid);
2141 2142
2142 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, 2143 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
2143 "disassociated: '%s' %pM \n", 2144 "disassociated: '%s' %pM\n",
2144 print_ssid(ssid, priv->essid, priv->essid_len), 2145 print_ssid(ssid, priv->essid, priv->essid_len),
2145 priv->bssid); 2146 priv->bssid);
2146 2147
@@ -3239,7 +3240,6 @@ static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
3239 IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX, 3240 IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
3240 txq->next); 3241 txq->next);
3241 } 3242 }
3242 return;
3243} 3243}
3244 3244
3245static void ipw2100_irq_tasklet(struct ipw2100_priv *priv) 3245static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
@@ -3285,7 +3285,7 @@ static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
3285 3285
3286 if (inta & IPW2100_INTA_PARITY_ERROR) { 3286 if (inta & IPW2100_INTA_PARITY_ERROR) {
3287 printk(KERN_ERR DRV_NAME 3287 printk(KERN_ERR DRV_NAME
3288 ": ***** PARITY ERROR INTERRUPT !!!! \n"); 3288 ": ***** PARITY ERROR INTERRUPT !!!!\n");
3289 priv->inta_other++; 3289 priv->inta_other++;
3290 write_register(dev, IPW_REG_INTA, IPW2100_INTA_PARITY_ERROR); 3290 write_register(dev, IPW_REG_INTA, IPW2100_INTA_PARITY_ERROR);
3291 } 3291 }
@@ -6102,7 +6102,7 @@ static const struct net_device_ops ipw2100_netdev_ops = {
6102 .ndo_validate_addr = eth_validate_addr, 6102 .ndo_validate_addr = eth_validate_addr,
6103}; 6103};
6104 6104
6105/* Look into using netdev destructor to shutdown ieee80211? */ 6105/* Look into using netdev destructor to shutdown libipw? */
6106 6106
6107static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, 6107static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6108 void __iomem * base_addr, 6108 void __iomem * base_addr,
@@ -6112,7 +6112,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6112 struct ipw2100_priv *priv; 6112 struct ipw2100_priv *priv;
6113 struct net_device *dev; 6113 struct net_device *dev;
6114 6114
6115 dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0); 6115 dev = alloc_libipw(sizeof(struct ipw2100_priv), 0);
6116 if (!dev) 6116 if (!dev)
6117 return NULL; 6117 return NULL;
6118 priv = libipw_priv(dev); 6118 priv = libipw_priv(dev);
@@ -6425,7 +6425,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6425 sysfs_remove_group(&pci_dev->dev.kobj, 6425 sysfs_remove_group(&pci_dev->dev.kobj,
6426 &ipw2100_attribute_group); 6426 &ipw2100_attribute_group);
6427 6427
6428 free_ieee80211(dev, 0); 6428 free_libipw(dev, 0);
6429 pci_set_drvdata(pci_dev, NULL); 6429 pci_set_drvdata(pci_dev, NULL);
6430 } 6430 }
6431 6431
@@ -6483,10 +6483,10 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6483 if (dev->base_addr) 6483 if (dev->base_addr)
6484 iounmap((void __iomem *)dev->base_addr); 6484 iounmap((void __iomem *)dev->base_addr);
6485 6485
6486 /* wiphy_unregister needs to be here, before free_ieee80211 */ 6486 /* wiphy_unregister needs to be here, before free_libipw */
6487 wiphy_unregister(priv->ieee->wdev.wiphy); 6487 wiphy_unregister(priv->ieee->wdev.wiphy);
6488 kfree(priv->ieee->bg_band.channels); 6488 kfree(priv->ieee->bg_band.channels);
6489 free_ieee80211(dev, 0); 6489 free_libipw(dev, 0);
6490 } 6490 }
6491 6491
6492 pci_release_regions(pci_dev); 6492 pci_release_regions(pci_dev);
@@ -6669,7 +6669,7 @@ static int __init ipw2100_init(void)
6669 if (ret) 6669 if (ret)
6670 goto out; 6670 goto out;
6671 6671
6672 pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100", 6672 ipw2100_pm_qos_req = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY,
6673 PM_QOS_DEFAULT_VALUE); 6673 PM_QOS_DEFAULT_VALUE);
6674#ifdef CONFIG_IPW2100_DEBUG 6674#ifdef CONFIG_IPW2100_DEBUG
6675 ipw2100_debug_level = debug; 6675 ipw2100_debug_level = debug;
@@ -6692,7 +6692,7 @@ static void __exit ipw2100_exit(void)
6692 &driver_attr_debug_level); 6692 &driver_attr_debug_level);
6693#endif 6693#endif
6694 pci_unregister_driver(&ipw2100_pci_driver); 6694 pci_unregister_driver(&ipw2100_pci_driver);
6695 pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "ipw2100"); 6695 pm_qos_remove_request(ipw2100_pm_qos_req);
6696} 6696}
6697 6697
6698module_init(ipw2100_init); 6698module_init(ipw2100_init);
@@ -6753,7 +6753,7 @@ static int ipw2100_wx_set_freq(struct net_device *dev,
6753 err = -EOPNOTSUPP; 6753 err = -EOPNOTSUPP;
6754 goto done; 6754 goto done;
6755 } else { /* Set the channel */ 6755 } else { /* Set the channel */
6756 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); 6756 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
6757 err = ipw2100_set_channel(priv, fwrq->m, 0); 6757 err = ipw2100_set_channel(priv, fwrq->m, 0);
6758 } 6758 }
6759 6759
@@ -6782,7 +6782,7 @@ static int ipw2100_wx_get_freq(struct net_device *dev,
6782 else 6782 else
6783 wrqu->freq.m = 0; 6783 wrqu->freq.m = 0;
6784 6784
6785 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); 6785 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
6786 return 0; 6786 return 0;
6787 6787
6788} 6788}
@@ -6794,7 +6794,7 @@ static int ipw2100_wx_set_mode(struct net_device *dev,
6794 struct ipw2100_priv *priv = libipw_priv(dev); 6794 struct ipw2100_priv *priv = libipw_priv(dev);
6795 int err = 0; 6795 int err = 0;
6796 6796
6797 IPW_DEBUG_WX("SET Mode -> %d \n", wrqu->mode); 6797 IPW_DEBUG_WX("SET Mode -> %d\n", wrqu->mode);
6798 6798
6799 if (wrqu->mode == priv->ieee->iw_mode) 6799 if (wrqu->mode == priv->ieee->iw_mode)
6800 return 0; 6800 return 0;
@@ -7149,7 +7149,7 @@ static int ipw2100_wx_set_nick(struct net_device *dev,
7149 memset(priv->nick, 0, sizeof(priv->nick)); 7149 memset(priv->nick, 0, sizeof(priv->nick));
7150 memcpy(priv->nick, extra, wrqu->data.length); 7150 memcpy(priv->nick, extra, wrqu->data.length);
7151 7151
7152 IPW_DEBUG_WX("SET Nickname -> %s \n", priv->nick); 7152 IPW_DEBUG_WX("SET Nickname -> %s\n", priv->nick);
7153 7153
7154 return 0; 7154 return 0;
7155} 7155}
@@ -7168,7 +7168,7 @@ static int ipw2100_wx_get_nick(struct net_device *dev,
7168 memcpy(extra, priv->nick, wrqu->data.length); 7168 memcpy(extra, priv->nick, wrqu->data.length);
7169 wrqu->data.flags = 1; /* active */ 7169 wrqu->data.flags = 1; /* active */
7170 7170
7171 IPW_DEBUG_WX("GET Nickname -> %s \n", extra); 7171 IPW_DEBUG_WX("GET Nickname -> %s\n", extra);
7172 7172
7173 return 0; 7173 return 0;
7174} 7174}
@@ -7207,7 +7207,7 @@ static int ipw2100_wx_set_rate(struct net_device *dev,
7207 7207
7208 err = ipw2100_set_tx_rates(priv, rate, 0); 7208 err = ipw2100_set_tx_rates(priv, rate, 0);
7209 7209
7210 IPW_DEBUG_WX("SET Rate -> %04X \n", rate); 7210 IPW_DEBUG_WX("SET Rate -> %04X\n", rate);
7211 done: 7211 done:
7212 mutex_unlock(&priv->action_mutex); 7212 mutex_unlock(&priv->action_mutex);
7213 return err; 7213 return err;
@@ -7258,7 +7258,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev,
7258 wrqu->bitrate.value = 0; 7258 wrqu->bitrate.value = 0;
7259 } 7259 }
7260 7260
7261 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); 7261 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
7262 7262
7263 done: 7263 done:
7264 mutex_unlock(&priv->action_mutex); 7264 mutex_unlock(&priv->action_mutex);
@@ -7294,7 +7294,7 @@ static int ipw2100_wx_set_rts(struct net_device *dev,
7294 7294
7295 err = ipw2100_set_rts_threshold(priv, value); 7295 err = ipw2100_set_rts_threshold(priv, value);
7296 7296
7297 IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value); 7297 IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X\n", value);
7298 done: 7298 done:
7299 mutex_unlock(&priv->action_mutex); 7299 mutex_unlock(&priv->action_mutex);
7300 return err; 7300 return err;
@@ -7316,7 +7316,7 @@ static int ipw2100_wx_get_rts(struct net_device *dev,
7316 /* If RTS is set to the default value, then it is disabled */ 7316 /* If RTS is set to the default value, then it is disabled */
7317 wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0; 7317 wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0;
7318 7318
7319 IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X \n", wrqu->rts.value); 7319 IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X\n", wrqu->rts.value);
7320 7320
7321 return 0; 7321 return 0;
7322} 7322}
@@ -7355,7 +7355,7 @@ static int ipw2100_wx_set_txpow(struct net_device *dev,
7355 7355
7356 err = ipw2100_set_tx_power(priv, value); 7356 err = ipw2100_set_tx_power(priv, value);
7357 7357
7358 IPW_DEBUG_WX("SET TX Power -> %d \n", value); 7358 IPW_DEBUG_WX("SET TX Power -> %d\n", value);
7359 7359
7360 done: 7360 done:
7361 mutex_unlock(&priv->action_mutex); 7361 mutex_unlock(&priv->action_mutex);
@@ -7384,7 +7384,7 @@ static int ipw2100_wx_get_txpow(struct net_device *dev,
7384 7384
7385 wrqu->txpower.flags = IW_TXPOW_DBM; 7385 wrqu->txpower.flags = IW_TXPOW_DBM;
7386 7386
7387 IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->txpower.value); 7387 IPW_DEBUG_WX("GET TX Power -> %d\n", wrqu->txpower.value);
7388 7388
7389 return 0; 7389 return 0;
7390} 7390}
@@ -7414,7 +7414,7 @@ static int ipw2100_wx_set_frag(struct net_device *dev,
7414 priv->frag_threshold = priv->ieee->fts; 7414 priv->frag_threshold = priv->ieee->fts;
7415 } 7415 }
7416 7416
7417 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", priv->ieee->fts); 7417 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", priv->ieee->fts);
7418 7418
7419 return 0; 7419 return 0;
7420} 7420}
@@ -7432,7 +7432,7 @@ static int ipw2100_wx_get_frag(struct net_device *dev,
7432 wrqu->frag.fixed = 0; /* no auto select */ 7432 wrqu->frag.fixed = 0; /* no auto select */
7433 wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0; 7433 wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0;
7434 7434
7435 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); 7435 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
7436 7436
7437 return 0; 7437 return 0;
7438} 7438}
@@ -7458,14 +7458,14 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
7458 7458
7459 if (wrqu->retry.flags & IW_RETRY_SHORT) { 7459 if (wrqu->retry.flags & IW_RETRY_SHORT) {
7460 err = ipw2100_set_short_retry(priv, wrqu->retry.value); 7460 err = ipw2100_set_short_retry(priv, wrqu->retry.value);
7461 IPW_DEBUG_WX("SET Short Retry Limit -> %d \n", 7461 IPW_DEBUG_WX("SET Short Retry Limit -> %d\n",
7462 wrqu->retry.value); 7462 wrqu->retry.value);
7463 goto done; 7463 goto done;
7464 } 7464 }
7465 7465
7466 if (wrqu->retry.flags & IW_RETRY_LONG) { 7466 if (wrqu->retry.flags & IW_RETRY_LONG) {
7467 err = ipw2100_set_long_retry(priv, wrqu->retry.value); 7467 err = ipw2100_set_long_retry(priv, wrqu->retry.value);
7468 IPW_DEBUG_WX("SET Long Retry Limit -> %d \n", 7468 IPW_DEBUG_WX("SET Long Retry Limit -> %d\n",
7469 wrqu->retry.value); 7469 wrqu->retry.value);
7470 goto done; 7470 goto done;
7471 } 7471 }
@@ -7474,7 +7474,7 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
7474 if (!err) 7474 if (!err)
7475 err = ipw2100_set_long_retry(priv, wrqu->retry.value); 7475 err = ipw2100_set_long_retry(priv, wrqu->retry.value);
7476 7476
7477 IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value); 7477 IPW_DEBUG_WX("SET Both Retry Limits -> %d\n", wrqu->retry.value);
7478 7478
7479 done: 7479 done:
7480 mutex_unlock(&priv->action_mutex); 7480 mutex_unlock(&priv->action_mutex);
@@ -7508,7 +7508,7 @@ static int ipw2100_wx_get_retry(struct net_device *dev,
7508 wrqu->retry.value = priv->short_retry_limit; 7508 wrqu->retry.value = priv->short_retry_limit;
7509 } 7509 }
7510 7510
7511 IPW_DEBUG_WX("GET Retry -> %d \n", wrqu->retry.value); 7511 IPW_DEBUG_WX("GET Retry -> %d\n", wrqu->retry.value);
7512 7512
7513 return 0; 7513 return 0;
7514} 7514}
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d72e3d19586..3aa3bb18f615 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -459,7 +459,7 @@ static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
459{ 459{
460 u32 word; 460 u32 word;
461 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); 461 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
462 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg); 462 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
463 word = _ipw_read32(priv, IPW_INDIRECT_DATA); 463 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
464 return (word >> ((reg & 0x3) * 8)) & 0xff; 464 return (word >> ((reg & 0x3) * 8)) & 0xff;
465} 465}
@@ -473,7 +473,7 @@ static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
473 473
474 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg); 474 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
475 value = _ipw_read32(priv, IPW_INDIRECT_DATA); 475 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
476 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value); 476 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
477 return value; 477 return value;
478} 478}
479 479
@@ -2349,16 +2349,25 @@ static void ipw_bg_adapter_restart(struct work_struct *work)
2349 mutex_unlock(&priv->mutex); 2349 mutex_unlock(&priv->mutex);
2350} 2350}
2351 2351
2352#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) 2352static void ipw_abort_scan(struct ipw_priv *priv);
2353
2354#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2353 2355
2354static void ipw_scan_check(void *data) 2356static void ipw_scan_check(void *data)
2355{ 2357{
2356 struct ipw_priv *priv = data; 2358 struct ipw_priv *priv = data;
2357 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { 2359
2360 if (priv->status & STATUS_SCAN_ABORTING) {
2358 IPW_DEBUG_SCAN("Scan completion watchdog resetting " 2361 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2359 "adapter after (%dms).\n", 2362 "adapter after (%dms).\n",
2360 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); 2363 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2361 queue_work(priv->workqueue, &priv->adapter_restart); 2364 queue_work(priv->workqueue, &priv->adapter_restart);
2365 } else if (priv->status & STATUS_SCANNING) {
2366 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2367 "after (%dms).\n",
2368 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2369 ipw_abort_scan(priv);
2370 queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
2362 } 2371 }
2363} 2372}
2364 2373
@@ -2598,8 +2607,6 @@ static inline void eeprom_write_reg(struct ipw_priv *p, u32 data)
2598 2607
2599 /* the eeprom requires some time to complete the operation */ 2608 /* the eeprom requires some time to complete the operation */
2600 udelay(p->eeprom_delay); 2609 udelay(p->eeprom_delay);
2601
2602 return;
2603} 2610}
2604 2611
2605/* perform a chip select operation */ 2612/* perform a chip select operation */
@@ -2739,7 +2746,7 @@ static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2739static int ipw_fw_dma_enable(struct ipw_priv *priv) 2746static int ipw_fw_dma_enable(struct ipw_priv *priv)
2740{ /* start dma engine but no transfers yet */ 2747{ /* start dma engine but no transfers yet */
2741 2748
2742 IPW_DEBUG_FW(">> : \n"); 2749 IPW_DEBUG_FW(">> :\n");
2743 2750
2744 /* Start the dma */ 2751 /* Start the dma */
2745 ipw_fw_dma_reset_command_blocks(priv); 2752 ipw_fw_dma_reset_command_blocks(priv);
@@ -2747,7 +2754,7 @@ static int ipw_fw_dma_enable(struct ipw_priv *priv)
2747 /* Write CB base address */ 2754 /* Write CB base address */
2748 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL); 2755 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2749 2756
2750 IPW_DEBUG_FW("<< : \n"); 2757 IPW_DEBUG_FW("<< :\n");
2751 return 0; 2758 return 0;
2752} 2759}
2753 2760
@@ -2762,7 +2769,7 @@ static void ipw_fw_dma_abort(struct ipw_priv *priv)
2762 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); 2769 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2763 priv->sram_desc.last_cb_index = 0; 2770 priv->sram_desc.last_cb_index = 0;
2764 2771
2765 IPW_DEBUG_FW("<< \n"); 2772 IPW_DEBUG_FW("<<\n");
2766} 2773}
2767 2774
2768static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index, 2775static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
@@ -2813,29 +2820,29 @@ static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2813 2820
2814 IPW_DEBUG_FW(">> :\n"); 2821 IPW_DEBUG_FW(">> :\n");
2815 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB); 2822 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2816 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address); 2823 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2817 2824
2818 /* Read the DMA Controlor register */ 2825 /* Read the DMA Controlor register */
2819 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL); 2826 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2820 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value); 2827 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2821 2828
2822 /* Print the CB values */ 2829 /* Print the CB values */
2823 cb_fields_address = address; 2830 cb_fields_address = address;
2824 register_value = ipw_read_reg32(priv, cb_fields_address); 2831 register_value = ipw_read_reg32(priv, cb_fields_address);
2825 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value); 2832 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2826 2833
2827 cb_fields_address += sizeof(u32); 2834 cb_fields_address += sizeof(u32);
2828 register_value = ipw_read_reg32(priv, cb_fields_address); 2835 register_value = ipw_read_reg32(priv, cb_fields_address);
2829 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value); 2836 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2830 2837
2831 cb_fields_address += sizeof(u32); 2838 cb_fields_address += sizeof(u32);
2832 register_value = ipw_read_reg32(priv, cb_fields_address); 2839 register_value = ipw_read_reg32(priv, cb_fields_address);
2833 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n", 2840 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2834 register_value); 2841 register_value);
2835 2842
2836 cb_fields_address += sizeof(u32); 2843 cb_fields_address += sizeof(u32);
2837 register_value = ipw_read_reg32(priv, cb_fields_address); 2844 register_value = ipw_read_reg32(priv, cb_fields_address);
2838 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value); 2845 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2839 2846
2840 IPW_DEBUG_FW(">> :\n"); 2847 IPW_DEBUG_FW(">> :\n");
2841} 2848}
@@ -2851,7 +2858,7 @@ static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2851 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) / 2858 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2852 sizeof(struct command_block); 2859 sizeof(struct command_block);
2853 2860
2854 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n", 2861 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2855 current_cb_index, current_cb_address); 2862 current_cb_index, current_cb_address);
2856 2863
2857 IPW_DEBUG_FW(">> :\n"); 2864 IPW_DEBUG_FW(">> :\n");
@@ -2910,7 +2917,7 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2910 int ret, i; 2917 int ret, i;
2911 u32 size; 2918 u32 size;
2912 2919
2913 IPW_DEBUG_FW(">> \n"); 2920 IPW_DEBUG_FW(">>\n");
2914 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n", 2921 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2915 nr, dest_address, len); 2922 nr, dest_address, len);
2916 2923
@@ -2927,7 +2934,7 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2927 IPW_DEBUG_FW_INFO(": Added new cb\n"); 2934 IPW_DEBUG_FW_INFO(": Added new cb\n");
2928 } 2935 }
2929 2936
2930 IPW_DEBUG_FW("<< \n"); 2937 IPW_DEBUG_FW("<<\n");
2931 return 0; 2938 return 0;
2932} 2939}
2933 2940
@@ -2936,7 +2943,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
2936 u32 current_index = 0, previous_index; 2943 u32 current_index = 0, previous_index;
2937 u32 watchdog = 0; 2944 u32 watchdog = 0;
2938 2945
2939 IPW_DEBUG_FW(">> : \n"); 2946 IPW_DEBUG_FW(">> :\n");
2940 2947
2941 current_index = ipw_fw_dma_command_block_index(priv); 2948 current_index = ipw_fw_dma_command_block_index(priv);
2942 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n", 2949 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
@@ -2965,7 +2972,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
2965 ipw_set_bit(priv, IPW_RESET_REG, 2972 ipw_set_bit(priv, IPW_RESET_REG,
2966 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER); 2973 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2967 2974
2968 IPW_DEBUG_FW("<< dmaWaitSync \n"); 2975 IPW_DEBUG_FW("<< dmaWaitSync\n");
2969 return 0; 2976 return 0;
2970} 2977}
2971 2978
@@ -3026,7 +3033,7 @@ static int ipw_stop_master(struct ipw_priv *priv)
3026{ 3033{
3027 int rc; 3034 int rc;
3028 3035
3029 IPW_DEBUG_TRACE(">> \n"); 3036 IPW_DEBUG_TRACE(">>\n");
3030 /* stop master. typical delay - 0 */ 3037 /* stop master. typical delay - 0 */
3031 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); 3038 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3032 3039
@@ -3045,7 +3052,7 @@ static int ipw_stop_master(struct ipw_priv *priv)
3045 3052
3046static void ipw_arc_release(struct ipw_priv *priv) 3053static void ipw_arc_release(struct ipw_priv *priv)
3047{ 3054{
3048 IPW_DEBUG_TRACE(">> \n"); 3055 IPW_DEBUG_TRACE(">>\n");
3049 mdelay(5); 3056 mdelay(5);
3050 3057
3051 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET); 3058 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
@@ -3067,7 +3074,7 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3067 3074
3068 image = (__le16 *) data; 3075 image = (__le16 *) data;
3069 3076
3070 IPW_DEBUG_TRACE(">> \n"); 3077 IPW_DEBUG_TRACE(">>\n");
3071 3078
3072 rc = ipw_stop_master(priv); 3079 rc = ipw_stop_master(priv);
3073 3080
@@ -3181,7 +3188,7 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3181 void **virts; 3188 void **virts;
3182 dma_addr_t *phys; 3189 dma_addr_t *phys;
3183 3190
3184 IPW_DEBUG_TRACE("<< : \n"); 3191 IPW_DEBUG_TRACE("<< :\n");
3185 3192
3186 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL, 3193 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3187 GFP_KERNEL); 3194 GFP_KERNEL);
@@ -4482,7 +4489,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4482 case CMAS_ASSOCIATED:{ 4489 case CMAS_ASSOCIATED:{
4483 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4490 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4484 IPW_DL_ASSOC, 4491 IPW_DL_ASSOC,
4485 "associated: '%s' %pM \n", 4492 "associated: '%s' %pM\n",
4486 print_ssid(ssid, priv->essid, 4493 print_ssid(ssid, priv->essid,
4487 priv->essid_len), 4494 priv->essid_len),
4488 priv->bssid); 4495 priv->bssid);
@@ -4563,7 +4570,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4563 IPW_DL_ASSOC, 4570 IPW_DL_ASSOC,
4564 "deauthenticated: '%s' " 4571 "deauthenticated: '%s' "
4565 "%pM" 4572 "%pM"
4566 ": (0x%04X) - %s \n", 4573 ": (0x%04X) - %s\n",
4567 print_ssid(ssid, 4574 print_ssid(ssid,
4568 priv-> 4575 priv->
4569 essid, 4576 essid,
@@ -4614,7 +4621,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4614 4621
4615 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4622 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4616 IPW_DL_ASSOC, 4623 IPW_DL_ASSOC,
4617 "disassociated: '%s' %pM \n", 4624 "disassociated: '%s' %pM\n",
4618 print_ssid(ssid, priv->essid, 4625 print_ssid(ssid, priv->essid,
4619 priv->essid_len), 4626 priv->essid_len),
4620 priv->bssid); 4627 priv->bssid);
@@ -4652,7 +4659,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4652 switch (auth->state) { 4659 switch (auth->state) {
4653 case CMAS_AUTHENTICATED: 4660 case CMAS_AUTHENTICATED:
4654 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4661 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4655 "authenticated: '%s' %pM \n", 4662 "authenticated: '%s' %pM\n",
4656 print_ssid(ssid, priv->essid, 4663 print_ssid(ssid, priv->essid,
4657 priv->essid_len), 4664 priv->essid_len),
4658 priv->bssid); 4665 priv->bssid);
@@ -6925,7 +6932,7 @@ static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6925 } else { 6932 } else {
6926 mode = priv->ieee->mode; 6933 mode = priv->ieee->mode;
6927 } 6934 }
6928 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode); 6935 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6929 return mode; 6936 return mode;
6930} 6937}
6931 6938
@@ -6965,7 +6972,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6965 &def_parameters_OFDM, size); 6972 &def_parameters_OFDM, size);
6966 6973
6967 if ((network->qos_data.active == 1) && (active_network == 1)) { 6974 if ((network->qos_data.active == 1) && (active_network == 1)) {
6968 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n"); 6975 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6969 schedule_work(&priv->qos_activate); 6976 schedule_work(&priv->qos_activate);
6970 } 6977 }
6971 6978
@@ -7542,7 +7549,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
7542 return err; 7549 return err;
7543 } 7550 }
7544 7551
7545 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n", 7552 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
7546 print_ssid(ssid, priv->essid, priv->essid_len), 7553 print_ssid(ssid, priv->essid, priv->essid_len),
7547 priv->bssid); 7554 priv->bssid);
7548 7555
@@ -8793,7 +8800,7 @@ static int ipw_wx_set_freq(struct net_device *dev,
8793 } 8800 }
8794 } 8801 }
8795 8802
8796 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); 8803 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8797 mutex_lock(&priv->mutex); 8804 mutex_lock(&priv->mutex);
8798 ret = ipw_set_channel(priv, channel); 8805 ret = ipw_set_channel(priv, channel);
8799 mutex_unlock(&priv->mutex); 8806 mutex_unlock(&priv->mutex);
@@ -8835,7 +8842,7 @@ static int ipw_wx_get_freq(struct net_device *dev,
8835 wrqu->freq.m = 0; 8842 wrqu->freq.m = 0;
8836 8843
8837 mutex_unlock(&priv->mutex); 8844 mutex_unlock(&priv->mutex);
8838 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); 8845 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8839 return 0; 8846 return 0;
8840} 8847}
8841 8848
@@ -9230,7 +9237,7 @@ static int ipw_wx_get_sens(struct net_device *dev,
9230 wrqu->sens.value = priv->roaming_threshold; 9237 wrqu->sens.value = priv->roaming_threshold;
9231 mutex_unlock(&priv->mutex); 9238 mutex_unlock(&priv->mutex);
9232 9239
9233 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n", 9240 IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9234 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); 9241 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9235 9242
9236 return 0; 9243 return 0;
@@ -9358,7 +9365,7 @@ static int ipw_wx_get_rate(struct net_device *dev,
9358 wrqu->bitrate.value = priv->last_rate; 9365 wrqu->bitrate.value = priv->last_rate;
9359 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0; 9366 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9360 mutex_unlock(&priv->mutex); 9367 mutex_unlock(&priv->mutex);
9361 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); 9368 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9362 return 0; 9369 return 0;
9363} 9370}
9364 9371
@@ -9381,7 +9388,7 @@ static int ipw_wx_set_rts(struct net_device *dev,
9381 9388
9382 ipw_send_rts_threshold(priv, priv->rts_threshold); 9389 ipw_send_rts_threshold(priv, priv->rts_threshold);
9383 mutex_unlock(&priv->mutex); 9390 mutex_unlock(&priv->mutex);
9384 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold); 9391 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9385 return 0; 9392 return 0;
9386} 9393}
9387 9394
@@ -9395,7 +9402,7 @@ static int ipw_wx_get_rts(struct net_device *dev,
9395 wrqu->rts.fixed = 0; /* no auto select */ 9402 wrqu->rts.fixed = 0; /* no auto select */
9396 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); 9403 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9397 mutex_unlock(&priv->mutex); 9404 mutex_unlock(&priv->mutex);
9398 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value); 9405 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9399 return 0; 9406 return 0;
9400} 9407}
9401 9408
@@ -9445,7 +9452,7 @@ static int ipw_wx_get_txpow(struct net_device *dev,
9445 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0; 9452 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9446 mutex_unlock(&priv->mutex); 9453 mutex_unlock(&priv->mutex);
9447 9454
9448 IPW_DEBUG_WX("GET TX Power -> %s %d \n", 9455 IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9449 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); 9456 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9450 9457
9451 return 0; 9458 return 0;
@@ -9471,7 +9478,7 @@ static int ipw_wx_set_frag(struct net_device *dev,
9471 9478
9472 ipw_send_frag_threshold(priv, wrqu->frag.value); 9479 ipw_send_frag_threshold(priv, wrqu->frag.value);
9473 mutex_unlock(&priv->mutex); 9480 mutex_unlock(&priv->mutex);
9474 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value); 9481 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9475 return 0; 9482 return 0;
9476} 9483}
9477 9484
@@ -9485,7 +9492,7 @@ static int ipw_wx_get_frag(struct net_device *dev,
9485 wrqu->frag.fixed = 0; /* no auto select */ 9492 wrqu->frag.fixed = 0; /* no auto select */
9486 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS); 9493 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9487 mutex_unlock(&priv->mutex); 9494 mutex_unlock(&priv->mutex);
9488 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); 9495 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9489 9496
9490 return 0; 9497 return 0;
9491} 9498}
@@ -9549,7 +9556,7 @@ static int ipw_wx_get_retry(struct net_device *dev,
9549 } 9556 }
9550 mutex_unlock(&priv->mutex); 9557 mutex_unlock(&priv->mutex);
9551 9558
9552 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value); 9559 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9553 9560
9554 return 0; 9561 return 0;
9555} 9562}
@@ -9996,49 +10003,48 @@ static int ipw_wx_sw_reset(struct net_device *dev,
9996} 10003}
9997 10004
9998/* Rebase the WE IOCTLs to zero for the handler array */ 10005/* Rebase the WE IOCTLs to zero for the handler array */
9999#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
10000static iw_handler ipw_wx_handlers[] = { 10006static iw_handler ipw_wx_handlers[] = {
10001 IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname, 10007 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
10002 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq, 10008 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
10003 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, 10009 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
10004 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, 10010 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
10005 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode, 10011 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
10006 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens, 10012 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
10007 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens, 10013 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
10008 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range, 10014 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
10009 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap, 10015 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
10010 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap, 10016 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
10011 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan, 10017 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
10012 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan, 10018 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
10013 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid, 10019 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
10014 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid, 10020 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
10015 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick, 10021 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
10016 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick, 10022 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
10017 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate, 10023 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
10018 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate, 10024 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
10019 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts, 10025 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
10020 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts, 10026 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
10021 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag, 10027 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
10022 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag, 10028 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
10023 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow, 10029 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
10024 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow, 10030 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
10025 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry, 10031 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
10026 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry, 10032 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
10027 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode, 10033 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
10028 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode, 10034 IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
10029 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power, 10035 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
10030 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power, 10036 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
10031 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy, 10037 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
10032 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy, 10038 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
10033 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy, 10039 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
10034 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy, 10040 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
10035 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie, 10041 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
10036 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie, 10042 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
10037 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme, 10043 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
10038 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth, 10044 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
10039 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth, 10045 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
10040 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext, 10046 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
10041 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext, 10047 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
10042}; 10048};
10043 10049
10044enum { 10050enum {
@@ -11667,7 +11673,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11667 if (priv->prom_net_dev) 11673 if (priv->prom_net_dev)
11668 return -EPERM; 11674 return -EPERM;
11669 11675
11670 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1); 11676 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11671 if (priv->prom_net_dev == NULL) 11677 if (priv->prom_net_dev == NULL)
11672 return -ENOMEM; 11678 return -ENOMEM;
11673 11679
@@ -11686,7 +11692,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11686 11692
11687 rc = register_netdev(priv->prom_net_dev); 11693 rc = register_netdev(priv->prom_net_dev);
11688 if (rc) { 11694 if (rc) {
11689 free_ieee80211(priv->prom_net_dev, 1); 11695 free_libipw(priv->prom_net_dev, 1);
11690 priv->prom_net_dev = NULL; 11696 priv->prom_net_dev = NULL;
11691 return rc; 11697 return rc;
11692 } 11698 }
@@ -11700,7 +11706,7 @@ static void ipw_prom_free(struct ipw_priv *priv)
11700 return; 11706 return;
11701 11707
11702 unregister_netdev(priv->prom_net_dev); 11708 unregister_netdev(priv->prom_net_dev);
11703 free_ieee80211(priv->prom_net_dev, 1); 11709 free_libipw(priv->prom_net_dev, 1);
11704 11710
11705 priv->prom_net_dev = NULL; 11711 priv->prom_net_dev = NULL;
11706} 11712}
@@ -11728,7 +11734,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11728 struct ipw_priv *priv; 11734 struct ipw_priv *priv;
11729 int i; 11735 int i;
11730 11736
11731 net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0); 11737 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11732 if (net_dev == NULL) { 11738 if (net_dev == NULL) {
11733 err = -ENOMEM; 11739 err = -ENOMEM;
11734 goto out; 11740 goto out;
@@ -11748,7 +11754,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11748 mutex_init(&priv->mutex); 11754 mutex_init(&priv->mutex);
11749 if (pci_enable_device(pdev)) { 11755 if (pci_enable_device(pdev)) {
11750 err = -ENODEV; 11756 err = -ENODEV;
11751 goto out_free_ieee80211; 11757 goto out_free_libipw;
11752 } 11758 }
11753 11759
11754 pci_set_master(pdev); 11760 pci_set_master(pdev);
@@ -11875,8 +11881,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11875 out_pci_disable_device: 11881 out_pci_disable_device:
11876 pci_disable_device(pdev); 11882 pci_disable_device(pdev);
11877 pci_set_drvdata(pdev, NULL); 11883 pci_set_drvdata(pdev, NULL);
11878 out_free_ieee80211: 11884 out_free_libipw:
11879 free_ieee80211(priv->net_dev, 0); 11885 free_libipw(priv->net_dev, 0);
11880 out: 11886 out:
11881 return err; 11887 return err;
11882} 11888}
@@ -11943,11 +11949,11 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11943 pci_release_regions(pdev); 11949 pci_release_regions(pdev);
11944 pci_disable_device(pdev); 11950 pci_disable_device(pdev);
11945 pci_set_drvdata(pdev, NULL); 11951 pci_set_drvdata(pdev, NULL);
11946 /* wiphy_unregister needs to be here, before free_ieee80211 */ 11952 /* wiphy_unregister needs to be here, before free_libipw */
11947 wiphy_unregister(priv->ieee->wdev.wiphy); 11953 wiphy_unregister(priv->ieee->wdev.wiphy);
11948 kfree(priv->ieee->a_band.channels); 11954 kfree(priv->ieee->a_band.channels);
11949 kfree(priv->ieee->bg_band.channels); 11955 kfree(priv->ieee->bg_band.channels);
11950 free_ieee80211(priv->net_dev, 0); 11956 free_libipw(priv->net_dev, 0);
11951 free_firmware(); 11957 free_firmware();
11952} 11958}
11953 11959
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index a6d5e42647e4..284b0e4cb815 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -64,7 +64,7 @@
64extern u32 libipw_debug_level; 64extern u32 libipw_debug_level;
65#define LIBIPW_DEBUG(level, fmt, args...) \ 65#define LIBIPW_DEBUG(level, fmt, args...) \
66do { if (libipw_debug_level & (level)) \ 66do { if (libipw_debug_level & (level)) \
67 printk(KERN_DEBUG "ieee80211: %c %s " fmt, \ 67 printk(KERN_DEBUG "libipw: %c %s " fmt, \
68 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0) 68 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
69static inline bool libipw_ratelimit_debug(u32 level) 69static inline bool libipw_ratelimit_debug(u32 level)
70{ 70{
@@ -116,8 +116,8 @@ static inline bool libipw_ratelimit_debug(u32 level)
116#define LIBIPW_DL_RX (1<<9) 116#define LIBIPW_DL_RX (1<<9)
117#define LIBIPW_DL_QOS (1<<31) 117#define LIBIPW_DL_QOS (1<<31)
118 118
119#define LIBIPW_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a) 119#define LIBIPW_ERROR(f, a...) printk(KERN_ERR "libipw: " f, ## a)
120#define LIBIPW_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a) 120#define LIBIPW_WARNING(f, a...) printk(KERN_WARNING "libipw: " f, ## a)
121#define LIBIPW_DEBUG_INFO(f, a...) LIBIPW_DEBUG(LIBIPW_DL_INFO, f, ## a) 121#define LIBIPW_DEBUG_INFO(f, a...) LIBIPW_DEBUG(LIBIPW_DL_INFO, f, ## a)
122 122
123#define LIBIPW_DEBUG_WX(f, a...) LIBIPW_DEBUG(LIBIPW_DL_WX, f, ## a) 123#define LIBIPW_DEBUG_WX(f, a...) LIBIPW_DEBUG(LIBIPW_DL_WX, f, ## a)
@@ -905,7 +905,7 @@ struct libipw_device {
905 struct libipw_reassoc_request * req); 905 struct libipw_reassoc_request * req);
906 906
907 /* This must be the last item so that it points to the data 907 /* This must be the last item so that it points to the data
908 * allocated beyond this structure by alloc_ieee80211 */ 908 * allocated beyond this structure by alloc_libipw */
909 u8 priv[0]; 909 u8 priv[0];
910}; 910};
911 911
@@ -1017,9 +1017,9 @@ static inline int libipw_is_cck_rate(u8 rate)
1017 return 0; 1017 return 0;
1018} 1018}
1019 1019
1020/* ieee80211.c */ 1020/* libipw.c */
1021extern void free_ieee80211(struct net_device *dev, int monitor); 1021extern void free_libipw(struct net_device *dev, int monitor);
1022extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor); 1022extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
1023extern int libipw_change_mtu(struct net_device *dev, int new_mtu); 1023extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
1024 1024
1025extern void libipw_networks_age(struct libipw_device *ieee, 1025extern void libipw_networks_age(struct libipw_device *ieee,
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 2fa55867bd8b..55965408ff3f 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -53,7 +53,7 @@
53#include "libipw.h" 53#include "libipw.h"
54 54
55#define DRV_DESCRIPTION "802.11 data/management/control stack" 55#define DRV_DESCRIPTION "802.11 data/management/control stack"
56#define DRV_NAME "ieee80211" 56#define DRV_NAME "libipw"
57#define DRV_VERSION LIBIPW_VERSION 57#define DRV_VERSION LIBIPW_VERSION
58#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>" 58#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
59 59
@@ -140,7 +140,7 @@ int libipw_change_mtu(struct net_device *dev, int new_mtu)
140} 140}
141EXPORT_SYMBOL(libipw_change_mtu); 141EXPORT_SYMBOL(libipw_change_mtu);
142 142
143struct net_device *alloc_ieee80211(int sizeof_priv, int monitor) 143struct net_device *alloc_libipw(int sizeof_priv, int monitor)
144{ 144{
145 struct libipw_device *ieee; 145 struct libipw_device *ieee;
146 struct net_device *dev; 146 struct net_device *dev;
@@ -222,8 +222,9 @@ failed_free_netdev:
222failed: 222failed:
223 return NULL; 223 return NULL;
224} 224}
225EXPORT_SYMBOL(alloc_libipw);
225 226
226void free_ieee80211(struct net_device *dev, int monitor) 227void free_libipw(struct net_device *dev, int monitor)
227{ 228{
228 struct libipw_device *ieee = netdev_priv(dev); 229 struct libipw_device *ieee = netdev_priv(dev);
229 230
@@ -237,6 +238,7 @@ void free_ieee80211(struct net_device *dev, int monitor)
237 238
238 free_netdev(dev); 239 free_netdev(dev);
239} 240}
241EXPORT_SYMBOL(free_libipw);
240 242
241#ifdef CONFIG_LIBIPW_DEBUG 243#ifdef CONFIG_LIBIPW_DEBUG
242 244
@@ -291,7 +293,7 @@ static int __init libipw_init(void)
291 struct proc_dir_entry *e; 293 struct proc_dir_entry *e;
292 294
293 libipw_debug_level = debug; 295 libipw_debug_level = debug;
294 libipw_proc = proc_mkdir(DRV_NAME, init_net.proc_net); 296 libipw_proc = proc_mkdir("ieee80211", init_net.proc_net);
295 if (libipw_proc == NULL) { 297 if (libipw_proc == NULL) {
296 LIBIPW_ERROR("Unable to create " DRV_NAME 298 LIBIPW_ERROR("Unable to create " DRV_NAME
297 " proc directory\n"); 299 " proc directory\n");
@@ -331,6 +333,3 @@ MODULE_PARM_DESC(debug, "debug output mask");
331 333
332module_exit(libipw_exit); 334module_exit(libipw_exit);
333module_init(libipw_init); 335module_init(libipw_init);
334
335EXPORT_SYMBOL(alloc_ieee80211);
336EXPORT_SYMBOL(free_ieee80211);
diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
index 39a34da52d52..0de1b1893220 100644
--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
@@ -918,7 +918,6 @@ void libipw_rx_any(struct libipw_device *ieee,
918drop_free: 918drop_free:
919 dev_kfree_skb_irq(skb); 919 dev_kfree_skb_irq(skb);
920 ieee->dev->stats.rx_dropped++; 920 ieee->dev->stats.rx_dropped++;
921 return;
922} 921}
923 922
924#define MGMT_FRAME_FIXED_PART_LENGTH 0x24 923#define MGMT_FRAME_FIXED_PART_LENGTH 0x24
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 4e378faee650..7c7235385513 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -9,7 +9,10 @@ CFLAGS_iwl-devtrace.o := -I$(src)
9 9
10# AGN 10# AGN
11obj-$(CONFIG_IWLAGN) += iwlagn.o 11obj-$(CONFIG_IWLAGN) += iwlagn.o
12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o 12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
14iwlagn-objs += iwl-agn-lib.o
15iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
13 16
14iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 17iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
15iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 18iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
@@ -19,5 +22,6 @@ iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
19# 3945 22# 3945
20obj-$(CONFIG_IWL3945) += iwl3945.o 23obj-$(CONFIG_IWL3945) += iwl3945.o
21iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o 24iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
25iwl3945-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-3945-debugfs.o
22 26
23ccflags-y += -D__CHECK_ENDIAN__ 27ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 3bf2e6e9b2d9..6be2992f8f21 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -42,9 +42,11 @@
42#include "iwl-core.h" 42#include "iwl-core.h"
43#include "iwl-io.h" 43#include "iwl-io.h"
44#include "iwl-sta.h" 44#include "iwl-sta.h"
45#include "iwl-agn.h"
45#include "iwl-helpers.h" 46#include "iwl-helpers.h"
46#include "iwl-5000-hw.h" 47#include "iwl-agn-hw.h"
47#include "iwl-agn-led.h" 48#include "iwl-agn-led.h"
49#include "iwl-agn-debugfs.h"
48 50
49/* Highest firmware API version supported */ 51/* Highest firmware API version supported */
50#define IWL1000_UCODE_API_MAX 3 52#define IWL1000_UCODE_API_MAX 3
@@ -117,7 +119,7 @@ static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
117static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) 119static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
118{ 120{
119 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 121 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
120 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES) 122 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
121 priv->cfg->num_of_queues = 123 priv->cfg->num_of_queues =
122 priv->cfg->mod_params->num_of_queues; 124 priv->cfg->mod_params->num_of_queues;
123 125
@@ -125,13 +127,13 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
125 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 127 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
126 priv->hw_params.scd_bc_tbls_size = 128 priv->hw_params.scd_bc_tbls_size =
127 priv->cfg->num_of_queues * 129 priv->cfg->num_of_queues *
128 sizeof(struct iwl5000_scd_bc_tbl); 130 sizeof(struct iwlagn_scd_bc_tbl);
129 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 131 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
130 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 132 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
131 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 133 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
132 134
133 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE; 135 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
134 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE; 136 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
135 137
136 priv->hw_params.max_bsm_size = 0; 138 priv->hw_params.max_bsm_size = 0;
137 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 139 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@@ -161,25 +163,25 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
161 163
162static struct iwl_lib_ops iwl1000_lib = { 164static struct iwl_lib_ops iwl1000_lib = {
163 .set_hw_params = iwl1000_hw_set_hw_params, 165 .set_hw_params = iwl1000_hw_set_hw_params,
164 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 166 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
165 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 167 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
166 .txq_set_sched = iwl5000_txq_set_sched, 168 .txq_set_sched = iwlagn_txq_set_sched,
167 .txq_agg_enable = iwl5000_txq_agg_enable, 169 .txq_agg_enable = iwlagn_txq_agg_enable,
168 .txq_agg_disable = iwl5000_txq_agg_disable, 170 .txq_agg_disable = iwlagn_txq_agg_disable,
169 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 171 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
170 .txq_free_tfd = iwl_hw_txq_free_tfd, 172 .txq_free_tfd = iwl_hw_txq_free_tfd,
171 .txq_init = iwl_hw_tx_queue_init, 173 .txq_init = iwl_hw_tx_queue_init,
172 .rx_handler_setup = iwl5000_rx_handler_setup, 174 .rx_handler_setup = iwlagn_rx_handler_setup,
173 .setup_deferred_work = iwl5000_setup_deferred_work, 175 .setup_deferred_work = iwlagn_setup_deferred_work,
174 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 176 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
175 .load_ucode = iwl5000_load_ucode, 177 .load_ucode = iwlagn_load_ucode,
176 .dump_nic_event_log = iwl_dump_nic_event_log, 178 .dump_nic_event_log = iwl_dump_nic_event_log,
177 .dump_nic_error_log = iwl_dump_nic_error_log, 179 .dump_nic_error_log = iwl_dump_nic_error_log,
178 .dump_csr = iwl_dump_csr, 180 .dump_csr = iwl_dump_csr,
179 .dump_fh = iwl_dump_fh, 181 .dump_fh = iwl_dump_fh,
180 .init_alive_start = iwl5000_init_alive_start, 182 .init_alive_start = iwlagn_init_alive_start,
181 .alive_notify = iwl5000_alive_notify, 183 .alive_notify = iwlagn_alive_notify,
182 .send_tx_power = iwl5000_send_tx_power, 184 .send_tx_power = iwlagn_send_tx_power,
183 .update_chain_flags = iwl_update_chain_flags, 185 .update_chain_flags = iwl_update_chain_flags,
184 .apm_ops = { 186 .apm_ops = {
185 .init = iwl_apm_init, 187 .init = iwl_apm_init,
@@ -189,40 +191,47 @@ static struct iwl_lib_ops iwl1000_lib = {
189 }, 191 },
190 .eeprom_ops = { 192 .eeprom_ops = {
191 .regulatory_bands = { 193 .regulatory_bands = {
192 EEPROM_5000_REG_BAND_1_CHANNELS, 194 EEPROM_REG_BAND_1_CHANNELS,
193 EEPROM_5000_REG_BAND_2_CHANNELS, 195 EEPROM_REG_BAND_2_CHANNELS,
194 EEPROM_5000_REG_BAND_3_CHANNELS, 196 EEPROM_REG_BAND_3_CHANNELS,
195 EEPROM_5000_REG_BAND_4_CHANNELS, 197 EEPROM_REG_BAND_4_CHANNELS,
196 EEPROM_5000_REG_BAND_5_CHANNELS, 198 EEPROM_REG_BAND_5_CHANNELS,
197 EEPROM_5000_REG_BAND_24_HT40_CHANNELS, 199 EEPROM_REG_BAND_24_HT40_CHANNELS,
198 EEPROM_5000_REG_BAND_52_HT40_CHANNELS 200 EEPROM_REG_BAND_52_HT40_CHANNELS
199 }, 201 },
200 .verify_signature = iwlcore_eeprom_verify_signature, 202 .verify_signature = iwlcore_eeprom_verify_signature,
201 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 203 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
202 .release_semaphore = iwlcore_eeprom_release_semaphore, 204 .release_semaphore = iwlcore_eeprom_release_semaphore,
203 .calib_version = iwl5000_eeprom_calib_version, 205 .calib_version = iwlagn_eeprom_calib_version,
204 .query_addr = iwl5000_eeprom_query_addr, 206 .query_addr = iwlagn_eeprom_query_addr,
205 }, 207 },
206 .post_associate = iwl_post_associate, 208 .post_associate = iwl_post_associate,
207 .isr = iwl_isr_ict, 209 .isr = iwl_isr_ict,
208 .config_ap = iwl_config_ap, 210 .config_ap = iwl_config_ap,
209 .temp_ops = { 211 .temp_ops = {
210 .temperature = iwl5000_temperature, 212 .temperature = iwlagn_temperature,
211 .set_ct_kill = iwl1000_set_ct_threshold, 213 .set_ct_kill = iwl1000_set_ct_threshold,
212 }, 214 },
213 .add_bcast_station = iwl_add_bcast_station, 215 .manage_ibss_station = iwlagn_manage_ibss_station,
216 .debugfs_ops = {
217 .rx_stats_read = iwl_ucode_rx_stats_read,
218 .tx_stats_read = iwl_ucode_tx_stats_read,
219 .general_stats_read = iwl_ucode_general_stats_read,
220 },
221 .recover_from_tx_stall = iwl_bg_monitor_recover,
222 .check_plcp_health = iwl_good_plcp_health,
223 .check_ack_health = iwl_good_ack_health,
214}; 224};
215 225
216static const struct iwl_ops iwl1000_ops = { 226static const struct iwl_ops iwl1000_ops = {
217 .ucode = &iwl5000_ucode,
218 .lib = &iwl1000_lib, 227 .lib = &iwl1000_lib,
219 .hcmd = &iwl5000_hcmd, 228 .hcmd = &iwlagn_hcmd,
220 .utils = &iwl5000_hcmd_utils, 229 .utils = &iwlagn_hcmd_utils,
221 .led = &iwlagn_led_ops, 230 .led = &iwlagn_led_ops,
222}; 231};
223 232
224struct iwl_cfg iwl1000_bgn_cfg = { 233struct iwl_cfg iwl1000_bgn_cfg = {
225 .name = "1000 Series BGN", 234 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
226 .fw_name_pre = IWL1000_FW_PRE, 235 .fw_name_pre = IWL1000_FW_PRE,
227 .ucode_api_max = IWL1000_UCODE_API_MAX, 236 .ucode_api_max = IWL1000_UCODE_API_MAX,
228 .ucode_api_min = IWL1000_UCODE_API_MIN, 237 .ucode_api_min = IWL1000_UCODE_API_MIN,
@@ -230,10 +239,10 @@ struct iwl_cfg iwl1000_bgn_cfg = {
230 .ops = &iwl1000_ops, 239 .ops = &iwl1000_ops,
231 .eeprom_size = OTP_LOW_IMAGE_SIZE, 240 .eeprom_size = OTP_LOW_IMAGE_SIZE,
232 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, 241 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
233 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 242 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
234 .num_of_queues = IWL50_NUM_QUEUES, 243 .num_of_queues = IWLAGN_NUM_QUEUES,
235 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 244 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
236 .mod_params = &iwl50_mod_params, 245 .mod_params = &iwlagn_mod_params,
237 .valid_tx_ant = ANT_A, 246 .valid_tx_ant = ANT_A,
238 .valid_rx_ant = ANT_AB, 247 .valid_rx_ant = ANT_AB,
239 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 248 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -248,10 +257,15 @@ struct iwl_cfg iwl1000_bgn_cfg = {
248 .support_ct_kill_exit = true, 257 .support_ct_kill_exit = true,
249 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 258 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
250 .chain_noise_scale = 1000, 259 .chain_noise_scale = 1000,
260 .monitor_recover_period = IWL_MONITORING_PERIOD,
261 .max_event_log_size = 128,
262 .ucode_tracing = true,
263 .sensitivity_calib_by_driver = true,
264 .chain_noise_calib_by_driver = true,
251}; 265};
252 266
253struct iwl_cfg iwl1000_bg_cfg = { 267struct iwl_cfg iwl1000_bg_cfg = {
254 .name = "1000 Series BG", 268 .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
255 .fw_name_pre = IWL1000_FW_PRE, 269 .fw_name_pre = IWL1000_FW_PRE,
256 .ucode_api_max = IWL1000_UCODE_API_MAX, 270 .ucode_api_max = IWL1000_UCODE_API_MAX,
257 .ucode_api_min = IWL1000_UCODE_API_MIN, 271 .ucode_api_min = IWL1000_UCODE_API_MIN,
@@ -259,10 +273,10 @@ struct iwl_cfg iwl1000_bg_cfg = {
259 .ops = &iwl1000_ops, 273 .ops = &iwl1000_ops,
260 .eeprom_size = OTP_LOW_IMAGE_SIZE, 274 .eeprom_size = OTP_LOW_IMAGE_SIZE,
261 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, 275 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
262 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 276 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
263 .num_of_queues = IWL50_NUM_QUEUES, 277 .num_of_queues = IWLAGN_NUM_QUEUES,
264 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 278 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
265 .mod_params = &iwl50_mod_params, 279 .mod_params = &iwlagn_mod_params,
266 .valid_tx_ant = ANT_A, 280 .valid_tx_ant = ANT_A,
267 .valid_rx_ant = ANT_AB, 281 .valid_rx_ant = ANT_AB,
268 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 282 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -270,12 +284,16 @@ struct iwl_cfg iwl1000_bg_cfg = {
270 .use_bsm = false, 284 .use_bsm = false,
271 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 285 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
272 .shadow_ram_support = false, 286 .shadow_ram_support = false,
273 .ht_greenfield_support = true,
274 .led_compensation = 51, 287 .led_compensation = 51,
275 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 288 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
276 .support_ct_kill_exit = true, 289 .support_ct_kill_exit = true,
277 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 290 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
278 .chain_noise_scale = 1000, 291 .chain_noise_scale = 1000,
292 .monitor_recover_period = IWL_MONITORING_PERIOD,
293 .max_event_log_size = 128,
294 .ucode_tracing = true,
295 .sensitivity_calib_by_driver = true,
296 .chain_noise_calib_by_driver = true,
279}; 297};
280 298
281MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); 299MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
new file mode 100644
index 000000000000..6a9c64a50e36
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.c
@@ -0,0 +1,500 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-3945-debugfs.h"
30
31ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
32 char __user *user_buf,
33 size_t count, loff_t *ppos)
34{
35 struct iwl_priv *priv = file->private_data;
36 int pos = 0;
37 char *buf;
38 int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
39 sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
40 ssize_t ret;
41 struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
42 struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
43 struct iwl39_statistics_rx_non_phy *general, *accum_general;
44 struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
45
46 if (!iwl_is_alive(priv))
47 return -EAGAIN;
48
49 buf = kzalloc(bufsz, GFP_KERNEL);
50 if (!buf) {
51 IWL_ERR(priv, "Can not allocate Buffer\n");
52 return -ENOMEM;
53 }
54
55 /*
56 * The statistic information display here is based on
57 * the last statistics notification from uCode
58 * might not reflect the current uCode activity
59 */
60 ofdm = &priv->_3945.statistics.rx.ofdm;
61 cck = &priv->_3945.statistics.rx.cck;
62 general = &priv->_3945.statistics.rx.general;
63 accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
64 accum_cck = &priv->_3945.accum_statistics.rx.cck;
65 accum_general = &priv->_3945.accum_statistics.rx.general;
66 delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
67 delta_cck = &priv->_3945.delta_statistics.rx.cck;
68 delta_general = &priv->_3945.delta_statistics.rx.general;
69 max_ofdm = &priv->_3945.max_delta.rx.ofdm;
70 max_cck = &priv->_3945.max_delta.rx.cck;
71 max_general = &priv->_3945.max_delta.rx.general;
72
73 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
74 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
75 "acumulative delta max\n",
76 "Statistics_Rx - OFDM:");
77 pos += scnprintf(buf + pos, bufsz - pos,
78 " %-30s %10u %10u %10u %10u\n",
79 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
80 accum_ofdm->ina_cnt,
81 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
82 pos += scnprintf(buf + pos, bufsz - pos,
83 " %-30s %10u %10u %10u %10u\n",
84 "fina_cnt:",
85 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
86 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
87 pos += scnprintf(buf + pos, bufsz - pos,
88 " %-30s %10u %10u %10u %10u\n", "plcp_err:",
89 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
90 delta_ofdm->plcp_err, max_ofdm->plcp_err);
91 pos += scnprintf(buf + pos, bufsz - pos,
92 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
93 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
94 delta_ofdm->crc32_err, max_ofdm->crc32_err);
95 pos += scnprintf(buf + pos, bufsz - pos,
96 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
97 le32_to_cpu(ofdm->overrun_err),
98 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
99 max_ofdm->overrun_err);
100 pos += scnprintf(buf + pos, bufsz - pos,
101 " %-30s %10u %10u %10u %10u\n",
102 "early_overrun_err:",
103 le32_to_cpu(ofdm->early_overrun_err),
104 accum_ofdm->early_overrun_err,
105 delta_ofdm->early_overrun_err,
106 max_ofdm->early_overrun_err);
107 pos += scnprintf(buf + pos, bufsz - pos,
108 " %-30s %10u %10u %10u %10u\n",
109 "crc32_good:", le32_to_cpu(ofdm->crc32_good),
110 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
111 max_ofdm->crc32_good);
112 pos += scnprintf(buf + pos, bufsz - pos,
113 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
114 le32_to_cpu(ofdm->false_alarm_cnt),
115 accum_ofdm->false_alarm_cnt,
116 delta_ofdm->false_alarm_cnt,
117 max_ofdm->false_alarm_cnt);
118 pos += scnprintf(buf + pos, bufsz - pos,
119 " %-30s %10u %10u %10u %10u\n",
120 "fina_sync_err_cnt:",
121 le32_to_cpu(ofdm->fina_sync_err_cnt),
122 accum_ofdm->fina_sync_err_cnt,
123 delta_ofdm->fina_sync_err_cnt,
124 max_ofdm->fina_sync_err_cnt);
125 pos += scnprintf(buf + pos, bufsz - pos,
126 " %-30s %10u %10u %10u %10u\n",
127 "sfd_timeout:",
128 le32_to_cpu(ofdm->sfd_timeout),
129 accum_ofdm->sfd_timeout,
130 delta_ofdm->sfd_timeout,
131 max_ofdm->sfd_timeout);
132 pos += scnprintf(buf + pos, bufsz - pos,
133 " %-30s %10u %10u %10u %10u\n",
134 "fina_timeout:",
135 le32_to_cpu(ofdm->fina_timeout),
136 accum_ofdm->fina_timeout,
137 delta_ofdm->fina_timeout,
138 max_ofdm->fina_timeout);
139 pos += scnprintf(buf + pos, bufsz - pos,
140 " %-30s %10u %10u %10u %10u\n",
141 "unresponded_rts:",
142 le32_to_cpu(ofdm->unresponded_rts),
143 accum_ofdm->unresponded_rts,
144 delta_ofdm->unresponded_rts,
145 max_ofdm->unresponded_rts);
146 pos += scnprintf(buf + pos, bufsz - pos,
147 " %-30s %10u %10u %10u %10u\n",
148 "rxe_frame_lmt_ovrun:",
149 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
150 accum_ofdm->rxe_frame_limit_overrun,
151 delta_ofdm->rxe_frame_limit_overrun,
152 max_ofdm->rxe_frame_limit_overrun);
153 pos += scnprintf(buf + pos, bufsz - pos,
154 " %-30s %10u %10u %10u %10u\n",
155 "sent_ack_cnt:",
156 le32_to_cpu(ofdm->sent_ack_cnt),
157 accum_ofdm->sent_ack_cnt,
158 delta_ofdm->sent_ack_cnt,
159 max_ofdm->sent_ack_cnt);
160 pos += scnprintf(buf + pos, bufsz - pos,
161 " %-30s %10u %10u %10u %10u\n",
162 "sent_cts_cnt:",
163 le32_to_cpu(ofdm->sent_cts_cnt),
164 accum_ofdm->sent_cts_cnt,
165 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
166
167 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
168 "acumulative delta max\n",
169 "Statistics_Rx - CCK:");
170 pos += scnprintf(buf + pos, bufsz - pos,
171 " %-30s %10u %10u %10u %10u\n",
172 "ina_cnt:",
173 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
174 delta_cck->ina_cnt, max_cck->ina_cnt);
175 pos += scnprintf(buf + pos, bufsz - pos,
176 " %-30s %10u %10u %10u %10u\n",
177 "fina_cnt:",
178 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
179 delta_cck->fina_cnt, max_cck->fina_cnt);
180 pos += scnprintf(buf + pos, bufsz - pos,
181 " %-30s %10u %10u %10u %10u\n",
182 "plcp_err:",
183 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
184 delta_cck->plcp_err, max_cck->plcp_err);
185 pos += scnprintf(buf + pos, bufsz - pos,
186 " %-30s %10u %10u %10u %10u\n",
187 "crc32_err:",
188 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
189 delta_cck->crc32_err, max_cck->crc32_err);
190 pos += scnprintf(buf + pos, bufsz - pos,
191 " %-30s %10u %10u %10u %10u\n",
192 "overrun_err:",
193 le32_to_cpu(cck->overrun_err),
194 accum_cck->overrun_err,
195 delta_cck->overrun_err, max_cck->overrun_err);
196 pos += scnprintf(buf + pos, bufsz - pos,
197 " %-30s %10u %10u %10u %10u\n",
198 "early_overrun_err:",
199 le32_to_cpu(cck->early_overrun_err),
200 accum_cck->early_overrun_err,
201 delta_cck->early_overrun_err,
202 max_cck->early_overrun_err);
203 pos += scnprintf(buf + pos, bufsz - pos,
204 " %-30s %10u %10u %10u %10u\n",
205 "crc32_good:",
206 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
207 delta_cck->crc32_good,
208 max_cck->crc32_good);
209 pos += scnprintf(buf + pos, bufsz - pos,
210 " %-30s %10u %10u %10u %10u\n",
211 "false_alarm_cnt:",
212 le32_to_cpu(cck->false_alarm_cnt),
213 accum_cck->false_alarm_cnt,
214 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
215 pos += scnprintf(buf + pos, bufsz - pos,
216 " %-30s %10u %10u %10u %10u\n",
217 "fina_sync_err_cnt:",
218 le32_to_cpu(cck->fina_sync_err_cnt),
219 accum_cck->fina_sync_err_cnt,
220 delta_cck->fina_sync_err_cnt,
221 max_cck->fina_sync_err_cnt);
222 pos += scnprintf(buf + pos, bufsz - pos,
223 " %-30s %10u %10u %10u %10u\n",
224 "sfd_timeout:",
225 le32_to_cpu(cck->sfd_timeout),
226 accum_cck->sfd_timeout,
227 delta_cck->sfd_timeout, max_cck->sfd_timeout);
228 pos += scnprintf(buf + pos, bufsz - pos,
229 " %-30s %10u %10u %10u %10u\n",
230 "fina_timeout:",
231 le32_to_cpu(cck->fina_timeout),
232 accum_cck->fina_timeout,
233 delta_cck->fina_timeout, max_cck->fina_timeout);
234 pos += scnprintf(buf + pos, bufsz - pos,
235 " %-30s %10u %10u %10u %10u\n",
236 "unresponded_rts:",
237 le32_to_cpu(cck->unresponded_rts),
238 accum_cck->unresponded_rts,
239 delta_cck->unresponded_rts,
240 max_cck->unresponded_rts);
241 pos += scnprintf(buf + pos, bufsz - pos,
242 " %-30s %10u %10u %10u %10u\n",
243 "rxe_frame_lmt_ovrun:",
244 le32_to_cpu(cck->rxe_frame_limit_overrun),
245 accum_cck->rxe_frame_limit_overrun,
246 delta_cck->rxe_frame_limit_overrun,
247 max_cck->rxe_frame_limit_overrun);
248 pos += scnprintf(buf + pos, bufsz - pos,
249 " %-30s %10u %10u %10u %10u\n",
250 "sent_ack_cnt:",
251 le32_to_cpu(cck->sent_ack_cnt),
252 accum_cck->sent_ack_cnt,
253 delta_cck->sent_ack_cnt,
254 max_cck->sent_ack_cnt);
255 pos += scnprintf(buf + pos, bufsz - pos,
256 " %-30s %10u %10u %10u %10u\n",
257 "sent_cts_cnt:",
258 le32_to_cpu(cck->sent_cts_cnt),
259 accum_cck->sent_cts_cnt,
260 delta_cck->sent_cts_cnt,
261 max_cck->sent_cts_cnt);
262
263 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
264 "acumulative delta max\n",
265 "Statistics_Rx - GENERAL:");
266 pos += scnprintf(buf + pos, bufsz - pos,
267 " %-30s %10u %10u %10u %10u\n",
268 "bogus_cts:",
269 le32_to_cpu(general->bogus_cts),
270 accum_general->bogus_cts,
271 delta_general->bogus_cts, max_general->bogus_cts);
272 pos += scnprintf(buf + pos, bufsz - pos,
273 " %-30s %10u %10u %10u %10u\n",
274 "bogus_ack:",
275 le32_to_cpu(general->bogus_ack),
276 accum_general->bogus_ack,
277 delta_general->bogus_ack, max_general->bogus_ack);
278 pos += scnprintf(buf + pos, bufsz - pos,
279 " %-30s %10u %10u %10u %10u\n",
280 "non_bssid_frames:",
281 le32_to_cpu(general->non_bssid_frames),
282 accum_general->non_bssid_frames,
283 delta_general->non_bssid_frames,
284 max_general->non_bssid_frames);
285 pos += scnprintf(buf + pos, bufsz - pos,
286 " %-30s %10u %10u %10u %10u\n",
287 "filtered_frames:",
288 le32_to_cpu(general->filtered_frames),
289 accum_general->filtered_frames,
290 delta_general->filtered_frames,
291 max_general->filtered_frames);
292 pos += scnprintf(buf + pos, bufsz - pos,
293 " %-30s %10u %10u %10u %10u\n",
294 "non_channel_beacons:",
295 le32_to_cpu(general->non_channel_beacons),
296 accum_general->non_channel_beacons,
297 delta_general->non_channel_beacons,
298 max_general->non_channel_beacons);
299
300 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
301 kfree(buf);
302 return ret;
303}
304
305ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
306 char __user *user_buf,
307 size_t count, loff_t *ppos)
308{
309 struct iwl_priv *priv = file->private_data;
310 int pos = 0;
311 char *buf;
312 int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
313 ssize_t ret;
314 struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
315
316 if (!iwl_is_alive(priv))
317 return -EAGAIN;
318
319 buf = kzalloc(bufsz, GFP_KERNEL);
320 if (!buf) {
321 IWL_ERR(priv, "Can not allocate Buffer\n");
322 return -ENOMEM;
323 }
324
325 /*
326 * The statistic information display here is based on
327 * the last statistics notification from uCode
328 * might not reflect the current uCode activity
329 */
330 tx = &priv->_3945.statistics.tx;
331 accum_tx = &priv->_3945.accum_statistics.tx;
332 delta_tx = &priv->_3945.delta_statistics.tx;
333 max_tx = &priv->_3945.max_delta.tx;
334 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
335 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
336 "acumulative delta max\n",
337 "Statistics_Tx:");
338 pos += scnprintf(buf + pos, bufsz - pos,
339 " %-30s %10u %10u %10u %10u\n",
340 "preamble:",
341 le32_to_cpu(tx->preamble_cnt),
342 accum_tx->preamble_cnt,
343 delta_tx->preamble_cnt, max_tx->preamble_cnt);
344 pos += scnprintf(buf + pos, bufsz - pos,
345 " %-30s %10u %10u %10u %10u\n",
346 "rx_detected_cnt:",
347 le32_to_cpu(tx->rx_detected_cnt),
348 accum_tx->rx_detected_cnt,
349 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
350 pos += scnprintf(buf + pos, bufsz - pos,
351 " %-30s %10u %10u %10u %10u\n",
352 "bt_prio_defer_cnt:",
353 le32_to_cpu(tx->bt_prio_defer_cnt),
354 accum_tx->bt_prio_defer_cnt,
355 delta_tx->bt_prio_defer_cnt,
356 max_tx->bt_prio_defer_cnt);
357 pos += scnprintf(buf + pos, bufsz - pos,
358 " %-30s %10u %10u %10u %10u\n",
359 "bt_prio_kill_cnt:",
360 le32_to_cpu(tx->bt_prio_kill_cnt),
361 accum_tx->bt_prio_kill_cnt,
362 delta_tx->bt_prio_kill_cnt,
363 max_tx->bt_prio_kill_cnt);
364 pos += scnprintf(buf + pos, bufsz - pos,
365 " %-30s %10u %10u %10u %10u\n",
366 "few_bytes_cnt:",
367 le32_to_cpu(tx->few_bytes_cnt),
368 accum_tx->few_bytes_cnt,
369 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
370 pos += scnprintf(buf + pos, bufsz - pos,
371 " %-30s %10u %10u %10u %10u\n",
372 "cts_timeout:",
373 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
374 delta_tx->cts_timeout, max_tx->cts_timeout);
375 pos += scnprintf(buf + pos, bufsz - pos,
376 " %-30s %10u %10u %10u %10u\n",
377 "ack_timeout:",
378 le32_to_cpu(tx->ack_timeout),
379 accum_tx->ack_timeout,
380 delta_tx->ack_timeout, max_tx->ack_timeout);
381 pos += scnprintf(buf + pos, bufsz - pos,
382 " %-30s %10u %10u %10u %10u\n",
383 "expected_ack_cnt:",
384 le32_to_cpu(tx->expected_ack_cnt),
385 accum_tx->expected_ack_cnt,
386 delta_tx->expected_ack_cnt,
387 max_tx->expected_ack_cnt);
388 pos += scnprintf(buf + pos, bufsz - pos,
389 " %-30s %10u %10u %10u %10u\n",
390 "actual_ack_cnt:",
391 le32_to_cpu(tx->actual_ack_cnt),
392 accum_tx->actual_ack_cnt,
393 delta_tx->actual_ack_cnt,
394 max_tx->actual_ack_cnt);
395
396 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
397 kfree(buf);
398 return ret;
399}
400
401ssize_t iwl3945_ucode_general_stats_read(struct file *file,
402 char __user *user_buf,
403 size_t count, loff_t *ppos)
404{
405 struct iwl_priv *priv = file->private_data;
406 int pos = 0;
407 char *buf;
408 int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
409 ssize_t ret;
410 struct iwl39_statistics_general *general, *accum_general;
411 struct iwl39_statistics_general *delta_general, *max_general;
412 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
413 struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
414
415 if (!iwl_is_alive(priv))
416 return -EAGAIN;
417
418 buf = kzalloc(bufsz, GFP_KERNEL);
419 if (!buf) {
420 IWL_ERR(priv, "Can not allocate Buffer\n");
421 return -ENOMEM;
422 }
423
424 /*
425 * The statistic information display here is based on
426 * the last statistics notification from uCode
427 * might not reflect the current uCode activity
428 */
429 general = &priv->_3945.statistics.general;
430 dbg = &priv->_3945.statistics.general.dbg;
431 div = &priv->_3945.statistics.general.div;
432 accum_general = &priv->_3945.accum_statistics.general;
433 delta_general = &priv->_3945.delta_statistics.general;
434 max_general = &priv->_3945.max_delta.general;
435 accum_dbg = &priv->_3945.accum_statistics.general.dbg;
436 delta_dbg = &priv->_3945.delta_statistics.general.dbg;
437 max_dbg = &priv->_3945.max_delta.general.dbg;
438 accum_div = &priv->_3945.accum_statistics.general.div;
439 delta_div = &priv->_3945.delta_statistics.general.div;
440 max_div = &priv->_3945.max_delta.general.div;
441 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
442 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
443 "acumulative delta max\n",
444 "Statistics_General:");
445 pos += scnprintf(buf + pos, bufsz - pos,
446 " %-30s %10u %10u %10u %10u\n",
447 "burst_check:",
448 le32_to_cpu(dbg->burst_check),
449 accum_dbg->burst_check,
450 delta_dbg->burst_check, max_dbg->burst_check);
451 pos += scnprintf(buf + pos, bufsz - pos,
452 " %-30s %10u %10u %10u %10u\n",
453 "burst_count:",
454 le32_to_cpu(dbg->burst_count),
455 accum_dbg->burst_count,
456 delta_dbg->burst_count, max_dbg->burst_count);
457 pos += scnprintf(buf + pos, bufsz - pos,
458 " %-30s %10u %10u %10u %10u\n",
459 "sleep_time:",
460 le32_to_cpu(general->sleep_time),
461 accum_general->sleep_time,
462 delta_general->sleep_time, max_general->sleep_time);
463 pos += scnprintf(buf + pos, bufsz - pos,
464 " %-30s %10u %10u %10u %10u\n",
465 "slots_out:",
466 le32_to_cpu(general->slots_out),
467 accum_general->slots_out,
468 delta_general->slots_out, max_general->slots_out);
469 pos += scnprintf(buf + pos, bufsz - pos,
470 " %-30s %10u %10u %10u %10u\n",
471 "slots_idle:",
472 le32_to_cpu(general->slots_idle),
473 accum_general->slots_idle,
474 delta_general->slots_idle, max_general->slots_idle);
475 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
476 le32_to_cpu(general->ttl_timestamp));
477 pos += scnprintf(buf + pos, bufsz - pos,
478 " %-30s %10u %10u %10u %10u\n",
479 "tx_on_a:",
480 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
481 delta_div->tx_on_a, max_div->tx_on_a);
482 pos += scnprintf(buf + pos, bufsz - pos,
483 " %-30s %10u %10u %10u %10u\n",
484 "tx_on_b:",
485 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
486 delta_div->tx_on_b, max_div->tx_on_b);
487 pos += scnprintf(buf + pos, bufsz - pos,
488 " %-30s %10u %10u %10u %10u\n",
489 "exec_time:",
490 le32_to_cpu(div->exec_time), accum_div->exec_time,
491 delta_div->exec_time, max_div->exec_time);
492 pos += scnprintf(buf + pos, bufsz - pos,
493 " %-30s %10u %10u %10u %10u\n",
494 "probe_time:",
495 le32_to_cpu(div->probe_time), accum_div->probe_time,
496 delta_div->probe_time, max_div->probe_time);
497 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
498 kfree(buf);
499 return ret;
500}
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
new file mode 100644
index 000000000000..70809c53c215
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-debugfs.h
@@ -0,0 +1,60 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_DEBUGFS
34ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl3945_ucode_general_stats_read(struct file *file,
39 char __user *user_buf, size_t count,
40 loff_t *ppos);
41#else
42static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
43 char __user *user_buf, size_t count,
44 loff_t *ppos)
45{
46 return 0;
47}
48static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
49 char __user *user_buf, size_t count,
50 loff_t *ppos)
51{
52 return 0;
53}
54static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
55 char __user *user_buf,
56 size_t count, loff_t *ppos)
57{
58 return 0;
59}
60#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 3a876a8ece38..91bcb4e3cdfb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -71,13 +71,11 @@
71 71
72#include "iwl-eeprom.h" 72#include "iwl-eeprom.h"
73 73
74/* Time constants */
75#define SHORT_SLOT_TIME 9
76#define LONG_SLOT_TIME 20
77
78/* RSSI to dBm */ 74/* RSSI to dBm */
79#define IWL39_RSSI_OFFSET 95 75#define IWL39_RSSI_OFFSET 95
80 76
77#define IWL_DEFAULT_TX_POWER 0x0F
78
81/* 79/*
82 * EEPROM related constants, enums, and structures. 80 * EEPROM related constants, enums, and structures.
83 */ 81 */
@@ -228,7 +226,6 @@ struct iwl3945_eeprom {
228 226
229/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */ 227/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
230#define IWL39_NUM_QUEUES 5 228#define IWL39_NUM_QUEUES 5
231#define IWL_NUM_SCAN_RATES (2)
232 229
233#define IWL_DEFAULT_TX_RETRY 15 230#define IWL_DEFAULT_TX_RETRY 15
234 231
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 902c4d4293e9..8e84a08ff951 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -330,16 +330,25 @@ static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
330 330
331} 331}
332 332
333static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband, 333/*
334 struct ieee80211_sta *sta, void *priv_sta) 334 * Called after adding a new station to initialize rate scaling
335 */
336void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
335{ 337{
336 struct iwl3945_rs_sta *rs_sta = priv_sta; 338 struct ieee80211_hw *hw = priv->hw;
337 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 339 struct ieee80211_conf *conf = &priv->hw->conf;
340 struct iwl3945_sta_priv *psta;
341 struct iwl3945_rs_sta *rs_sta;
342 struct ieee80211_supported_band *sband;
338 int i; 343 int i;
339 344
340 IWL_DEBUG_RATE(priv, "enter\n"); 345 IWL_DEBUG_INFO(priv, "enter\n");
346 if (sta_id == priv->hw_params.bcast_sta_id)
347 goto out;
341 348
342 spin_lock_init(&rs_sta->lock); 349 psta = (struct iwl3945_sta_priv *) sta->drv_priv;
350 rs_sta = &psta->rs_sta;
351 sband = hw->wiphy->bands[conf->channel->band];
343 352
344 rs_sta->priv = priv; 353 rs_sta->priv = priv;
345 354
@@ -352,9 +361,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
352 rs_sta->last_flush = jiffies; 361 rs_sta->last_flush = jiffies;
353 rs_sta->flush_time = IWL_RATE_FLUSH; 362 rs_sta->flush_time = IWL_RATE_FLUSH;
354 rs_sta->last_tx_packets = 0; 363 rs_sta->last_tx_packets = 0;
355 rs_sta->ibss_sta_added = 0;
356 364
357 init_timer(&rs_sta->rate_scale_flush);
358 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta; 365 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
359 rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush; 366 rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
360 367
@@ -373,16 +380,18 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
373 } 380 }
374 } 381 }
375 382
376 priv->sta_supp_rates = sta->supp_rates[sband->band]; 383 priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
377 /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */ 384 /* For 5 GHz band it start at IWL_FIRST_OFDM_RATE */
378 if (sband->band == IEEE80211_BAND_5GHZ) { 385 if (sband->band == IEEE80211_BAND_5GHZ) {
379 rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 386 rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
380 priv->sta_supp_rates = priv->sta_supp_rates << 387 priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
381 IWL_FIRST_OFDM_RATE; 388 IWL_FIRST_OFDM_RATE;
382 } 389 }
383 390
391out:
392 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
384 393
385 IWL_DEBUG_RATE(priv, "leave\n"); 394 IWL_DEBUG_INFO(priv, "leave\n");
386} 395}
387 396
388static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 397static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -406,6 +415,9 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
406 415
407 rs_sta = &psta->rs_sta; 416 rs_sta = &psta->rs_sta;
408 417
418 spin_lock_init(&rs_sta->lock);
419 init_timer(&rs_sta->rate_scale_flush);
420
409 IWL_DEBUG_RATE(priv, "leave\n"); 421 IWL_DEBUG_RATE(priv, "leave\n");
410 422
411 return rs_sta; 423 return rs_sta;
@@ -414,13 +426,14 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
414static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta, 426static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
415 void *priv_sta) 427 void *priv_sta)
416{ 428{
417 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; 429 struct iwl3945_rs_sta *rs_sta = priv_sta;
418 struct iwl3945_rs_sta *rs_sta = &psta->rs_sta;
419 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
420 430
421 IWL_DEBUG_RATE(priv, "enter\n"); 431 /*
432 * Be careful not to use any members of iwl3945_rs_sta (like trying
433 * to use iwl_priv to print out debugging) since it may not be fully
434 * initialized at this point.
435 */
422 del_timer_sync(&rs_sta->rate_scale_flush); 436 del_timer_sync(&rs_sta->rate_scale_flush);
423 IWL_DEBUG_RATE(priv, "leave\n");
424} 437}
425 438
426 439
@@ -459,6 +472,13 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
459 return; 472 return;
460 } 473 }
461 474
475 /* Treat uninitialized rate scaling data same as non-existing. */
476 if (!rs_sta->priv) {
477 IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
478 return;
479 }
480
481
462 rs_sta->tx_packets++; 482 rs_sta->tx_packets++;
463 483
464 scale_rate_index = first_index; 484 scale_rate_index = first_index;
@@ -525,8 +545,6 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
525 spin_unlock_irqrestore(&rs_sta->lock, flags); 545 spin_unlock_irqrestore(&rs_sta->lock, flags);
526 546
527 IWL_DEBUG_RATE(priv, "leave\n"); 547 IWL_DEBUG_RATE(priv, "leave\n");
528
529 return;
530} 548}
531 549
532static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta, 550static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
@@ -626,14 +644,19 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
626 u32 fail_count; 644 u32 fail_count;
627 s8 scale_action = 0; 645 s8 scale_action = 0;
628 unsigned long flags; 646 unsigned long flags;
629 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
630 u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0; 647 u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0;
631 s8 max_rate_idx = -1; 648 s8 max_rate_idx = -1;
632 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 649 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
633 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 650 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
634 651
635 IWL_DEBUG_RATE(priv, "enter\n"); 652 IWL_DEBUG_RATE(priv, "enter\n");
636 653
654 /* Treat uninitialized rate scaling data same as non-existing. */
655 if (rs_sta && !rs_sta->priv) {
656 IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
657 priv_sta = NULL;
658 }
659
637 if (rate_control_send_low(sta, priv_sta, txrc)) 660 if (rate_control_send_low(sta, priv_sta, txrc))
638 return; 661 return;
639 662
@@ -651,20 +674,6 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
651 if (sband->band == IEEE80211_BAND_5GHZ) 674 if (sband->band == IEEE80211_BAND_5GHZ)
652 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE; 675 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
653 676
654 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
655 !rs_sta->ibss_sta_added) {
656 u8 sta_id = iwl_find_station(priv, hdr->addr1);
657
658 if (sta_id == IWL_INVALID_STATION) {
659 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
660 hdr->addr1);
661 sta_id = iwl_add_station(priv, hdr->addr1, false,
662 CMD_ASYNC, NULL);
663 }
664 if (sta_id != IWL_INVALID_STATION)
665 rs_sta->ibss_sta_added = 1;
666 }
667
668 spin_lock_irqsave(&rs_sta->lock, flags); 677 spin_lock_irqsave(&rs_sta->lock, flags);
669 678
670 /* for recent assoc, choose best rate regarding 679 /* for recent assoc, choose best rate regarding
@@ -884,12 +893,22 @@ static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
884} 893}
885#endif 894#endif
886 895
896/*
897 * Initialization of rate scaling information is done by driver after
898 * the station is added. Since mac80211 calls this function before a
899 * station is added we ignore it.
900 */
901static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
902 struct ieee80211_sta *sta, void *priv_sta)
903{
904}
905
887static struct rate_control_ops rs_ops = { 906static struct rate_control_ops rs_ops = {
888 .module = NULL, 907 .module = NULL,
889 .name = RS_NAME, 908 .name = RS_NAME,
890 .tx_status = rs_tx_status, 909 .tx_status = rs_tx_status,
891 .get_rate = rs_get_rate, 910 .get_rate = rs_get_rate,
892 .rate_init = rs_rate_init, 911 .rate_init = rs_rate_init_stub,
893 .alloc = rs_alloc, 912 .alloc = rs_alloc,
894 .free = rs_free, 913 .free = rs_free,
895 .alloc_sta = rs_alloc_sta, 914 .alloc_sta = rs_alloc_sta,
@@ -900,7 +919,6 @@ static struct rate_control_ops rs_ops = {
900#endif 919#endif
901 920
902}; 921};
903
904void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) 922void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
905{ 923{
906 struct iwl_priv *priv = hw->priv; 924 struct iwl_priv *priv = hw->priv;
@@ -917,6 +935,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
917 sta = ieee80211_find_sta(priv->vif, 935 sta = ieee80211_find_sta(priv->vif,
918 priv->stations[sta_id].sta.sta.addr); 936 priv->stations[sta_id].sta.sta.addr);
919 if (!sta) { 937 if (!sta) {
938 IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
920 rcu_read_unlock(); 939 rcu_read_unlock();
921 return; 940 return;
922 } 941 }
@@ -947,7 +966,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
947 966
948 spin_unlock_irqrestore(&rs_sta->lock, flags); 967 spin_unlock_irqrestore(&rs_sta->lock, flags);
949 968
950 rssi = priv->last_rx_rssi; 969 rssi = priv->_3945.last_rx_rssi;
951 if (rssi == 0) 970 if (rssi == 0)
952 rssi = IWL_MIN_RSSI_VAL; 971 rssi = IWL_MIN_RSSI_VAL;
953 972
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 0728054a22d4..068f7f8435c5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -50,6 +50,7 @@
50#include "iwl-helpers.h" 50#include "iwl-helpers.h"
51#include "iwl-led.h" 51#include "iwl-led.h"
52#include "iwl-3945-led.h" 52#include "iwl-3945-led.h"
53#include "iwl-3945-debugfs.h"
53 54
54#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ 55#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
55 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 56 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -192,12 +193,12 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
192} 193}
193 194
194#ifdef CONFIG_IWLWIFI_DEBUG 195#ifdef CONFIG_IWLWIFI_DEBUG
195#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x 196#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
196 197
197static const char *iwl3945_get_tx_fail_reason(u32 status) 198static const char *iwl3945_get_tx_fail_reason(u32 status)
198{ 199{
199 switch (status & TX_STATUS_MSK) { 200 switch (status & TX_STATUS_MSK) {
200 case TX_STATUS_SUCCESS: 201 case TX_3945_STATUS_SUCCESS:
201 return "SUCCESS"; 202 return "SUCCESS";
202 TX_STATUS_ENTRY(SHORT_LIMIT); 203 TX_STATUS_ENTRY(SHORT_LIMIT);
203 TX_STATUS_ENTRY(LONG_LIMIT); 204 TX_STATUS_ENTRY(LONG_LIMIT);
@@ -243,7 +244,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
243 next_rate = IWL_RATE_6M_INDEX; 244 next_rate = IWL_RATE_6M_INDEX;
244 break; 245 break;
245 case IEEE80211_BAND_2GHZ: 246 case IEEE80211_BAND_2GHZ:
246 if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) && 247 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
247 iwl_is_associated(priv)) { 248 iwl_is_associated(priv)) {
248 if (rate == IWL_RATE_11M_INDEX) 249 if (rate == IWL_RATE_11M_INDEX)
249 next_rate = IWL_RATE_5M_INDEX; 250 next_rate = IWL_RATE_5M_INDEX;
@@ -293,7 +294,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
293 * iwl3945_rx_reply_tx - Handle Tx response 294 * iwl3945_rx_reply_tx - Handle Tx response
294 */ 295 */
295static void iwl3945_rx_reply_tx(struct iwl_priv *priv, 296static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
296 struct iwl_rx_mem_buffer *rxb) 297 struct iwl_rx_mem_buffer *rxb)
297{ 298{
298 struct iwl_rx_packet *pkt = rxb_addr(rxb); 299 struct iwl_rx_packet *pkt = rxb_addr(rxb);
299 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 300 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -351,18 +352,143 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
351 * RX handler implementations 352 * RX handler implementations
352 * 353 *
353 *****************************************************************************/ 354 *****************************************************************************/
355#ifdef CONFIG_IWLWIFI_DEBUG
356/*
357 * based on the assumption of all statistics counter are in DWORD
358 * FIXME: This function is for debugging, do not deal with
359 * the case of counters roll-over.
360 */
361static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
362 __le32 *stats)
363{
364 int i;
365 __le32 *prev_stats;
366 u32 *accum_stats;
367 u32 *delta, *max_delta;
368
369 prev_stats = (__le32 *)&priv->_3945.statistics;
370 accum_stats = (u32 *)&priv->_3945.accum_statistics;
371 delta = (u32 *)&priv->_3945.delta_statistics;
372 max_delta = (u32 *)&priv->_3945.max_delta;
373
374 for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
375 i += sizeof(__le32), stats++, prev_stats++, delta++,
376 max_delta++, accum_stats++) {
377 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
378 *delta = (le32_to_cpu(*stats) -
379 le32_to_cpu(*prev_stats));
380 *accum_stats += *delta;
381 if (*delta > *max_delta)
382 *max_delta = *delta;
383 }
384 }
385
386 /* reset accumulative statistics for "no-counter" type statistics */
387 priv->_3945.accum_statistics.general.temperature =
388 priv->_3945.statistics.general.temperature;
389 priv->_3945.accum_statistics.general.ttl_timestamp =
390 priv->_3945.statistics.general.ttl_timestamp;
391}
392#endif
393
394/**
395 * iwl3945_good_plcp_health - checks for plcp error.
396 *
397 * When the plcp error is exceeding the thresholds, reset the radio
398 * to improve the throughput.
399 */
400static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
401 struct iwl_rx_packet *pkt)
402{
403 bool rc = true;
404 struct iwl3945_notif_statistics current_stat;
405 int combined_plcp_delta;
406 unsigned int plcp_msec;
407 unsigned long plcp_received_jiffies;
408
409 memcpy(&current_stat, pkt->u.raw, sizeof(struct
410 iwl3945_notif_statistics));
411 /*
412 * check for plcp_err and trigger radio reset if it exceeds
413 * the plcp error threshold plcp_delta.
414 */
415 plcp_received_jiffies = jiffies;
416 plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
417 (long) priv->plcp_jiffies);
418 priv->plcp_jiffies = plcp_received_jiffies;
419 /*
420 * check to make sure plcp_msec is not 0 to prevent division
421 * by zero.
422 */
423 if (plcp_msec) {
424 combined_plcp_delta =
425 (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
426 le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
427
428 if ((combined_plcp_delta > 0) &&
429 ((combined_plcp_delta * 100) / plcp_msec) >
430 priv->cfg->plcp_delta_threshold) {
431 /*
432 * if plcp_err exceed the threshold, the following
433 * data is printed in csv format:
434 * Text: plcp_err exceeded %d,
435 * Received ofdm.plcp_err,
436 * Current ofdm.plcp_err,
437 * combined_plcp_delta,
438 * plcp_msec
439 */
440 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
441 "%u, %d, %u mSecs\n",
442 priv->cfg->plcp_delta_threshold,
443 le32_to_cpu(current_stat.rx.ofdm.plcp_err),
444 combined_plcp_delta, plcp_msec);
445 /*
446 * Reset the RF radio due to the high plcp
447 * error rate
448 */
449 rc = false;
450 }
451 }
452 return rc;
453}
354 454
355void iwl3945_hw_rx_statistics(struct iwl_priv *priv, 455void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
356 struct iwl_rx_mem_buffer *rxb) 456 struct iwl_rx_mem_buffer *rxb)
357{ 457{
358 struct iwl_rx_packet *pkt = rxb_addr(rxb); 458 struct iwl_rx_packet *pkt = rxb_addr(rxb);
459
359 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 460 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
360 (int)sizeof(struct iwl3945_notif_statistics), 461 (int)sizeof(struct iwl3945_notif_statistics),
361 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 462 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
463#ifdef CONFIG_IWLWIFI_DEBUG
464 iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
465#endif
466 iwl_recover_from_statistics(priv, pkt);
467
468 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
469}
470
471void iwl3945_reply_statistics(struct iwl_priv *priv,
472 struct iwl_rx_mem_buffer *rxb)
473{
474 struct iwl_rx_packet *pkt = rxb_addr(rxb);
475 __le32 *flag = (__le32 *)&pkt->u.raw;
362 476
363 memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39)); 477 if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
478#ifdef CONFIG_IWLWIFI_DEBUG
479 memset(&priv->_3945.accum_statistics, 0,
480 sizeof(struct iwl3945_notif_statistics));
481 memset(&priv->_3945.delta_statistics, 0,
482 sizeof(struct iwl3945_notif_statistics));
483 memset(&priv->_3945.max_delta, 0,
484 sizeof(struct iwl3945_notif_statistics));
485#endif
486 IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
487 }
488 iwl3945_hw_rx_statistics(priv, rxb);
364} 489}
365 490
491
366/****************************************************************************** 492/******************************************************************************
367 * 493 *
368 * Misc. internal state and helper functions 494 * Misc. internal state and helper functions
@@ -487,7 +613,7 @@ static void _iwl3945_dbg_report_frame(struct iwl_priv *priv,
487 * but you can hack it to show more, if you'd like to. */ 613 * but you can hack it to show more, if you'd like to. */
488 if (dataframe) 614 if (dataframe)
489 IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, " 615 IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
490 "len=%u, rssi=%d, chnl=%d, rate=%d, \n", 616 "len=%u, rssi=%d, chnl=%d, rate=%d,\n",
491 title, le16_to_cpu(fc), header->addr1[5], 617 title, le16_to_cpu(fc), header->addr1[5],
492 length, rssi, channel, rate); 618 length, rssi, channel, rate);
493 else { 619 else {
@@ -549,7 +675,6 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
549 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 675 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
550 u16 len = le16_to_cpu(rx_hdr->len); 676 u16 len = le16_to_cpu(rx_hdr->len);
551 struct sk_buff *skb; 677 struct sk_buff *skb;
552 int ret;
553 __le16 fc = hdr->frame_control; 678 __le16 fc = hdr->frame_control;
554 679
555 /* We received data from the HW, so stop the watchdog */ 680 /* We received data from the HW, so stop the watchdog */
@@ -566,9 +691,9 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
566 return; 691 return;
567 } 692 }
568 693
569 skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC); 694 skb = dev_alloc_skb(128);
570 if (!skb) { 695 if (!skb) {
571 IWL_ERR(priv, "alloc_skb failed\n"); 696 IWL_ERR(priv, "dev_alloc_skb failed\n");
572 return; 697 return;
573 } 698 }
574 699
@@ -577,37 +702,13 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
577 (struct ieee80211_hdr *)rxb_addr(rxb), 702 (struct ieee80211_hdr *)rxb_addr(rxb),
578 le32_to_cpu(rx_end->status), stats); 703 le32_to_cpu(rx_end->status), stats);
579 704
580 skb_reserve(skb, IWL_LINK_HDR_MAX);
581 skb_add_rx_frag(skb, 0, rxb->page, 705 skb_add_rx_frag(skb, 0, rxb->page,
582 (void *)rx_hdr->payload - (void *)pkt, len); 706 (void *)rx_hdr->payload - (void *)pkt, len);
583 707
584 /* mac80211 currently doesn't support paged SKB. Convert it to
585 * linear SKB for management frame and data frame requires
586 * software decryption or software defragementation. */
587 if (ieee80211_is_mgmt(fc) ||
588 ieee80211_has_protected(fc) ||
589 ieee80211_has_morefrags(fc) ||
590 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
591 ret = skb_linearize(skb);
592 else
593 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
594 0 : -ENOMEM;
595
596 if (ret) {
597 kfree_skb(skb);
598 goto out;
599 }
600
601 /*
602 * XXX: We cannot touch the page and its virtual memory (pkt) after
603 * here. It might have already been freed by the above skb change.
604 */
605
606 iwl_update_stats(priv, false, fc, len); 708 iwl_update_stats(priv, false, fc, len);
607 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 709 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
608 710
609 ieee80211_rx(priv->hw, skb); 711 ieee80211_rx(priv->hw, skb);
610 out:
611 priv->alloc_rxb_page--; 712 priv->alloc_rxb_page--;
612 rxb->page = NULL; 713 rxb->page = NULL;
613} 714}
@@ -623,9 +724,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
623 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); 724 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
624 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 725 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
625 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 726 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
626 int snr; 727 u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
627 u16 rx_stats_sig_avg = le16_to_cpu(rx_stats->sig_avg); 728 u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
628 u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff);
629 u8 network_packet; 729 u8 network_packet;
630 730
631 rx_status.flag = 0; 731 rx_status.flag = 0;
@@ -663,53 +763,29 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
663 /* Convert 3945's rssi indicator to dBm */ 763 /* Convert 3945's rssi indicator to dBm */
664 rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET; 764 rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
665 765
666 /* Set default noise value to -127 */ 766 IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
667 if (priv->last_rx_noise == 0) 767 rx_status.signal, rx_stats_sig_avg,
668 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; 768 rx_stats_noise_diff);
669
670 /* 3945 provides noise info for OFDM frames only.
671 * sig_avg and noise_diff are measured by the 3945's digital signal
672 * processor (DSP), and indicate linear levels of signal level and
673 * distortion/noise within the packet preamble after
674 * automatic gain control (AGC). sig_avg should stay fairly
675 * constant if the radio's AGC is working well.
676 * Since these values are linear (not dB or dBm), linear
677 * signal-to-noise ratio (SNR) is (sig_avg / noise_diff).
678 * Convert linear SNR to dB SNR, then subtract that from rssi dBm
679 * to obtain noise level in dBm.
680 * Calculate rx_status.signal (quality indicator in %) based on SNR. */
681 if (rx_stats_noise_diff) {
682 snr = rx_stats_sig_avg / rx_stats_noise_diff;
683 rx_status.noise = rx_status.signal -
684 iwl3945_calc_db_from_ratio(snr);
685 } else {
686 rx_status.noise = priv->last_rx_noise;
687 }
688
689
690 IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
691 rx_status.signal, rx_status.noise,
692 rx_stats_sig_avg, rx_stats_noise_diff);
693 769
694 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 770 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
695 771
696 network_packet = iwl3945_is_network_packet(priv, header); 772 network_packet = iwl3945_is_network_packet(priv, header);
697 773
698 IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n", 774 IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
699 network_packet ? '*' : ' ', 775 network_packet ? '*' : ' ',
700 le16_to_cpu(rx_hdr->channel), 776 le16_to_cpu(rx_hdr->channel),
701 rx_status.signal, rx_status.signal, 777 rx_status.signal, rx_status.signal,
702 rx_status.noise, rx_status.rate_idx); 778 rx_status.rate_idx);
703 779
704 /* Set "1" to report good data frames in groups of 100 */ 780 /* Set "1" to report good data frames in groups of 100 */
705 iwl3945_dbg_report_frame(priv, pkt, header, 1); 781 iwl3945_dbg_report_frame(priv, pkt, header, 1);
706 iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header); 782 iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);
707 783
708 if (network_packet) { 784 if (network_packet) {
709 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp); 785 priv->_3945.last_beacon_time =
710 priv->last_tsf = le64_to_cpu(rx_end->timestamp); 786 le32_to_cpu(rx_end->beacon_timestamp);
711 priv->last_rx_rssi = rx_status.signal; 787 priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
712 priv->last_rx_noise = rx_status.noise; 788 priv->_3945.last_rx_rssi = rx_status.signal;
713 } 789 }
714 790
715 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status); 791 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
@@ -871,7 +947,8 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
871 tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]); 947 tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
872} 948}
873 949
874u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags) 950static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
951 u16 tx_rate, u8 flags)
875{ 952{
876 unsigned long flags_spin; 953 unsigned long flags_spin;
877 struct iwl_station_entry *station; 954 struct iwl_station_entry *station;
@@ -957,7 +1034,7 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
957 iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); 1034 iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
958 1035
959 iwl_write_direct32(priv, FH39_TSSR_CBB_BASE, 1036 iwl_write_direct32(priv, FH39_TSSR_CBB_BASE,
960 priv->shared_phys); 1037 priv->_3945.shared_phys);
961 1038
962 iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG, 1039 iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
963 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | 1040 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
@@ -1049,7 +1126,7 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
1049 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); 1126 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
1050 1127
1051 if (rev_id & PCI_CFG_REV_ID_BIT_RTP) 1128 if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
1052 IWL_DEBUG_INFO(priv, "RTP type \n"); 1129 IWL_DEBUG_INFO(priv, "RTP type\n");
1053 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { 1130 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
1054 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); 1131 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
1055 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1132 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
@@ -1607,7 +1684,7 @@ static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
1607 int power; 1684 int power;
1608 1685
1609 /* Get this chnlgrp's rate-to-max/clip-powers table */ 1686 /* Get this chnlgrp's rate-to-max/clip-powers table */
1610 clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers; 1687 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
1611 1688
1612 /* Get this channel's rate-to-current-power settings table */ 1689 /* Get this channel's rate-to-current-power settings table */
1613 power_info = ch_info->power_info; 1690 power_info = ch_info->power_info;
@@ -1701,6 +1778,11 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1701 int ref_temp; 1778 int ref_temp;
1702 int temperature = priv->temperature; 1779 int temperature = priv->temperature;
1703 1780
1781 if (priv->disable_tx_power_cal ||
1782 test_bit(STATUS_SCANNING, &priv->status)) {
1783 /* do not perform tx power calibration */
1784 return 0;
1785 }
1704 /* set up new Tx power info for each and every channel, 2.4 and 5.x */ 1786 /* set up new Tx power info for each and every channel, 2.4 and 5.x */
1705 for (i = 0; i < priv->channel_count; i++) { 1787 for (i = 0; i < priv->channel_count; i++) {
1706 ch_info = &priv->channel_info[i]; 1788 ch_info = &priv->channel_info[i];
@@ -1733,7 +1815,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1733 } 1815 }
1734 1816
1735 /* Get this chnlgrp's rate-to-max/clip-powers table */ 1817 /* Get this chnlgrp's rate-to-max/clip-powers table */
1736 clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers; 1818 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
1737 1819
1738 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ 1820 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1739 for (scan_tbl_index = 0; 1821 for (scan_tbl_index = 0;
@@ -1911,6 +1993,8 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1911 "configuration (%d).\n", rc); 1993 "configuration (%d).\n", rc);
1912 return rc; 1994 return rc;
1913 } 1995 }
1996 iwl_clear_ucode_stations(priv);
1997 iwl_restore_stations(priv);
1914 } 1998 }
1915 1999
1916 IWL_DEBUG_INFO(priv, "Sending RXON\n" 2000 IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1941,7 +2025,10 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1941 2025
1942 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); 2026 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1943 2027
1944 iwl_clear_stations_table(priv); 2028 if (!new_assoc) {
2029 iwl_clear_ucode_stations(priv);
2030 iwl_restore_stations(priv);
2031 }
1945 2032
1946 /* If we issue a new RXON command which required a tune then we must 2033 /* If we issue a new RXON command which required a tune then we must
1947 * send a new TXPOWER command or we won't be able to Tx any frames */ 2034 * send a new TXPOWER command or we won't be able to Tx any frames */
@@ -1951,19 +2038,6 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1951 return rc; 2038 return rc;
1952 } 2039 }
1953 2040
1954 /* Add the broadcast address so we can send broadcast frames */
1955 priv->cfg->ops->lib->add_bcast_station(priv);
1956
1957 /* If we have set the ASSOC_MSK and we are in BSS mode then
1958 * add the IWL_AP_ID to the station rate table */
1959 if (iwl_is_associated(priv) &&
1960 (priv->iw_mode == NL80211_IFTYPE_STATION))
1961 if (iwl_add_station(priv, priv->active_rxon.bssid_addr,
1962 true, CMD_SYNC, NULL) == IWL_INVALID_STATION) {
1963 IWL_ERR(priv, "Error adding AP address for transmit\n");
1964 return -EIO;
1965 }
1966
1967 /* Init the hardware's rate fallback order based on the band */ 2041 /* Init the hardware's rate fallback order based on the band */
1968 rc = iwl3945_init_hw_rate_table(priv); 2042 rc = iwl3945_init_hw_rate_table(priv);
1969 if (rc) { 2043 if (rc) {
@@ -1998,13 +2072,13 @@ void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
1998 2072
1999 reschedule: 2073 reschedule:
2000 queue_delayed_work(priv->workqueue, 2074 queue_delayed_work(priv->workqueue,
2001 &priv->thermal_periodic, REG_RECALIB_PERIOD * HZ); 2075 &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
2002} 2076}
2003 2077
2004static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work) 2078static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
2005{ 2079{
2006 struct iwl_priv *priv = container_of(work, struct iwl_priv, 2080 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2007 thermal_periodic.work); 2081 _3945.thermal_periodic.work);
2008 2082
2009 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2083 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2010 return; 2084 return;
@@ -2140,7 +2214,7 @@ static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
2140 * power peaks, without too much distortion (clipping). 2214 * power peaks, without too much distortion (clipping).
2141 */ 2215 */
2142 /* we'll fill in this array with h/w max power levels */ 2216 /* we'll fill in this array with h/w max power levels */
2143 clip_pwrs = (s8 *) priv->clip39_groups[i].clip_powers; 2217 clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
2144 2218
2145 /* divide factory saturation power by 2 to find -3dB level */ 2219 /* divide factory saturation power by 2 to find -3dB level */
2146 satur_pwr = (s8) (group->saturation_power >> 1); 2220 satur_pwr = (s8) (group->saturation_power >> 1);
@@ -2224,7 +2298,7 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
2224 iwl3945_hw_reg_get_ch_grp_index(priv, ch_info); 2298 iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
2225 2299
2226 /* Get this chnlgrp's rate->max/clip-powers table */ 2300 /* Get this chnlgrp's rate->max/clip-powers table */
2227 clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers; 2301 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
2228 2302
2229 /* calculate power index *adjustment* value according to 2303 /* calculate power index *adjustment* value according to
2230 * diff between current temperature and factory temperature */ 2304 * diff between current temperature and factory temperature */
@@ -2332,7 +2406,7 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
2332{ 2406{
2333 int txq_id = txq->q.id; 2407 int txq_id = txq->q.id;
2334 2408
2335 struct iwl3945_shared *shared_data = priv->shared_virt; 2409 struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
2336 2410
2337 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); 2411 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
2338 2412
@@ -2385,6 +2459,30 @@ static u16 iwl3945_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
2385 return (u16)sizeof(struct iwl3945_addsta_cmd); 2459 return (u16)sizeof(struct iwl3945_addsta_cmd);
2386} 2460}
2387 2461
2462static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
2463 struct ieee80211_vif *vif, bool add)
2464{
2465 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
2466 int ret;
2467
2468 if (add) {
2469 ret = iwl_add_bssid_station(priv, vif->bss_conf.bssid, false,
2470 &vif_priv->ibss_bssid_sta_id);
2471 if (ret)
2472 return ret;
2473
2474 iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
2475 (priv->band == IEEE80211_BAND_5GHZ) ?
2476 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
2477 CMD_ASYNC);
2478 iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
2479
2480 return 0;
2481 }
2482
2483 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
2484 vif->bss_conf.bssid);
2485}
2388 2486
2389/** 2487/**
2390 * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table 2488 * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
@@ -2432,7 +2530,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2432 /* If an OFDM rate is used, have it fall back to the 2530 /* If an OFDM rate is used, have it fall back to the
2433 * 1M CCK rates */ 2531 * 1M CCK rates */
2434 2532
2435 if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) && 2533 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
2436 iwl_is_associated(priv)) { 2534 iwl_is_associated(priv)) {
2437 2535
2438 index = IWL_FIRST_CCK_RATE; 2536 index = IWL_FIRST_CCK_RATE;
@@ -2471,12 +2569,12 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2471 memset((void *)&priv->hw_params, 0, 2569 memset((void *)&priv->hw_params, 0,
2472 sizeof(struct iwl_hw_params)); 2570 sizeof(struct iwl_hw_params));
2473 2571
2474 priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev, 2572 priv->_3945.shared_virt =
2475 sizeof(struct iwl3945_shared), 2573 dma_alloc_coherent(&priv->pci_dev->dev,
2476 &priv->shared_phys, GFP_KERNEL); 2574 sizeof(struct iwl3945_shared),
2477 if (!priv->shared_virt) { 2575 &priv->_3945.shared_phys, GFP_KERNEL);
2576 if (!priv->_3945.shared_virt) {
2478 IWL_ERR(priv, "failed to allocate pci memory\n"); 2577 IWL_ERR(priv, "failed to allocate pci memory\n");
2479 mutex_unlock(&priv->mutex);
2480 return -ENOMEM; 2578 return -ENOMEM;
2481 } 2579 }
2482 2580
@@ -2537,13 +2635,13 @@ void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
2537 2635
2538void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv) 2636void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
2539{ 2637{
2540 INIT_DELAYED_WORK(&priv->thermal_periodic, 2638 INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
2541 iwl3945_bg_reg_txpower_periodic); 2639 iwl3945_bg_reg_txpower_periodic);
2542} 2640}
2543 2641
2544void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv) 2642void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
2545{ 2643{
2546 cancel_delayed_work(&priv->thermal_periodic); 2644 cancel_delayed_work(&priv->_3945.thermal_periodic);
2547} 2645}
2548 2646
2549/* check contents of special bootstrap uCode SRAM */ 2647/* check contents of special bootstrap uCode SRAM */
@@ -2714,48 +2812,10 @@ static int iwl3945_load_bsm(struct iwl_priv *priv)
2714 return 0; 2812 return 0;
2715} 2813}
2716 2814
2717#define IWL3945_UCODE_GET(item) \
2718static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode,\
2719 u32 api_ver) \
2720{ \
2721 return le32_to_cpu(ucode->u.v1.item); \
2722}
2723
2724static u32 iwl3945_ucode_get_header_size(u32 api_ver)
2725{
2726 return UCODE_HEADER_SIZE(1);
2727}
2728static u32 iwl3945_ucode_get_build(const struct iwl_ucode_header *ucode,
2729 u32 api_ver)
2730{
2731 return 0;
2732}
2733static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode,
2734 u32 api_ver)
2735{
2736 return (u8 *) ucode->u.v1.data;
2737}
2738
2739IWL3945_UCODE_GET(inst_size);
2740IWL3945_UCODE_GET(data_size);
2741IWL3945_UCODE_GET(init_size);
2742IWL3945_UCODE_GET(init_data_size);
2743IWL3945_UCODE_GET(boot_size);
2744
2745static struct iwl_hcmd_ops iwl3945_hcmd = { 2815static struct iwl_hcmd_ops iwl3945_hcmd = {
2746 .rxon_assoc = iwl3945_send_rxon_assoc, 2816 .rxon_assoc = iwl3945_send_rxon_assoc,
2747 .commit_rxon = iwl3945_commit_rxon, 2817 .commit_rxon = iwl3945_commit_rxon,
2748}; 2818 .send_bt_config = iwl_send_bt_config,
2749
2750static struct iwl_ucode_ops iwl3945_ucode = {
2751 .get_header_size = iwl3945_ucode_get_header_size,
2752 .get_build = iwl3945_ucode_get_build,
2753 .get_inst_size = iwl3945_ucode_get_inst_size,
2754 .get_data_size = iwl3945_ucode_get_data_size,
2755 .get_init_size = iwl3945_ucode_get_init_size,
2756 .get_init_data_size = iwl3945_ucode_get_init_data_size,
2757 .get_boot_size = iwl3945_ucode_get_boot_size,
2758 .get_data = iwl3945_ucode_get_data,
2759}; 2819};
2760 2820
2761static struct iwl_lib_ops iwl3945_lib = { 2821static struct iwl_lib_ops iwl3945_lib = {
@@ -2791,17 +2851,24 @@ static struct iwl_lib_ops iwl3945_lib = {
2791 .post_associate = iwl3945_post_associate, 2851 .post_associate = iwl3945_post_associate,
2792 .isr = iwl_isr_legacy, 2852 .isr = iwl_isr_legacy,
2793 .config_ap = iwl3945_config_ap, 2853 .config_ap = iwl3945_config_ap,
2794 .add_bcast_station = iwl3945_add_bcast_station, 2854 .manage_ibss_station = iwl3945_manage_ibss_station,
2855 .check_plcp_health = iwl3945_good_plcp_health,
2856
2857 .debugfs_ops = {
2858 .rx_stats_read = iwl3945_ucode_rx_stats_read,
2859 .tx_stats_read = iwl3945_ucode_tx_stats_read,
2860 .general_stats_read = iwl3945_ucode_general_stats_read,
2861 },
2795}; 2862};
2796 2863
2797static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { 2864static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2798 .get_hcmd_size = iwl3945_get_hcmd_size, 2865 .get_hcmd_size = iwl3945_get_hcmd_size,
2799 .build_addsta_hcmd = iwl3945_build_addsta_hcmd, 2866 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2800 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag, 2867 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2868 .request_scan = iwl3945_request_scan,
2801}; 2869};
2802 2870
2803static const struct iwl_ops iwl3945_ops = { 2871static const struct iwl_ops iwl3945_ops = {
2804 .ucode = &iwl3945_ucode,
2805 .lib = &iwl3945_lib, 2872 .lib = &iwl3945_lib,
2806 .hcmd = &iwl3945_hcmd, 2873 .hcmd = &iwl3945_hcmd,
2807 .utils = &iwl3945_hcmd_utils, 2874 .utils = &iwl3945_hcmd_utils,
@@ -2826,7 +2893,10 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2826 .ht_greenfield_support = false, 2893 .ht_greenfield_support = false,
2827 .led_compensation = 64, 2894 .led_compensation = 64,
2828 .broken_powersave = true, 2895 .broken_powersave = true,
2829 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 2896 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
2897 .monitor_recover_period = IWL_MONITORING_PERIOD,
2898 .max_event_log_size = 512,
2899 .tx_power_by_driver = true,
2830}; 2900};
2831 2901
2832static struct iwl_cfg iwl3945_abg_cfg = { 2902static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2844,7 +2914,10 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2844 .ht_greenfield_support = false, 2914 .ht_greenfield_support = false,
2845 .led_compensation = 64, 2915 .led_compensation = 64,
2846 .broken_powersave = true, 2916 .broken_powersave = true,
2847 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 2917 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
2918 .monitor_recover_period = IWL_MONITORING_PERIOD,
2919 .max_event_log_size = 512,
2920 .tx_power_by_driver = true,
2848}; 2921};
2849 2922
2850DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { 2923DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 452dfd5456c6..bb2aeebf3652 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -95,7 +95,6 @@ struct iwl3945_rs_sta {
95 u8 tgg; 95 u8 tgg;
96 u8 flush_pending; 96 u8 flush_pending;
97 u8 start_rate; 97 u8 start_rate;
98 u8 ibss_sta_added;
99 struct timer_list rate_scale_flush; 98 struct timer_list rate_scale_flush;
100 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945]; 99 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
101#ifdef CONFIG_MAC80211_DEBUGFS 100#ifdef CONFIG_MAC80211_DEBUGFS
@@ -107,7 +106,12 @@ struct iwl3945_rs_sta {
107}; 106};
108 107
109 108
109/*
110 * The common struct MUST be first because it is shared between
111 * 3945 and agn!
112 */
110struct iwl3945_sta_priv { 113struct iwl3945_sta_priv {
114 struct iwl_station_priv_common common;
111 struct iwl3945_rs_sta rs_sta; 115 struct iwl3945_rs_sta rs_sta;
112}; 116};
113 117
@@ -212,13 +216,6 @@ extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
212 char **buf, bool display); 216 char **buf, bool display);
213extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv); 217extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
214 218
215/*
216 * Currently used by iwl-3945-rs... look at restructuring so that it doesn't
217 * call this... todo... fix that.
218*/
219extern u8 iwl3945_sync_station(struct iwl_priv *priv, int sta_id,
220 u16 tx_rate, u8 flags);
221
222/****************************************************************************** 219/******************************************************************************
223 * 220 *
224 * Functions implemented in iwl-[34]*.c which are forward declared here 221 * Functions implemented in iwl-[34]*.c which are forward declared here
@@ -265,10 +262,14 @@ extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
265extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power); 262extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
266extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv, 263extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
267 struct iwl_rx_mem_buffer *rxb); 264 struct iwl_rx_mem_buffer *rxb);
265void iwl3945_reply_statistics(struct iwl_priv *priv,
266 struct iwl_rx_mem_buffer *rxb);
268extern void iwl3945_disable_events(struct iwl_priv *priv); 267extern void iwl3945_disable_events(struct iwl_priv *priv);
269extern int iwl4965_get_temperature(const struct iwl_priv *priv); 268extern int iwl4965_get_temperature(const struct iwl_priv *priv);
270extern void iwl3945_post_associate(struct iwl_priv *priv); 269extern void iwl3945_post_associate(struct iwl_priv *priv,
271extern void iwl3945_config_ap(struct iwl_priv *priv); 270 struct ieee80211_vif *vif);
271extern void iwl3945_config_ap(struct iwl_priv *priv,
272 struct ieee80211_vif *vif);
272 273
273/** 274/**
274 * iwl3945_hw_find_station - Find station id for a given BSSID 275 * iwl3945_hw_find_station - Find station id for a given BSSID
@@ -287,14 +288,15 @@ extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
287extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv); 288extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
288extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv); 289extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
289extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv); 290extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
290extern u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id,
291 u16 tx_rate, u8 flags);
292 291
293extern const struct iwl_channel_info *iwl3945_get_channel_info( 292extern const struct iwl_channel_info *iwl3945_get_channel_info(
294 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel); 293 const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
295 294
296extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate); 295extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
297 296
297/* scanning */
298void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
299
298/* Requires full declaration of iwl_priv before including */ 300/* Requires full declaration of iwl_priv before including */
299#include "iwl-io.h" 301#include "iwl-io.h"
300 302
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index 67ef562e8db1..cd4b61ae25b7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -81,26 +81,6 @@
81 */ 81 */
82#define IWL49_FIRST_AMPDU_QUEUE 7 82#define IWL49_FIRST_AMPDU_QUEUE 7
83 83
84/* Time constants */
85#define SHORT_SLOT_TIME 9
86#define LONG_SLOT_TIME 20
87
88/* RSSI to dBm */
89#define IWL49_RSSI_OFFSET 44
90
91
92/* PCI registers */
93#define PCI_CFG_RETRY_TIMEOUT 0x041
94
95/* PCI register values */
96#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
97#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
98
99#define IWL_NUM_SCAN_RATES (2)
100
101#define IWL_DEFAULT_TX_RETRY 15
102
103
104/* Sizes and addresses for instruction and data memory (SRAM) in 84/* Sizes and addresses for instruction and data memory (SRAM) in
105 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ 85 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
106#define IWL49_RTC_INST_LOWER_BOUND (0x000000) 86#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
@@ -393,10 +373,6 @@ static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
393 * location(s) in command (struct iwl4965_txpowertable_cmd). 373 * location(s) in command (struct iwl4965_txpowertable_cmd).
394 */ 374 */
395 375
396/* Limit range of txpower output target to be between these values */
397#define IWL_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm = 1 milliwatt */
398#define IWL_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
399
400/** 376/**
401 * When MIMO is used (2 transmitters operating simultaneously), driver should 377 * When MIMO is used (2 transmitters operating simultaneously), driver should
402 * limit each transmitter to deliver a max of 3 dB below the regulatory limit 378 * limit each transmitter to deliver a max of 3 dB below the regulatory limit
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 8972166386cb..d3afddae8d9f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -46,6 +46,8 @@
46#include "iwl-calib.h" 46#include "iwl-calib.h"
47#include "iwl-sta.h" 47#include "iwl-sta.h"
48#include "iwl-agn-led.h" 48#include "iwl-agn-led.h"
49#include "iwl-agn.h"
50#include "iwl-agn-debugfs.h"
49 51
50static int iwl4965_send_tx_power(struct iwl_priv *priv); 52static int iwl4965_send_tx_power(struct iwl_priv *priv);
51static int iwl4965_hw_get_temperature(struct iwl_priv *priv); 53static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -60,14 +62,6 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
60#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode" 62#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
61#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api) 63#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
62 64
63
64/* module parameters */
65static struct iwl_mod_params iwl4965_mod_params = {
66 .amsdu_size_8K = 1,
67 .restart_fw = 1,
68 /* the rest are 0 by default */
69};
70
71/* check contents of special bootstrap uCode SRAM */ 65/* check contents of special bootstrap uCode SRAM */
72static int iwl4965_verify_bsm(struct iwl_priv *priv) 66static int iwl4965_verify_bsm(struct iwl_priv *priv)
73{ 67{
@@ -417,7 +411,7 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
417 sizeof(cmd), &cmd); 411 sizeof(cmd), &cmd);
418 if (ret) 412 if (ret)
419 IWL_DEBUG_CALIB(priv, "fail sending cmd " 413 IWL_DEBUG_CALIB(priv, "fail sending cmd "
420 "REPLY_PHY_CALIBRATION_CMD \n"); 414 "REPLY_PHY_CALIBRATION_CMD\n");
421 415
422 /* TODO we might want recalculate 416 /* TODO we might want recalculate
423 * rx_chain in rxon cmd */ 417 * rx_chain in rxon cmd */
@@ -502,14 +496,14 @@ static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
502 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); 496 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
503} 497}
504 498
505static const u16 default_queue_to_tx_fifo[] = { 499static const s8 default_queue_to_tx_fifo[] = {
506 IWL_TX_FIFO_AC3, 500 IWL_TX_FIFO_VO,
507 IWL_TX_FIFO_AC2, 501 IWL_TX_FIFO_VI,
508 IWL_TX_FIFO_AC1, 502 IWL_TX_FIFO_BE,
509 IWL_TX_FIFO_AC0, 503 IWL_TX_FIFO_BK,
510 IWL49_CMD_FIFO_NUM, 504 IWL49_CMD_FIFO_NUM,
511 IWL_TX_FIFO_HCCA_1, 505 IWL_TX_FIFO_UNUSED,
512 IWL_TX_FIFO_HCCA_2 506 IWL_TX_FIFO_UNUSED,
513}; 507};
514 508
515static int iwl4965_alive_notify(struct iwl_priv *priv) 509static int iwl4965_alive_notify(struct iwl_priv *priv)
@@ -589,9 +583,15 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
589 /* reset to 0 to enable all the queue first */ 583 /* reset to 0 to enable all the queue first */
590 priv->txq_ctx_active_msk = 0; 584 priv->txq_ctx_active_msk = 0;
591 /* Map each Tx/cmd queue to its corresponding fifo */ 585 /* Map each Tx/cmd queue to its corresponding fifo */
586 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
592 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { 587 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
593 int ac = default_queue_to_tx_fifo[i]; 588 int ac = default_queue_to_tx_fifo[i];
589
594 iwl_txq_ctx_activate(priv, i); 590 iwl_txq_ctx_activate(priv, i);
591
592 if (ac == IWL_TX_FIFO_UNUSED)
593 continue;
594
595 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); 595 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
596 } 596 }
597 597
@@ -1613,19 +1613,19 @@ static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
1613 1613
1614 /* get absolute value */ 1614 /* get absolute value */
1615 if (temp_diff < 0) { 1615 if (temp_diff < 0) {
1616 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d, \n", temp_diff); 1616 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
1617 temp_diff = -temp_diff; 1617 temp_diff = -temp_diff;
1618 } else if (temp_diff == 0) 1618 } else if (temp_diff == 0)
1619 IWL_DEBUG_POWER(priv, "Same temp, \n"); 1619 IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
1620 else 1620 else
1621 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d, \n", temp_diff); 1621 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
1622 1622
1623 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) { 1623 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
1624 IWL_DEBUG_POWER(priv, "Thermal txpower calib not needed\n"); 1624 IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
1625 return 0; 1625 return 0;
1626 } 1626 }
1627 1627
1628 IWL_DEBUG_POWER(priv, "Thermal txpower calib needed\n"); 1628 IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
1629 1629
1630 return 1; 1630 return 1;
1631} 1631}
@@ -1874,7 +1874,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
1874 info->status.rates[0].count = tx_resp->failure_frame + 1; 1874 info->status.rates[0].count = tx_resp->failure_frame + 1;
1875 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 1875 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1876 info->flags |= iwl_tx_status_to_mac80211(status); 1876 info->flags |= iwl_tx_status_to_mac80211(status);
1877 iwl_hwrate_to_tx_control(priv, rate_n_flags, info); 1877 iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
1878 /* FIXME: code repetition end */ 1878 /* FIXME: code repetition end */
1879 1879
1880 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", 1880 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
@@ -1953,6 +1953,60 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
1953 return 0; 1953 return 0;
1954} 1954}
1955 1955
1956static u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
1957{
1958 int i;
1959 int start = 0;
1960 int ret = IWL_INVALID_STATION;
1961 unsigned long flags;
1962
1963 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
1964 (priv->iw_mode == NL80211_IFTYPE_AP))
1965 start = IWL_STA_ID;
1966
1967 if (is_broadcast_ether_addr(addr))
1968 return priv->hw_params.bcast_sta_id;
1969
1970 spin_lock_irqsave(&priv->sta_lock, flags);
1971 for (i = start; i < priv->hw_params.max_stations; i++)
1972 if (priv->stations[i].used &&
1973 (!compare_ether_addr(priv->stations[i].sta.sta.addr,
1974 addr))) {
1975 ret = i;
1976 goto out;
1977 }
1978
1979 IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
1980 addr, priv->num_stations);
1981
1982 out:
1983 /*
1984 * It may be possible that more commands interacting with stations
1985 * arrive before we completed processing the adding of
1986 * station
1987 */
1988 if (ret != IWL_INVALID_STATION &&
1989 (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
1990 ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
1991 (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
1992 IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
1993 ret);
1994 ret = IWL_INVALID_STATION;
1995 }
1996 spin_unlock_irqrestore(&priv->sta_lock, flags);
1997 return ret;
1998}
1999
2000static int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
2001{
2002 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
2003 return IWL_AP_ID;
2004 } else {
2005 u8 *da = ieee80211_get_DA(hdr);
2006 return iwl_find_station(priv, da);
2007 }
2008}
2009
1956/** 2010/**
1957 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response 2011 * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
1958 */ 2012 */
@@ -2014,7 +2068,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2014 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); 2068 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2015 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " 2069 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
2016 "%d index %d\n", scd_ssn , index); 2070 "%d index %d\n", scd_ssn , index);
2017 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2071 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
2018 if (qc) 2072 if (qc)
2019 iwl_free_tfds_in_queue(priv, sta_id, 2073 iwl_free_tfds_in_queue(priv, sta_id,
2020 tid, freed); 2074 tid, freed);
@@ -2031,7 +2085,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2031 } else { 2085 } else {
2032 info->status.rates[0].count = tx_resp->failure_frame + 1; 2086 info->status.rates[0].count = tx_resp->failure_frame + 1;
2033 info->flags |= iwl_tx_status_to_mac80211(status); 2087 info->flags |= iwl_tx_status_to_mac80211(status);
2034 iwl_hwrate_to_tx_control(priv, 2088 iwlagn_hwrate_to_tx_control(priv,
2035 le32_to_cpu(tx_resp->rate_n_flags), 2089 le32_to_cpu(tx_resp->rate_n_flags),
2036 info); 2090 info);
2037 2091
@@ -2042,7 +2096,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2042 le32_to_cpu(tx_resp->rate_n_flags), 2096 le32_to_cpu(tx_resp->rate_n_flags),
2043 tx_resp->failure_frame); 2097 tx_resp->failure_frame);
2044 2098
2045 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2099 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
2046 if (qc && likely(sta_id != IWL_INVALID_STATION)) 2100 if (qc && likely(sta_id != IWL_INVALID_STATION))
2047 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 2101 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
2048 else if (sta_id == IWL_INVALID_STATION) 2102 else if (sta_id == IWL_INVALID_STATION)
@@ -2053,10 +2107,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2053 iwl_wake_queue(priv, txq_id); 2107 iwl_wake_queue(priv, txq_id);
2054 } 2108 }
2055 if (qc && likely(sta_id != IWL_INVALID_STATION)) 2109 if (qc && likely(sta_id != IWL_INVALID_STATION))
2056 iwl_txq_check_empty(priv, sta_id, tid, txq_id); 2110 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
2057 2111
2058 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 2112 iwl_check_abort_status(priv, tx_resp->frame_count, status);
2059 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
2060} 2113}
2061 2114
2062static int iwl4965_calc_rssi(struct iwl_priv *priv, 2115static int iwl4965_calc_rssi(struct iwl_priv *priv,
@@ -2090,7 +2143,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
2090 2143
2091 /* dBm = max_rssi dB - agc dB - constant. 2144 /* dBm = max_rssi dB - agc dB - constant.
2092 * Higher AGC (higher radio gain) means lower signal. */ 2145 * Higher AGC (higher radio gain) means lower signal. */
2093 return max_rssi - agc - IWL49_RSSI_OFFSET; 2146 return max_rssi - agc - IWLAGN_RSSI_OFFSET;
2094} 2147}
2095 2148
2096 2149
@@ -2098,7 +2151,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
2098static void iwl4965_rx_handler_setup(struct iwl_priv *priv) 2151static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
2099{ 2152{
2100 /* Legacy Rx frames */ 2153 /* Legacy Rx frames */
2101 priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx; 2154 priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
2102 /* Tx response */ 2155 /* Tx response */
2103 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; 2156 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
2104} 2157}
@@ -2113,50 +2166,13 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
2113 cancel_work_sync(&priv->txpower_work); 2166 cancel_work_sync(&priv->txpower_work);
2114} 2167}
2115 2168
2116#define IWL4965_UCODE_GET(item) \
2117static u32 iwl4965_ucode_get_##item(const struct iwl_ucode_header *ucode,\
2118 u32 api_ver) \
2119{ \
2120 return le32_to_cpu(ucode->u.v1.item); \
2121}
2122
2123static u32 iwl4965_ucode_get_header_size(u32 api_ver)
2124{
2125 return UCODE_HEADER_SIZE(1);
2126}
2127static u32 iwl4965_ucode_get_build(const struct iwl_ucode_header *ucode,
2128 u32 api_ver)
2129{
2130 return 0;
2131}
2132static u8 *iwl4965_ucode_get_data(const struct iwl_ucode_header *ucode,
2133 u32 api_ver)
2134{
2135 return (u8 *) ucode->u.v1.data;
2136}
2137
2138IWL4965_UCODE_GET(inst_size);
2139IWL4965_UCODE_GET(data_size);
2140IWL4965_UCODE_GET(init_size);
2141IWL4965_UCODE_GET(init_data_size);
2142IWL4965_UCODE_GET(boot_size);
2143
2144static struct iwl_hcmd_ops iwl4965_hcmd = { 2169static struct iwl_hcmd_ops iwl4965_hcmd = {
2145 .rxon_assoc = iwl4965_send_rxon_assoc, 2170 .rxon_assoc = iwl4965_send_rxon_assoc,
2146 .commit_rxon = iwl_commit_rxon, 2171 .commit_rxon = iwl_commit_rxon,
2147 .set_rxon_chain = iwl_set_rxon_chain, 2172 .set_rxon_chain = iwl_set_rxon_chain,
2173 .send_bt_config = iwl_send_bt_config,
2148}; 2174};
2149 2175
2150static struct iwl_ucode_ops iwl4965_ucode = {
2151 .get_header_size = iwl4965_ucode_get_header_size,
2152 .get_build = iwl4965_ucode_get_build,
2153 .get_inst_size = iwl4965_ucode_get_inst_size,
2154 .get_data_size = iwl4965_ucode_get_data_size,
2155 .get_init_size = iwl4965_ucode_get_init_size,
2156 .get_init_data_size = iwl4965_ucode_get_init_data_size,
2157 .get_boot_size = iwl4965_ucode_get_boot_size,
2158 .get_data = iwl4965_ucode_get_data,
2159};
2160static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { 2176static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2161 .get_hcmd_size = iwl4965_get_hcmd_size, 2177 .get_hcmd_size = iwl4965_get_hcmd_size,
2162 .build_addsta_hcmd = iwl4965_build_addsta_hcmd, 2178 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
@@ -2164,6 +2180,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2164 .gain_computation = iwl4965_gain_computation, 2180 .gain_computation = iwl4965_gain_computation,
2165 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag, 2181 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2166 .calc_rssi = iwl4965_calc_rssi, 2182 .calc_rssi = iwl4965_calc_rssi,
2183 .request_scan = iwlagn_request_scan,
2167}; 2184};
2168 2185
2169static struct iwl_lib_ops iwl4965_lib = { 2186static struct iwl_lib_ops iwl4965_lib = {
@@ -2184,6 +2201,7 @@ static struct iwl_lib_ops iwl4965_lib = {
2184 .load_ucode = iwl4965_load_bsm, 2201 .load_ucode = iwl4965_load_bsm,
2185 .dump_nic_event_log = iwl_dump_nic_event_log, 2202 .dump_nic_event_log = iwl_dump_nic_event_log,
2186 .dump_nic_error_log = iwl_dump_nic_error_log, 2203 .dump_nic_error_log = iwl_dump_nic_error_log,
2204 .dump_fh = iwl_dump_fh,
2187 .set_channel_switch = iwl4965_hw_channel_switch, 2205 .set_channel_switch = iwl4965_hw_channel_switch,
2188 .apm_ops = { 2206 .apm_ops = {
2189 .init = iwl_apm_init, 2207 .init = iwl_apm_init,
@@ -2216,11 +2234,16 @@ static struct iwl_lib_ops iwl4965_lib = {
2216 .temperature = iwl4965_temperature_calib, 2234 .temperature = iwl4965_temperature_calib,
2217 .set_ct_kill = iwl4965_set_ct_threshold, 2235 .set_ct_kill = iwl4965_set_ct_threshold,
2218 }, 2236 },
2219 .add_bcast_station = iwl_add_bcast_station, 2237 .manage_ibss_station = iwlagn_manage_ibss_station,
2238 .debugfs_ops = {
2239 .rx_stats_read = iwl_ucode_rx_stats_read,
2240 .tx_stats_read = iwl_ucode_tx_stats_read,
2241 .general_stats_read = iwl_ucode_general_stats_read,
2242 },
2243 .check_plcp_health = iwl_good_plcp_health,
2220}; 2244};
2221 2245
2222static const struct iwl_ops iwl4965_ops = { 2246static const struct iwl_ops iwl4965_ops = {
2223 .ucode = &iwl4965_ucode,
2224 .lib = &iwl4965_lib, 2247 .lib = &iwl4965_lib,
2225 .hcmd = &iwl4965_hcmd, 2248 .hcmd = &iwl4965_hcmd,
2226 .utils = &iwl4965_hcmd_utils, 2249 .utils = &iwl4965_hcmd_utils,
@@ -2228,7 +2251,7 @@ static const struct iwl_ops iwl4965_ops = {
2228}; 2251};
2229 2252
2230struct iwl_cfg iwl4965_agn_cfg = { 2253struct iwl_cfg iwl4965_agn_cfg = {
2231 .name = "4965AGN", 2254 .name = "Intel(R) Wireless WiFi Link 4965AGN",
2232 .fw_name_pre = IWL4965_FW_PRE, 2255 .fw_name_pre = IWL4965_FW_PRE,
2233 .ucode_api_max = IWL4965_UCODE_API_MAX, 2256 .ucode_api_max = IWL4965_UCODE_API_MAX,
2234 .ucode_api_min = IWL4965_UCODE_API_MIN, 2257 .ucode_api_min = IWL4965_UCODE_API_MIN,
@@ -2239,7 +2262,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
2239 .ops = &iwl4965_ops, 2262 .ops = &iwl4965_ops,
2240 .num_of_queues = IWL49_NUM_QUEUES, 2263 .num_of_queues = IWL49_NUM_QUEUES,
2241 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES, 2264 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
2242 .mod_params = &iwl4965_mod_params, 2265 .mod_params = &iwlagn_mod_params,
2243 .valid_tx_ant = ANT_AB, 2266 .valid_tx_ant = ANT_AB,
2244 .valid_rx_ant = ANT_ABC, 2267 .valid_rx_ant = ANT_ABC,
2245 .pll_cfg_val = 0, 2268 .pll_cfg_val = 0,
@@ -2251,27 +2274,20 @@ struct iwl_cfg iwl4965_agn_cfg = {
2251 .led_compensation = 61, 2274 .led_compensation = 61,
2252 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, 2275 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2253 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 2276 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2277 .monitor_recover_period = IWL_MONITORING_PERIOD,
2278 .temperature_kelvin = true,
2279 .max_event_log_size = 512,
2280 .tx_power_by_driver = true,
2281 .ucode_tracing = true,
2282 .sensitivity_calib_by_driver = true,
2283 .chain_noise_calib_by_driver = true,
2284 /*
2285 * Force use of chains B and C for scan RX on 5 GHz band
2286 * because the device has off-channel reception on chain A.
2287 */
2288 .scan_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
2254}; 2289};
2255 2290
2256/* Module firmware */ 2291/* Module firmware */
2257MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); 2292MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
2258 2293
2259module_param_named(antenna, iwl4965_mod_params.antenna, int, S_IRUGO);
2260MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
2261module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
2262MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
2263module_param_named(
2264 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, S_IRUGO);
2265MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
2266
2267module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
2268MODULE_PARM_DESC(queues_num, "number of hw queues.");
2269/* 11n */
2270module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
2271MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
2272module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
2273 int, S_IRUGO);
2274MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
2275
2276module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, S_IRUGO);
2277MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 714e032f6217..146e6431ae95 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -68,25 +68,6 @@
68#ifndef __iwl_5000_hw_h__ 68#ifndef __iwl_5000_hw_h__
69#define __iwl_5000_hw_h__ 69#define __iwl_5000_hw_h__
70 70
71#define IWL50_RTC_INST_LOWER_BOUND (0x000000)
72#define IWL50_RTC_INST_UPPER_BOUND (0x020000)
73
74#define IWL50_RTC_DATA_LOWER_BOUND (0x800000)
75#define IWL50_RTC_DATA_UPPER_BOUND (0x80C000)
76
77#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - \
78 IWL50_RTC_INST_LOWER_BOUND)
79#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - \
80 IWL50_RTC_DATA_LOWER_BOUND)
81
82/* EEPROM */
83#define IWL_5000_EEPROM_IMG_SIZE 2048
84
85#define IWL50_CMD_FIFO_NUM 7
86#define IWL50_NUM_QUEUES 20
87#define IWL50_NUM_AMPDU_QUEUES 10
88#define IWL50_FIRST_AMPDU_QUEUE 10
89
90/* 5150 only */ 71/* 5150 only */
91#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5) 72#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
92 73
@@ -103,19 +84,5 @@ static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
103 return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); 84 return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
104} 85}
105 86
106/* Fixed (non-configurable) rx data from phy */
107
108/**
109 * struct iwl5000_schedq_bc_tbl scheduler byte count table
110 * base physical address of iwl5000_shared
111 * is provided to SCD_DRAM_BASE_ADDR
112 * @tfd_offset 0-12 - tx command byte count
113 * 12-16 - station index
114 */
115struct iwl5000_scd_bc_tbl {
116 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
117} __attribute__ ((packed));
118
119
120#endif /* __iwl_5000_hw_h__ */ 87#endif /* __iwl_5000_hw_h__ */
121 88
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index e476acb53aa7..a28af7eb67eb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -19,6 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 * 24 *
24 *****************************************************************************/ 25 *****************************************************************************/
@@ -43,9 +44,11 @@
43#include "iwl-io.h" 44#include "iwl-io.h"
44#include "iwl-sta.h" 45#include "iwl-sta.h"
45#include "iwl-helpers.h" 46#include "iwl-helpers.h"
47#include "iwl-agn.h"
46#include "iwl-agn-led.h" 48#include "iwl-agn-led.h"
49#include "iwl-agn-hw.h"
47#include "iwl-5000-hw.h" 50#include "iwl-5000-hw.h"
48#include "iwl-6000-hw.h" 51#include "iwl-agn-debugfs.h"
49 52
50/* Highest firmware API version supported */ 53/* Highest firmware API version supported */
51#define IWL5000_UCODE_API_MAX 2 54#define IWL5000_UCODE_API_MAX 2
@@ -63,18 +66,8 @@
63#define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode" 66#define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode"
64#define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api) 67#define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api)
65 68
66static const u16 iwl5000_default_queue_to_tx_fifo[] = {
67 IWL_TX_FIFO_AC3,
68 IWL_TX_FIFO_AC2,
69 IWL_TX_FIFO_AC1,
70 IWL_TX_FIFO_AC0,
71 IWL50_CMD_FIFO_NUM,
72 IWL_TX_FIFO_HCCA_1,
73 IWL_TX_FIFO_HCCA_2
74};
75
76/* NIC configuration for 5000 series */ 69/* NIC configuration for 5000 series */
77void iwl5000_nic_config(struct iwl_priv *priv) 70static void iwl5000_nic_config(struct iwl_priv *priv)
78{ 71{
79 unsigned long flags; 72 unsigned long flags;
80 u16 radio_cfg; 73 u16 radio_cfg;
@@ -107,162 +100,6 @@ void iwl5000_nic_config(struct iwl_priv *priv)
107 spin_unlock_irqrestore(&priv->lock, flags); 100 spin_unlock_irqrestore(&priv->lock, flags);
108} 101}
109 102
110
111/*
112 * EEPROM
113 */
114static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
115{
116 u16 offset = 0;
117
118 if ((address & INDIRECT_ADDRESS) == 0)
119 return address;
120
121 switch (address & INDIRECT_TYPE_MSK) {
122 case INDIRECT_HOST:
123 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
124 break;
125 case INDIRECT_GENERAL:
126 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL);
127 break;
128 case INDIRECT_REGULATORY:
129 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
130 break;
131 case INDIRECT_CALIBRATION:
132 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
133 break;
134 case INDIRECT_PROCESS_ADJST:
135 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST);
136 break;
137 case INDIRECT_OTHERS:
138 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
139 break;
140 default:
141 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
142 address & INDIRECT_TYPE_MSK);
143 break;
144 }
145
146 /* translate the offset from words to byte */
147 return (address & ADDRESS_MSK) + (offset << 1);
148}
149
150u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv)
151{
152 struct iwl_eeprom_calib_hdr {
153 u8 version;
154 u8 pa_type;
155 u16 voltage;
156 } *hdr;
157
158 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
159 EEPROM_5000_CALIB_ALL);
160 return hdr->version;
161
162}
163
164static void iwl5000_gain_computation(struct iwl_priv *priv,
165 u32 average_noise[NUM_RX_CHAINS],
166 u16 min_average_noise_antenna_i,
167 u32 min_average_noise,
168 u8 default_chain)
169{
170 int i;
171 s32 delta_g;
172 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
173
174 /*
175 * Find Gain Code for the chains based on "default chain"
176 */
177 for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
178 if ((data->disconn_array[i])) {
179 data->delta_gain_code[i] = 0;
180 continue;
181 }
182
183 delta_g = (priv->cfg->chain_noise_scale *
184 ((s32)average_noise[default_chain] -
185 (s32)average_noise[i])) / 1500;
186
187 /* bound gain by 2 bits value max, 3rd bit is sign */
188 data->delta_gain_code[i] =
189 min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
190
191 if (delta_g < 0)
192 /*
193 * set negative sign ...
194 * note to Intel developers: This is uCode API format,
195 * not the format of any internal device registers.
196 * Do not change this format for e.g. 6050 or similar
197 * devices. Change format only if more resolution
198 * (i.e. more than 2 bits magnitude) is needed.
199 */
200 data->delta_gain_code[i] |= (1 << 2);
201 }
202
203 IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
204 data->delta_gain_code[1], data->delta_gain_code[2]);
205
206 if (!data->radio_write) {
207 struct iwl_calib_chain_noise_gain_cmd cmd;
208
209 memset(&cmd, 0, sizeof(cmd));
210
211 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
212 cmd.hdr.first_group = 0;
213 cmd.hdr.groups_num = 1;
214 cmd.hdr.data_valid = 1;
215 cmd.delta_gain_1 = data->delta_gain_code[1];
216 cmd.delta_gain_2 = data->delta_gain_code[2];
217 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
218 sizeof(cmd), &cmd, NULL);
219
220 data->radio_write = 1;
221 data->state = IWL_CHAIN_NOISE_CALIBRATED;
222 }
223
224 data->chain_noise_a = 0;
225 data->chain_noise_b = 0;
226 data->chain_noise_c = 0;
227 data->chain_signal_a = 0;
228 data->chain_signal_b = 0;
229 data->chain_signal_c = 0;
230 data->beacon_count = 0;
231}
232
233static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
234{
235 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
236 int ret;
237
238 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
239 struct iwl_calib_chain_noise_reset_cmd cmd;
240 memset(&cmd, 0, sizeof(cmd));
241
242 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
243 cmd.hdr.first_group = 0;
244 cmd.hdr.groups_num = 1;
245 cmd.hdr.data_valid = 1;
246 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
247 sizeof(cmd), &cmd);
248 if (ret)
249 IWL_ERR(priv,
250 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
251 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
252 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
253 }
254}
255
256void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
257 __le32 *tx_flags)
258{
259 if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
260 (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
261 *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
262 else
263 *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
264}
265
266static struct iwl_sensitivity_ranges iwl5000_sensitivity = { 103static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
267 .min_nrg_cck = 95, 104 .min_nrg_cck = 95,
268 .max_nrg_cck = 0, /* not used, set to 0 */ 105 .max_nrg_cck = 0, /* not used, set to 0 */
@@ -314,14 +151,6 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
314 .nrg_th_cca = 62, 151 .nrg_th_cca = 62,
315}; 152};
316 153
317const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
318 size_t offset)
319{
320 u32 address = eeprom_indirect_address(priv, offset);
321 BUG_ON(address >= priv->cfg->eeprom_size);
322 return &priv->eeprom[address];
323}
324
325static void iwl5150_set_ct_threshold(struct iwl_priv *priv) 154static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
326{ 155{
327 const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF; 156 const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
@@ -337,356 +166,10 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
337 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; 166 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
338} 167}
339 168
340/* 169static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
341 * Calibration
342 */
343static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
344{
345 struct iwl_calib_xtal_freq_cmd cmd;
346 __le16 *xtal_calib =
347 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
348
349 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
350 cmd.hdr.first_group = 0;
351 cmd.hdr.groups_num = 1;
352 cmd.hdr.data_valid = 1;
353 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
354 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
355 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
356 (u8 *)&cmd, sizeof(cmd));
357}
358
359static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
360{
361 struct iwl_calib_cfg_cmd calib_cfg_cmd;
362 struct iwl_host_cmd cmd = {
363 .id = CALIBRATION_CFG_CMD,
364 .len = sizeof(struct iwl_calib_cfg_cmd),
365 .data = &calib_cfg_cmd,
366 };
367
368 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
369 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
370 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
371 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
372 calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
373
374 return iwl_send_cmd(priv, &cmd);
375}
376
377static void iwl5000_rx_calib_result(struct iwl_priv *priv,
378 struct iwl_rx_mem_buffer *rxb)
379{
380 struct iwl_rx_packet *pkt = rxb_addr(rxb);
381 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
382 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
383 int index;
384
385 /* reduce the size of the length field itself */
386 len -= 4;
387
388 /* Define the order in which the results will be sent to the runtime
389 * uCode. iwl_send_calib_results sends them in a row according to their
390 * index. We sort them here */
391 switch (hdr->op_code) {
392 case IWL_PHY_CALIBRATE_DC_CMD:
393 index = IWL_CALIB_DC;
394 break;
395 case IWL_PHY_CALIBRATE_LO_CMD:
396 index = IWL_CALIB_LO;
397 break;
398 case IWL_PHY_CALIBRATE_TX_IQ_CMD:
399 index = IWL_CALIB_TX_IQ;
400 break;
401 case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
402 index = IWL_CALIB_TX_IQ_PERD;
403 break;
404 case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
405 index = IWL_CALIB_BASE_BAND;
406 break;
407 default:
408 IWL_ERR(priv, "Unknown calibration notification %d\n",
409 hdr->op_code);
410 return;
411 }
412 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
413}
414
415static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
416 struct iwl_rx_mem_buffer *rxb)
417{
418 IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
419 queue_work(priv->workqueue, &priv->restart);
420}
421
422/*
423 * ucode
424 */
425static int iwl5000_load_section(struct iwl_priv *priv, const char *name,
426 struct fw_desc *image, u32 dst_addr)
427{
428 dma_addr_t phy_addr = image->p_addr;
429 u32 byte_cnt = image->len;
430 int ret;
431
432 priv->ucode_write_complete = 0;
433
434 iwl_write_direct32(priv,
435 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
436 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
437
438 iwl_write_direct32(priv,
439 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
440
441 iwl_write_direct32(priv,
442 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
443 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
444
445 iwl_write_direct32(priv,
446 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
447 (iwl_get_dma_hi_addr(phy_addr)
448 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
449
450 iwl_write_direct32(priv,
451 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
452 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
453 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
454 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
455
456 iwl_write_direct32(priv,
457 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
458 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
459 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
460 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
461
462 IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
463 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
464 priv->ucode_write_complete, 5 * HZ);
465 if (ret == -ERESTARTSYS) {
466 IWL_ERR(priv, "Could not load the %s uCode section due "
467 "to interrupt\n", name);
468 return ret;
469 }
470 if (!ret) {
471 IWL_ERR(priv, "Could not load the %s uCode section\n",
472 name);
473 return -ETIMEDOUT;
474 }
475
476 return 0;
477}
478
479static int iwl5000_load_given_ucode(struct iwl_priv *priv,
480 struct fw_desc *inst_image,
481 struct fw_desc *data_image)
482{
483 int ret = 0;
484
485 ret = iwl5000_load_section(priv, "INST", inst_image,
486 IWL50_RTC_INST_LOWER_BOUND);
487 if (ret)
488 return ret;
489
490 return iwl5000_load_section(priv, "DATA", data_image,
491 IWL50_RTC_DATA_LOWER_BOUND);
492}
493
494int iwl5000_load_ucode(struct iwl_priv *priv)
495{
496 int ret = 0;
497
498 /* check whether init ucode should be loaded, or rather runtime ucode */
499 if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
500 IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
501 ret = iwl5000_load_given_ucode(priv,
502 &priv->ucode_init, &priv->ucode_init_data);
503 if (!ret) {
504 IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
505 priv->ucode_type = UCODE_INIT;
506 }
507 } else {
508 IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
509 "Loading runtime ucode...\n");
510 ret = iwl5000_load_given_ucode(priv,
511 &priv->ucode_code, &priv->ucode_data);
512 if (!ret) {
513 IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
514 priv->ucode_type = UCODE_RT;
515 }
516 }
517
518 return ret;
519}
520
521void iwl5000_init_alive_start(struct iwl_priv *priv)
522{
523 int ret = 0;
524
525 /* Check alive response for "valid" sign from uCode */
526 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
527 /* We had an error bringing up the hardware, so take it
528 * all the way back down so we can try again */
529 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
530 goto restart;
531 }
532
533 /* initialize uCode was loaded... verify inst image.
534 * This is a paranoid check, because we would not have gotten the
535 * "initialize" alive if code weren't properly loaded. */
536 if (iwl_verify_ucode(priv)) {
537 /* Runtime instruction load was bad;
538 * take it all the way back down so we can try again */
539 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
540 goto restart;
541 }
542
543 iwl_clear_stations_table(priv);
544 ret = priv->cfg->ops->lib->alive_notify(priv);
545 if (ret) {
546 IWL_WARN(priv,
547 "Could not complete ALIVE transition: %d\n", ret);
548 goto restart;
549 }
550
551 iwl5000_send_calib_cfg(priv);
552 return;
553
554restart:
555 /* real restart (first load init_ucode) */
556 queue_work(priv->workqueue, &priv->restart);
557}
558
559static void iwl5000_set_wr_ptrs(struct iwl_priv *priv,
560 int txq_id, u32 index)
561{
562 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
563 (index & 0xff) | (txq_id << 8));
564 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
565}
566
567static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
568 struct iwl_tx_queue *txq,
569 int tx_fifo_id, int scd_retry)
570{
571 int txq_id = txq->q.id;
572 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
573
574 iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
575 (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
576 (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
577 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
578 IWL50_SCD_QUEUE_STTS_REG_MSK);
579
580 txq->sched_retry = scd_retry;
581
582 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
583 active ? "Activate" : "Deactivate",
584 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
585}
586
587int iwl5000_alive_notify(struct iwl_priv *priv)
588{
589 u32 a;
590 unsigned long flags;
591 int i, chan;
592 u32 reg_val;
593
594 spin_lock_irqsave(&priv->lock, flags);
595
596 priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
597 a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
598 for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
599 a += 4)
600 iwl_write_targ_mem(priv, a, 0);
601 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
602 a += 4)
603 iwl_write_targ_mem(priv, a, 0);
604 for (; a < priv->scd_base_addr +
605 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
606 iwl_write_targ_mem(priv, a, 0);
607
608 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
609 priv->scd_bc_tbls.dma >> 10);
610
611 /* Enable DMA channel */
612 for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
613 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
614 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
615 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
616
617 /* Update FH chicken bits */
618 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
619 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
620 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
621
622 iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
623 IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
624 iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);
625
626 /* initiate the queues */
627 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
628 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
629 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
630 iwl_write_targ_mem(priv, priv->scd_base_addr +
631 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
632 iwl_write_targ_mem(priv, priv->scd_base_addr +
633 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
634 sizeof(u32),
635 ((SCD_WIN_SIZE <<
636 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
637 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
638 ((SCD_FRAME_LIMIT <<
639 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
640 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
641 }
642
643 iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
644 IWL_MASK(0, priv->hw_params.max_txq_num));
645
646 /* Activate all Tx DMA/FIFO channels */
647 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
648
649 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
650
651 /* make sure all queue are not stopped */
652 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
653 for (i = 0; i < 4; i++)
654 atomic_set(&priv->queue_stop_count[i], 0);
655
656 /* reset to 0 to enable all the queue first */
657 priv->txq_ctx_active_msk = 0;
658 /* map qos queues to fifos one-to-one */
659 for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
660 int ac = iwl5000_default_queue_to_tx_fifo[i];
661 iwl_txq_ctx_activate(priv, i);
662 iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
663 }
664
665 /*
666 * TODO - need to initialize these queues and map them to FIFOs
667 * in the loop above, not only mark them as active. We do this
668 * because we want the first aggregation queue to be queue #10,
669 * but do not use 8 or 9 otherwise yet.
670 */
671 iwl_txq_ctx_activate(priv, 7);
672 iwl_txq_ctx_activate(priv, 8);
673 iwl_txq_ctx_activate(priv, 9);
674
675 spin_unlock_irqrestore(&priv->lock, flags);
676
677
678 iwl_send_wimax_coex(priv);
679
680 iwl5000_set_Xtal_calib(priv);
681 iwl_send_calib_results(priv);
682
683 return 0;
684}
685
686int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
687{ 170{
688 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 171 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
689 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES) 172 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
690 priv->cfg->num_of_queues = 173 priv->cfg->num_of_queues =
691 priv->cfg->mod_params->num_of_queues; 174 priv->cfg->mod_params->num_of_queues;
692 175
@@ -694,13 +177,13 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
694 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 177 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
695 priv->hw_params.scd_bc_tbls_size = 178 priv->hw_params.scd_bc_tbls_size =
696 priv->cfg->num_of_queues * 179 priv->cfg->num_of_queues *
697 sizeof(struct iwl5000_scd_bc_tbl); 180 sizeof(struct iwlagn_scd_bc_tbl);
698 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 181 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
699 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 182 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
700 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 183 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
701 184
702 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE; 185 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
703 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE; 186 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
704 187
705 priv->hw_params.max_bsm_size = 0; 188 priv->hw_params.max_bsm_size = 0;
706 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 189 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@@ -717,571 +200,61 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
717 200
718 /* Set initial sensitivity parameters */ 201 /* Set initial sensitivity parameters */
719 /* Set initial calibration set */ 202 /* Set initial calibration set */
720 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 203 priv->hw_params.sens = &iwl5000_sensitivity;
721 case CSR_HW_REV_TYPE_5150: 204 priv->hw_params.calib_init_cfg =
722 priv->hw_params.sens = &iwl5150_sensitivity; 205 BIT(IWL_CALIB_XTAL) |
723 priv->hw_params.calib_init_cfg = 206 BIT(IWL_CALIB_LO) |
724 BIT(IWL_CALIB_DC) | 207 BIT(IWL_CALIB_TX_IQ) |
725 BIT(IWL_CALIB_LO) | 208 BIT(IWL_CALIB_TX_IQ_PERD) |
726 BIT(IWL_CALIB_TX_IQ) | 209 BIT(IWL_CALIB_BASE_BAND);
727 BIT(IWL_CALIB_BASE_BAND);
728
729 break;
730 default:
731 priv->hw_params.sens = &iwl5000_sensitivity;
732 priv->hw_params.calib_init_cfg =
733 BIT(IWL_CALIB_XTAL) |
734 BIT(IWL_CALIB_LO) |
735 BIT(IWL_CALIB_TX_IQ) |
736 BIT(IWL_CALIB_TX_IQ_PERD) |
737 BIT(IWL_CALIB_BASE_BAND);
738 break;
739 }
740
741 return 0;
742}
743
744/**
745 * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
746 */
747void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
748 struct iwl_tx_queue *txq,
749 u16 byte_cnt)
750{
751 struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
752 int write_ptr = txq->q.write_ptr;
753 int txq_id = txq->q.id;
754 u8 sec_ctl = 0;
755 u8 sta_id = 0;
756 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
757 __le16 bc_ent;
758
759 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
760
761 if (txq_id != IWL_CMD_QUEUE_NUM) {
762 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
763 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
764
765 switch (sec_ctl & TX_CMD_SEC_MSK) {
766 case TX_CMD_SEC_CCM:
767 len += CCMP_MIC_LEN;
768 break;
769 case TX_CMD_SEC_TKIP:
770 len += TKIP_ICV_LEN;
771 break;
772 case TX_CMD_SEC_WEP:
773 len += WEP_IV_LEN + WEP_ICV_LEN;
774 break;
775 }
776 }
777
778 bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
779
780 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
781
782 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
783 scd_bc_tbl[txq_id].
784 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
785}
786
787void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
788 struct iwl_tx_queue *txq)
789{
790 struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
791 int txq_id = txq->q.id;
792 int read_ptr = txq->q.read_ptr;
793 u8 sta_id = 0;
794 __le16 bc_ent;
795
796 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
797
798 if (txq_id != IWL_CMD_QUEUE_NUM)
799 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
800
801 bc_ent = cpu_to_le16(1 | (sta_id << 12));
802 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
803
804 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
805 scd_bc_tbl[txq_id].
806 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
807}
808
809static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
810 u16 txq_id)
811{
812 u32 tbl_dw_addr;
813 u32 tbl_dw;
814 u16 scd_q2ratid;
815
816 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
817
818 tbl_dw_addr = priv->scd_base_addr +
819 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
820
821 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
822
823 if (txq_id & 0x1)
824 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
825 else
826 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
827
828 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
829
830 return 0;
831}
832static void iwl5000_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
833{
834 /* Simply stop the queue, but don't change any configuration;
835 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
836 iwl_write_prph(priv,
837 IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
838 (0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
839 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
840}
841
842int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
843 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
844{
845 unsigned long flags;
846 u16 ra_tid;
847
848 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
849 (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
850 <= txq_id)) {
851 IWL_WARN(priv,
852 "queue number out of range: %d, must be %d to %d\n",
853 txq_id, IWL50_FIRST_AMPDU_QUEUE,
854 IWL50_FIRST_AMPDU_QUEUE +
855 priv->cfg->num_of_ampdu_queues - 1);
856 return -EINVAL;
857 }
858
859 ra_tid = BUILD_RAxTID(sta_id, tid);
860
861 /* Modify device's station table to Tx this TID */
862 iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
863
864 spin_lock_irqsave(&priv->lock, flags);
865
866 /* Stop this Tx queue before configuring it */
867 iwl5000_tx_queue_stop_scheduler(priv, txq_id);
868
869 /* Map receiver-address / traffic-ID to this queue */
870 iwl5000_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
871
872 /* Set this queue as a chain-building queue */
873 iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id));
874
875 /* enable aggregations for the queue */
876 iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id));
877
878 /* Place first TFD at index corresponding to start sequence number.
879 * Assumes that ssn_idx is valid (!= 0xFFF) */
880 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
881 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
882 iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
883
884 /* Set up Tx window size and frame limit for this queue */
885 iwl_write_targ_mem(priv, priv->scd_base_addr +
886 IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
887 sizeof(u32),
888 ((SCD_WIN_SIZE <<
889 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
890 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
891 ((SCD_FRAME_LIMIT <<
892 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
893 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
894
895 iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
896
897 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
898 iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
899
900 spin_unlock_irqrestore(&priv->lock, flags);
901 210
902 return 0; 211 return 0;
903} 212}
904 213
905int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id, 214static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
906 u16 ssn_idx, u8 tx_fifo)
907{ 215{
908 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || 216 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
909 (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues 217 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
910 <= txq_id)) { 218 priv->cfg->num_of_queues =
911 IWL_ERR(priv, 219 priv->cfg->mod_params->num_of_queues;
912 "queue number out of range: %d, must be %d to %d\n",
913 txq_id, IWL50_FIRST_AMPDU_QUEUE,
914 IWL50_FIRST_AMPDU_QUEUE +
915 priv->cfg->num_of_ampdu_queues - 1);
916 return -EINVAL;
917 }
918
919 iwl5000_tx_queue_stop_scheduler(priv, txq_id);
920
921 iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));
922
923 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
924 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
925 /* supposes that ssn_idx is valid (!= 0xFFF) */
926 iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
927
928 iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
929 iwl_txq_ctx_deactivate(priv, txq_id);
930 iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
931
932 return 0;
933}
934
935u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
936{
937 u16 size = (u16)sizeof(struct iwl_addsta_cmd);
938 struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
939 memcpy(addsta, cmd, size);
940 /* resrved in 5000 */
941 addsta->rate_n_flags = cpu_to_le16(0);
942 return size;
943}
944
945
946/*
947 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
948 * must be called under priv->lock and mac access
949 */
950void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
951{
952 iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
953}
954
955
956static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
957{
958 return le32_to_cpup((__le32 *)&tx_resp->status +
959 tx_resp->frame_count) & MAX_SN;
960}
961
962static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
963 struct iwl_ht_agg *agg,
964 struct iwl5000_tx_resp *tx_resp,
965 int txq_id, u16 start_idx)
966{
967 u16 status;
968 struct agg_tx_status *frame_status = &tx_resp->status;
969 struct ieee80211_tx_info *info = NULL;
970 struct ieee80211_hdr *hdr = NULL;
971 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
972 int i, sh, idx;
973 u16 seq;
974
975 if (agg->wait_for_ba)
976 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
977
978 agg->frame_count = tx_resp->frame_count;
979 agg->start_idx = start_idx;
980 agg->rate_n_flags = rate_n_flags;
981 agg->bitmap = 0;
982
983 /* # frames attempted by Tx command */
984 if (agg->frame_count == 1) {
985 /* Only one frame was attempted; no block-ack will arrive */
986 status = le16_to_cpu(frame_status[0].status);
987 idx = start_idx;
988
989 /* FIXME: code repetition */
990 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
991 agg->frame_count, agg->start_idx, idx);
992
993 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
994 info->status.rates[0].count = tx_resp->failure_frame + 1;
995 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
996 info->flags |= iwl_tx_status_to_mac80211(status);
997 iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
998
999 /* FIXME: code repetition end */
1000
1001 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
1002 status & 0xff, tx_resp->failure_frame);
1003 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
1004
1005 agg->wait_for_ba = 0;
1006 } else {
1007 /* Two or more frames were attempted; expect block-ack */
1008 u64 bitmap = 0;
1009 int start = agg->start_idx;
1010
1011 /* Construct bit-map of pending frames within Tx window */
1012 for (i = 0; i < agg->frame_count; i++) {
1013 u16 sc;
1014 status = le16_to_cpu(frame_status[i].status);
1015 seq = le16_to_cpu(frame_status[i].sequence);
1016 idx = SEQ_TO_INDEX(seq);
1017 txq_id = SEQ_TO_QUEUE(seq);
1018
1019 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
1020 AGG_TX_STATE_ABORT_MSK))
1021 continue;
1022 220
1023 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n", 221 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
1024 agg->frame_count, txq_id, idx); 222 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
223 priv->hw_params.scd_bc_tbls_size =
224 priv->cfg->num_of_queues *
225 sizeof(struct iwlagn_scd_bc_tbl);
226 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
227 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
228 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
1025 229
1026 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx); 230 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
1027 if (!hdr) { 231 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
1028 IWL_ERR(priv,
1029 "BUG_ON idx doesn't point to valid skb"
1030 " idx=%d, txq_id=%d\n", idx, txq_id);
1031 return -1;
1032 }
1033 232
1034 sc = le16_to_cpu(hdr->seq_ctrl); 233 priv->hw_params.max_bsm_size = 0;
1035 if (idx != (SEQ_TO_SN(sc) & 0xff)) { 234 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
1036 IWL_ERR(priv, 235 BIT(IEEE80211_BAND_5GHZ);
1037 "BUG_ON idx doesn't match seq control" 236 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
1038 " idx=%d, seq_idx=%d, seq=%d\n",
1039 idx, SEQ_TO_SN(sc),
1040 hdr->seq_ctrl);
1041 return -1;
1042 }
1043 237
1044 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n", 238 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
1045 i, idx, SEQ_TO_SN(sc)); 239 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
240 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
241 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
1046 242
1047 sh = idx - start; 243 if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
1048 if (sh > 64) { 244 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
1049 sh = (start - idx) + 0xff;
1050 bitmap = bitmap << sh;
1051 sh = 0;
1052 start = idx;
1053 } else if (sh < -64)
1054 sh = 0xff - (start - idx);
1055 else if (sh < 0) {
1056 sh = start - idx;
1057 start = idx;
1058 bitmap = bitmap << sh;
1059 sh = 0;
1060 }
1061 bitmap |= 1ULL << sh;
1062 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
1063 start, (unsigned long long)bitmap);
1064 }
1065 245
1066 agg->bitmap = bitmap; 246 /* Set initial sensitivity parameters */
1067 agg->start_idx = start; 247 /* Set initial calibration set */
1068 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n", 248 priv->hw_params.sens = &iwl5150_sensitivity;
1069 agg->frame_count, agg->start_idx, 249 priv->hw_params.calib_init_cfg =
1070 (unsigned long long)agg->bitmap); 250 BIT(IWL_CALIB_DC) |
251 BIT(IWL_CALIB_LO) |
252 BIT(IWL_CALIB_TX_IQ) |
253 BIT(IWL_CALIB_BASE_BAND);
1071 254
1072 if (bitmap)
1073 agg->wait_for_ba = 1;
1074 }
1075 return 0; 255 return 0;
1076} 256}
1077 257
/*
 * iwl5000_rx_reply_tx - handle a REPLY_TX notification from the uCode
 *
 * Called for each Tx-response packet: locates the Tx queue entry the
 * response refers to, fills in the mac80211 tx status (info->flags,
 * retry count, rate), reclaims completed TFDs from the queue, and wakes
 * the software queue when space frees up.
 *
 * @priv: driver private state
 * @rxb:  received buffer holding the REPLY_TX packet
 */
1078static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1079 struct iwl_rx_mem_buffer *rxb)
1080{
1081 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1082 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1083 int txq_id = SEQ_TO_QUEUE(sequence);
1084 int index = SEQ_TO_INDEX(sequence);
1085 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1086 struct ieee80211_tx_info *info;
1087 struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
1088 u32 status = le16_to_cpu(tx_resp->status.status);
1089 int tid;
1090 int sta_id;
1091 int freed;
1092
 /* Reject responses whose index falls outside the ring or refers to
  * a slot that is not currently in use. */
1093 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
1094 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
1095 "is out of range [0-%d] %d %d\n", txq_id,
1096 index, txq->q.n_bd, txq->q.write_ptr,
1097 txq->q.read_ptr);
1098 return;
1099 }
1100
1101 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
1102 memset(&info->status, 0, sizeof(info->status));
1103
 /* TID and station id are packed together into ra_tid. */
1104 tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
1105 sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
1106
 /* Aggregation (scheduler-retry) queues: hand the response to the
  * aggregation bookkeeping, then reclaim up to the scheduler's SSN. */
1107 if (txq->sched_retry) {
1108 const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp);
1109 struct iwl_ht_agg *agg = NULL;
1110
1111 agg = &priv->stations[sta_id].tid[tid].agg;
1112
1113 iwl5000_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
1114
1115 /* check if BAR is needed */
1116 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
1117 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1118
1119 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
1120 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
1121 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
1122 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
1123 scd_ssn , index, txq_id, txq->swq_id);
1124
1125 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1126 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1127
 /* Wake the queue only once enough space is free and we are
  * not in the middle of tearing the aggregation down. */
1128 if (priv->mac80211_registered &&
1129 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1130 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
1131 if (agg->state == IWL_AGG_OFF)
1132 iwl_wake_queue(priv, txq_id);
1133 else
1134 iwl_wake_queue(priv, txq->swq_id);
1135 }
1136 }
1137 } else {
 /* Non-aggregation path: report status of the single frame. */
1138 BUG_ON(txq_id != txq->swq_id);
1139
1140 info->status.rates[0].count = tx_resp->failure_frame + 1;
1141 info->flags |= iwl_tx_status_to_mac80211(status);
1142 iwl_hwrate_to_tx_control(priv,
1143 le32_to_cpu(tx_resp->rate_n_flags),
1144 info);
1145
1146 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
1147 "0x%x retries %d\n",
1148 txq_id,
1149 iwl_get_tx_fail_reason(status), status,
1150 le32_to_cpu(tx_resp->rate_n_flags),
1151 tx_resp->failure_frame);
1152
1153 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1154 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1155
1156 if (priv->mac80211_registered &&
1157 (iwl_queue_space(&txq->q) > txq->q.low_mark))
1158 iwl_wake_queue(priv, txq_id);
1159 }
1160
1161 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1162
 /* Tx abort handling is not implemented; just log it. */
1163 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
1164 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
1165}
1166
1167/* Currently 5000 is the superset of everything */
/*
 * iwl5000_get_hcmd_size - return the size of a host command payload
 *
 * On 5000-series hardware no command needs translation/resizing, so the
 * caller-supplied length is returned unchanged; @cmd_id is unused.
 */
1168u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len)
1169{
1170 return len;
1171}
1172
/*
 * iwl5000_setup_deferred_work - 5000-specific deferred-work setup
 *
 * Only disables the driver-side Tx power calibration work item, since
 * that calibration is performed by the uCode on this hardware.
 */
1173void iwl5000_setup_deferred_work(struct iwl_priv *priv)
1174{
1175 /* in 5000 the tx power calibration is done in uCode */
1176 priv->disable_tx_power_cal = 1;
1177}
1178
/*
 * iwl5000_rx_handler_setup - install 5000-specific Rx packet handlers
 *
 * Registers handlers for the two calibration notifications and replaces
 * the generic REPLY_TX handler with the 5000 version.
 */
1179void iwl5000_rx_handler_setup(struct iwl_priv *priv)
1180{
1181 /* init calibration handlers */
1182 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
1183 iwl5000_rx_calib_result;
1184 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
1185 iwl5000_rx_calib_complete;
1186 priv->rx_handlers[REPLY_TX] = iwl5000_rx_reply_tx;
1187}
1188
1189
/*
 * iwl5000_hw_valid_rtc_data_addr - range-check an RTC data address
 *
 * Returns non-zero iff @addr lies within the 5000-series RTC data
 * region [IWL50_RTC_DATA_LOWER_BOUND, IWL50_RTC_DATA_UPPER_BOUND).
 */
1190int iwl5000_hw_valid_rtc_data_addr(u32 addr)
1191{
1192 return (addr >= IWL50_RTC_DATA_LOWER_BOUND) &&
1193 (addr < IWL50_RTC_DATA_UPPER_BOUND);
1194}
1195
/*
 * iwl5000_send_rxon_assoc - send REPLY_RXON_ASSOC if the staging RXON
 * association fields differ from the active ones
 *
 * Compares the association-related fields of the staging and active
 * RXON commands; when they all match, nothing is sent.  Otherwise an
 * iwl5000_rxon_assoc_cmd is built from the staging RXON and sent
 * asynchronously.  Returns 0 on success or a negative errno from
 * iwl_send_cmd_pdu_async().
 */
1196static int iwl5000_send_rxon_assoc(struct iwl_priv *priv)
1197{
1198 int ret = 0;
1199 struct iwl5000_rxon_assoc_cmd rxon_assoc;
1200 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1201 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
1202
 /* Skip the command entirely when no relevant field changed. */
1203 if ((rxon1->flags == rxon2->flags) &&
1204 (rxon1->filter_flags == rxon2->filter_flags) &&
1205 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1206 (rxon1->ofdm_ht_single_stream_basic_rates ==
1207 rxon2->ofdm_ht_single_stream_basic_rates) &&
1208 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1209 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1210 (rxon1->ofdm_ht_triple_stream_basic_rates ==
1211 rxon2->ofdm_ht_triple_stream_basic_rates) &&
1212 (rxon1->acquisition_data == rxon2->acquisition_data) &&
1213 (rxon1->rx_chain == rxon2->rx_chain) &&
1214 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1215 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
1216 return 0;
1217 }
1218
 /* Populate the command from the staging RXON; reserved fields are
  * explicitly zeroed because rxon_assoc is a stack variable. */
1219 rxon_assoc.flags = priv->staging_rxon.flags;
1220 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1221 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1222 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1223 rxon_assoc.reserved1 = 0;
1224 rxon_assoc.reserved2 = 0;
1225 rxon_assoc.reserved3 = 0;
1226 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1227 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1228 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1229 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1230 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1231 rxon_assoc.ofdm_ht_triple_stream_basic_rates =
1232 priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
1233 rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
1234
1235 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1236 sizeof(rxon_assoc), &rxon_assoc, NULL);
1237 if (ret)
1238 return ret;
1239
1240 return ret;
1241}
/*
 * iwl5000_send_tx_power - send the Tx power limit to the uCode
 *
 * Builds an iwl5000_tx_power_dbm_cmd from the user limit (converted to
 * half-dBm units), optionally clamped by the EEPROM half-dBm limit, and
 * sends it asynchronously with the command id matching the uCode API
 * version.  Returns the result of iwl_send_cmd_pdu_async().
 */
1242int iwl5000_send_tx_power(struct iwl_priv *priv)
1243{
1244 struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
1245 u8 tx_ant_cfg_cmd;
1246
1247 /* half dBm need to multiply */
1248 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
1249
1250 if (priv->tx_power_lmt_in_half_dbm &&
1251 priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
1252 /*
1253 * For the newer devices which using enhanced/extend tx power
1254 * table in EEPROM, the format is in half dBm. driver need to
1255 * convert to dBm format before report to mac80211.
1256 * By doing so, there is a possibility of 1/2 dBm resolution
1257 * lost. driver will perform "round-up" operation before
1258 * reporting, but it will cause 1/2 dBm tx power over the
1259 * regulatory limit. Perform the checking here, if the
1260 * "tx_power_user_lmt" is higher than EEPROM value (in
1261 * half-dBm format), lower the tx power based on EEPROM
1262 */
1263 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
1264 }
1265 tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
1266 tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
1267
 /* uCode API v1 uses a different command id for this request. */
1268 if (IWL_UCODE_API(priv->ucode_ver) == 1)
1269 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
1270 else
1271 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
1272
1273 return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
1274 sizeof(tx_power_cmd), &tx_power_cmd,
1275 NULL);
1276}
1277
/*
 * iwl5000_temperature - latch the current temperature reading
 *
 * Copies the temperature reported in the uCode statistics into
 * priv->temperature and kicks the thermal-throttling handler.
 */
1278void iwl5000_temperature(struct iwl_priv *priv)
1279{
1280 /* store temperature from statistics (in Celsius) */
1281 priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
1282 iwl_tt_handler(priv);
1283}
1284
1285static void iwl5150_temperature(struct iwl_priv *priv) 258static void iwl5150_temperature(struct iwl_priv *priv)
1286{ 259{
1287 u32 vt = 0; 260 u32 vt = 0;
@@ -1294,100 +267,6 @@ static void iwl5150_temperature(struct iwl_priv *priv)
1294 iwl_tt_handler(priv); 267 iwl_tt_handler(priv);
1295} 268}
1296 269
1297/* Calc max signal level (dBm) among 3 possible receivers */
/*
 * iwl5000_calc_rssi - compute received signal strength in dBm
 *
 * Extracts the per-chain DSP RSSI values and the AGC reading from the
 * non-configuration PHY data of @rx_resp, takes the maximum of the three
 * chains, and converts to dBm by subtracting the AGC gain and a fixed
 * offset (IWL49_RSSI_OFFSET).
 */
1298int iwl5000_calc_rssi(struct iwl_priv *priv,
1299 struct iwl_rx_phy_res *rx_resp)
1300{
1301 /* data from PHY/DSP regarding signal strength, etc.,
1302 * contents are always there, not configurable by host
1303 */
1304 struct iwl5000_non_cfg_phy *ncphy =
1305 (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
1306 u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
1307 u8 agc;
1308
1309 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
1310 agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
1311
1312 /* Find max rssi among 3 possible receivers.
1313 * These values are measured by the digital signal processor (DSP).
1314 * They should stay fairly constant even as the signal strength varies,
1315 * if the radio's automatic gain control (AGC) is working right.
1316 * AGC value (see below) will provide the "interesting" info.
1317 */
1318 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
1319 rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
1320 rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
1321 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
1322 rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
1323
1324 max_rssi = max_t(u32, rssi_a, rssi_b);
1325 max_rssi = max_t(u32, max_rssi, rssi_c);
1326
1327 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
1328 rssi_a, rssi_b, rssi_c, max_rssi, agc);
1329
1330 /* dBm = max_rssi dB - agc dB - constant.
1331 * Higher AGC (higher radio gain) means lower signal. */
1332 return max_rssi - agc - IWL49_RSSI_OFFSET;
1333}
1334
/*
 * iwl5000_send_tx_ant_config - tell the uCode which Tx antennas are valid
 *
 * Sends TX_ANT_CONFIGURATION_CMD with @valid_tx_ant when the loaded
 * uCode API version supports it (API > 1); otherwise returns
 * -EOPNOTSUPP without sending anything.
 */
1335static int iwl5000_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
1336{
1337 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
1338 .valid = cpu_to_le32(valid_tx_ant),
1339 };
1340
1341 if (IWL_UCODE_API(priv->ucode_ver) > 1) {
1342 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
1343 return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
1344 sizeof(struct iwl_tx_ant_config_cmd),
1345 &tx_ant_cmd);
1346 } else {
1347 IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
1348 return -EOPNOTSUPP;
1349 }
1350}
1351
1352
/*
 * IWL5000_UCODE_GET - generate an accessor for a uCode header field
 *
 * Expands to a static function iwl5000_ucode_get_<item>() that reads
 * <item> from the v1 header layout for API versions <= 2 and from the
 * v2 layout otherwise, converting from little-endian.
 */
1353#define IWL5000_UCODE_GET(item) \
1354static u32 iwl5000_ucode_get_##item(const struct iwl_ucode_header *ucode,\
1355 u32 api_ver) \
1356{ \
1357 if (api_ver <= 2) \
1358 return le32_to_cpu(ucode->u.v1.item); \
1359 return le32_to_cpu(ucode->u.v2.item); \
1360}
1361
/*
 * iwl5000_ucode_get_header_size - size of the uCode file header
 *
 * API versions <= 2 use the v1 header layout; newer versions use v2.
 */
1362static u32 iwl5000_ucode_get_header_size(u32 api_ver)
1363{
1364 if (api_ver <= 2)
1365 return UCODE_HEADER_SIZE(1);
1366 return UCODE_HEADER_SIZE(2);
1367}
1368
/*
 * iwl5000_ucode_get_build - read the uCode build number
 *
 * The build field only exists in the v2 header layout; v1 headers
 * (API versions <= 2) report 0.
 */
1369static u32 iwl5000_ucode_get_build(const struct iwl_ucode_header *ucode,
1370 u32 api_ver)
1371{
1372 if (api_ver <= 2)
1373 return 0;
1374 return le32_to_cpu(ucode->u.v2.build);
1375}
1376
/*
 * iwl5000_ucode_get_data - pointer to the start of the uCode image data
 *
 * Selects the v1 or v2 header layout based on @api_ver, mirroring the
 * IWL5000_UCODE_GET accessors.
 */
1377static u8 *iwl5000_ucode_get_data(const struct iwl_ucode_header *ucode,
1378 u32 api_ver)
1379{
1380 if (api_ver <= 2)
1381 return (u8 *) ucode->u.v1.data;
1382 return (u8 *) ucode->u.v2.data;
1383}
1384
/* Instantiate accessors for each size field of the uCode header. */
1385IWL5000_UCODE_GET(inst_size);
1386IWL5000_UCODE_GET(data_size);
1387IWL5000_UCODE_GET(init_size);
1388IWL5000_UCODE_GET(init_data_size);
1389IWL5000_UCODE_GET(boot_size);
1390
1391static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel) 270static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1392{ 271{
1393 struct iwl5000_channel_switch_cmd cmd; 272 struct iwl5000_channel_switch_cmd cmd;
@@ -1420,54 +299,27 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1420 return iwl_send_cmd_sync(priv, &hcmd); 299 return iwl_send_cmd_sync(priv, &hcmd);
1421} 300}
1422 301
1423struct iwl_hcmd_ops iwl5000_hcmd = { 302static struct iwl_lib_ops iwl5000_lib = {
1424 .rxon_assoc = iwl5000_send_rxon_assoc,
1425 .commit_rxon = iwl_commit_rxon,
1426 .set_rxon_chain = iwl_set_rxon_chain,
1427 .set_tx_ant = iwl5000_send_tx_ant_config,
1428};
1429
1430struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
1431 .get_hcmd_size = iwl5000_get_hcmd_size,
1432 .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
1433 .gain_computation = iwl5000_gain_computation,
1434 .chain_noise_reset = iwl5000_chain_noise_reset,
1435 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
1436 .calc_rssi = iwl5000_calc_rssi,
1437};
1438
1439struct iwl_ucode_ops iwl5000_ucode = {
1440 .get_header_size = iwl5000_ucode_get_header_size,
1441 .get_build = iwl5000_ucode_get_build,
1442 .get_inst_size = iwl5000_ucode_get_inst_size,
1443 .get_data_size = iwl5000_ucode_get_data_size,
1444 .get_init_size = iwl5000_ucode_get_init_size,
1445 .get_init_data_size = iwl5000_ucode_get_init_data_size,
1446 .get_boot_size = iwl5000_ucode_get_boot_size,
1447 .get_data = iwl5000_ucode_get_data,
1448};
1449
1450struct iwl_lib_ops iwl5000_lib = {
1451 .set_hw_params = iwl5000_hw_set_hw_params, 303 .set_hw_params = iwl5000_hw_set_hw_params,
1452 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 304 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
1453 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 305 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
1454 .txq_set_sched = iwl5000_txq_set_sched, 306 .txq_set_sched = iwlagn_txq_set_sched,
1455 .txq_agg_enable = iwl5000_txq_agg_enable, 307 .txq_agg_enable = iwlagn_txq_agg_enable,
1456 .txq_agg_disable = iwl5000_txq_agg_disable, 308 .txq_agg_disable = iwlagn_txq_agg_disable,
1457 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 309 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
1458 .txq_free_tfd = iwl_hw_txq_free_tfd, 310 .txq_free_tfd = iwl_hw_txq_free_tfd,
1459 .txq_init = iwl_hw_tx_queue_init, 311 .txq_init = iwl_hw_tx_queue_init,
1460 .rx_handler_setup = iwl5000_rx_handler_setup, 312 .rx_handler_setup = iwlagn_rx_handler_setup,
1461 .setup_deferred_work = iwl5000_setup_deferred_work, 313 .setup_deferred_work = iwlagn_setup_deferred_work,
1462 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 314 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
1463 .dump_nic_event_log = iwl_dump_nic_event_log, 315 .dump_nic_event_log = iwl_dump_nic_event_log,
1464 .dump_nic_error_log = iwl_dump_nic_error_log, 316 .dump_nic_error_log = iwl_dump_nic_error_log,
1465 .dump_csr = iwl_dump_csr, 317 .dump_csr = iwl_dump_csr,
1466 .dump_fh = iwl_dump_fh, 318 .dump_fh = iwl_dump_fh,
1467 .load_ucode = iwl5000_load_ucode, 319 .load_ucode = iwlagn_load_ucode,
1468 .init_alive_start = iwl5000_init_alive_start, 320 .init_alive_start = iwlagn_init_alive_start,
1469 .alive_notify = iwl5000_alive_notify, 321 .alive_notify = iwlagn_alive_notify,
1470 .send_tx_power = iwl5000_send_tx_power, 322 .send_tx_power = iwlagn_send_tx_power,
1471 .update_chain_flags = iwl_update_chain_flags, 323 .update_chain_flags = iwl_update_chain_flags,
1472 .set_channel_switch = iwl5000_hw_channel_switch, 324 .set_channel_switch = iwl5000_hw_channel_switch,
1473 .apm_ops = { 325 .apm_ops = {
@@ -1478,50 +330,58 @@ struct iwl_lib_ops iwl5000_lib = {
1478 }, 330 },
1479 .eeprom_ops = { 331 .eeprom_ops = {
1480 .regulatory_bands = { 332 .regulatory_bands = {
1481 EEPROM_5000_REG_BAND_1_CHANNELS, 333 EEPROM_REG_BAND_1_CHANNELS,
1482 EEPROM_5000_REG_BAND_2_CHANNELS, 334 EEPROM_REG_BAND_2_CHANNELS,
1483 EEPROM_5000_REG_BAND_3_CHANNELS, 335 EEPROM_REG_BAND_3_CHANNELS,
1484 EEPROM_5000_REG_BAND_4_CHANNELS, 336 EEPROM_REG_BAND_4_CHANNELS,
1485 EEPROM_5000_REG_BAND_5_CHANNELS, 337 EEPROM_REG_BAND_5_CHANNELS,
1486 EEPROM_5000_REG_BAND_24_HT40_CHANNELS, 338 EEPROM_REG_BAND_24_HT40_CHANNELS,
1487 EEPROM_5000_REG_BAND_52_HT40_CHANNELS 339 EEPROM_REG_BAND_52_HT40_CHANNELS
1488 }, 340 },
1489 .verify_signature = iwlcore_eeprom_verify_signature, 341 .verify_signature = iwlcore_eeprom_verify_signature,
1490 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 342 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
1491 .release_semaphore = iwlcore_eeprom_release_semaphore, 343 .release_semaphore = iwlcore_eeprom_release_semaphore,
1492 .calib_version = iwl5000_eeprom_calib_version, 344 .calib_version = iwlagn_eeprom_calib_version,
1493 .query_addr = iwl5000_eeprom_query_addr, 345 .query_addr = iwlagn_eeprom_query_addr,
1494 }, 346 },
1495 .post_associate = iwl_post_associate, 347 .post_associate = iwl_post_associate,
1496 .isr = iwl_isr_ict, 348 .isr = iwl_isr_ict,
1497 .config_ap = iwl_config_ap, 349 .config_ap = iwl_config_ap,
1498 .temp_ops = { 350 .temp_ops = {
1499 .temperature = iwl5000_temperature, 351 .temperature = iwlagn_temperature,
1500 .set_ct_kill = iwl5000_set_ct_threshold, 352 .set_ct_kill = iwl5000_set_ct_threshold,
1501 }, 353 },
1502 .add_bcast_station = iwl_add_bcast_station, 354 .manage_ibss_station = iwlagn_manage_ibss_station,
355 .debugfs_ops = {
356 .rx_stats_read = iwl_ucode_rx_stats_read,
357 .tx_stats_read = iwl_ucode_tx_stats_read,
358 .general_stats_read = iwl_ucode_general_stats_read,
359 },
360 .recover_from_tx_stall = iwl_bg_monitor_recover,
361 .check_plcp_health = iwl_good_plcp_health,
362 .check_ack_health = iwl_good_ack_health,
1503}; 363};
1504 364
1505static struct iwl_lib_ops iwl5150_lib = { 365static struct iwl_lib_ops iwl5150_lib = {
1506 .set_hw_params = iwl5000_hw_set_hw_params, 366 .set_hw_params = iwl5150_hw_set_hw_params,
1507 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 367 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
1508 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 368 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
1509 .txq_set_sched = iwl5000_txq_set_sched, 369 .txq_set_sched = iwlagn_txq_set_sched,
1510 .txq_agg_enable = iwl5000_txq_agg_enable, 370 .txq_agg_enable = iwlagn_txq_agg_enable,
1511 .txq_agg_disable = iwl5000_txq_agg_disable, 371 .txq_agg_disable = iwlagn_txq_agg_disable,
1512 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 372 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
1513 .txq_free_tfd = iwl_hw_txq_free_tfd, 373 .txq_free_tfd = iwl_hw_txq_free_tfd,
1514 .txq_init = iwl_hw_tx_queue_init, 374 .txq_init = iwl_hw_tx_queue_init,
1515 .rx_handler_setup = iwl5000_rx_handler_setup, 375 .rx_handler_setup = iwlagn_rx_handler_setup,
1516 .setup_deferred_work = iwl5000_setup_deferred_work, 376 .setup_deferred_work = iwlagn_setup_deferred_work,
1517 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 377 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
1518 .dump_nic_event_log = iwl_dump_nic_event_log, 378 .dump_nic_event_log = iwl_dump_nic_event_log,
1519 .dump_nic_error_log = iwl_dump_nic_error_log, 379 .dump_nic_error_log = iwl_dump_nic_error_log,
1520 .dump_csr = iwl_dump_csr, 380 .dump_csr = iwl_dump_csr,
1521 .load_ucode = iwl5000_load_ucode, 381 .load_ucode = iwlagn_load_ucode,
1522 .init_alive_start = iwl5000_init_alive_start, 382 .init_alive_start = iwlagn_init_alive_start,
1523 .alive_notify = iwl5000_alive_notify, 383 .alive_notify = iwlagn_alive_notify,
1524 .send_tx_power = iwl5000_send_tx_power, 384 .send_tx_power = iwlagn_send_tx_power,
1525 .update_chain_flags = iwl_update_chain_flags, 385 .update_chain_flags = iwl_update_chain_flags,
1526 .set_channel_switch = iwl5000_hw_channel_switch, 386 .set_channel_switch = iwl5000_hw_channel_switch,
1527 .apm_ops = { 387 .apm_ops = {
@@ -1532,19 +392,19 @@ static struct iwl_lib_ops iwl5150_lib = {
1532 }, 392 },
1533 .eeprom_ops = { 393 .eeprom_ops = {
1534 .regulatory_bands = { 394 .regulatory_bands = {
1535 EEPROM_5000_REG_BAND_1_CHANNELS, 395 EEPROM_REG_BAND_1_CHANNELS,
1536 EEPROM_5000_REG_BAND_2_CHANNELS, 396 EEPROM_REG_BAND_2_CHANNELS,
1537 EEPROM_5000_REG_BAND_3_CHANNELS, 397 EEPROM_REG_BAND_3_CHANNELS,
1538 EEPROM_5000_REG_BAND_4_CHANNELS, 398 EEPROM_REG_BAND_4_CHANNELS,
1539 EEPROM_5000_REG_BAND_5_CHANNELS, 399 EEPROM_REG_BAND_5_CHANNELS,
1540 EEPROM_5000_REG_BAND_24_HT40_CHANNELS, 400 EEPROM_REG_BAND_24_HT40_CHANNELS,
1541 EEPROM_5000_REG_BAND_52_HT40_CHANNELS 401 EEPROM_REG_BAND_52_HT40_CHANNELS
1542 }, 402 },
1543 .verify_signature = iwlcore_eeprom_verify_signature, 403 .verify_signature = iwlcore_eeprom_verify_signature,
1544 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 404 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
1545 .release_semaphore = iwlcore_eeprom_release_semaphore, 405 .release_semaphore = iwlcore_eeprom_release_semaphore,
1546 .calib_version = iwl5000_eeprom_calib_version, 406 .calib_version = iwlagn_eeprom_calib_version,
1547 .query_addr = iwl5000_eeprom_query_addr, 407 .query_addr = iwlagn_eeprom_query_addr,
1548 }, 408 },
1549 .post_associate = iwl_post_associate, 409 .post_associate = iwl_post_associate,
1550 .isr = iwl_isr_ict, 410 .isr = iwl_isr_ict,
@@ -1553,45 +413,44 @@ static struct iwl_lib_ops iwl5150_lib = {
1553 .temperature = iwl5150_temperature, 413 .temperature = iwl5150_temperature,
1554 .set_ct_kill = iwl5150_set_ct_threshold, 414 .set_ct_kill = iwl5150_set_ct_threshold,
1555 }, 415 },
1556 .add_bcast_station = iwl_add_bcast_station, 416 .manage_ibss_station = iwlagn_manage_ibss_station,
417 .debugfs_ops = {
418 .rx_stats_read = iwl_ucode_rx_stats_read,
419 .tx_stats_read = iwl_ucode_tx_stats_read,
420 .general_stats_read = iwl_ucode_general_stats_read,
421 },
422 .recover_from_tx_stall = iwl_bg_monitor_recover,
423 .check_plcp_health = iwl_good_plcp_health,
424 .check_ack_health = iwl_good_ack_health,
1557}; 425};
1558 426
1559static const struct iwl_ops iwl5000_ops = { 427static const struct iwl_ops iwl5000_ops = {
1560 .ucode = &iwl5000_ucode,
1561 .lib = &iwl5000_lib, 428 .lib = &iwl5000_lib,
1562 .hcmd = &iwl5000_hcmd, 429 .hcmd = &iwlagn_hcmd,
1563 .utils = &iwl5000_hcmd_utils, 430 .utils = &iwlagn_hcmd_utils,
1564 .led = &iwlagn_led_ops, 431 .led = &iwlagn_led_ops,
1565}; 432};
1566 433
1567static const struct iwl_ops iwl5150_ops = { 434static const struct iwl_ops iwl5150_ops = {
1568 .ucode = &iwl5000_ucode,
1569 .lib = &iwl5150_lib, 435 .lib = &iwl5150_lib,
1570 .hcmd = &iwl5000_hcmd, 436 .hcmd = &iwlagn_hcmd,
1571 .utils = &iwl5000_hcmd_utils, 437 .utils = &iwlagn_hcmd_utils,
1572 .led = &iwlagn_led_ops, 438 .led = &iwlagn_led_ops,
1573}; 439};
1574 440
1575struct iwl_mod_params iwl50_mod_params = {
1576 .amsdu_size_8K = 1,
1577 .restart_fw = 1,
1578 /* the rest are 0 by default */
1579};
1580
1581
1582struct iwl_cfg iwl5300_agn_cfg = { 441struct iwl_cfg iwl5300_agn_cfg = {
1583 .name = "5300AGN", 442 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
1584 .fw_name_pre = IWL5000_FW_PRE, 443 .fw_name_pre = IWL5000_FW_PRE,
1585 .ucode_api_max = IWL5000_UCODE_API_MAX, 444 .ucode_api_max = IWL5000_UCODE_API_MAX,
1586 .ucode_api_min = IWL5000_UCODE_API_MIN, 445 .ucode_api_min = IWL5000_UCODE_API_MIN,
1587 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 446 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1588 .ops = &iwl5000_ops, 447 .ops = &iwl5000_ops,
1589 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 448 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1590 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 449 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1591 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 450 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1592 .num_of_queues = IWL50_NUM_QUEUES, 451 .num_of_queues = IWLAGN_NUM_QUEUES,
1593 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 452 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1594 .mod_params = &iwl50_mod_params, 453 .mod_params = &iwlagn_mod_params,
1595 .valid_tx_ant = ANT_ABC, 454 .valid_tx_ant = ANT_ABC,
1596 .valid_rx_ant = ANT_ABC, 455 .valid_rx_ant = ANT_ABC,
1597 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 456 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1603,21 +462,26 @@ struct iwl_cfg iwl5300_agn_cfg = {
1603 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 462 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1604 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 463 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1605 .chain_noise_scale = 1000, 464 .chain_noise_scale = 1000,
465 .monitor_recover_period = IWL_MONITORING_PERIOD,
466 .max_event_log_size = 512,
467 .ucode_tracing = true,
468 .sensitivity_calib_by_driver = true,
469 .chain_noise_calib_by_driver = true,
1606}; 470};
1607 471
1608struct iwl_cfg iwl5100_bgn_cfg = { 472struct iwl_cfg iwl5100_bgn_cfg = {
1609 .name = "5100BGN", 473 .name = "Intel(R) WiFi Link 5100 BGN",
1610 .fw_name_pre = IWL5000_FW_PRE, 474 .fw_name_pre = IWL5000_FW_PRE,
1611 .ucode_api_max = IWL5000_UCODE_API_MAX, 475 .ucode_api_max = IWL5000_UCODE_API_MAX,
1612 .ucode_api_min = IWL5000_UCODE_API_MIN, 476 .ucode_api_min = IWL5000_UCODE_API_MIN,
1613 .sku = IWL_SKU_G|IWL_SKU_N, 477 .sku = IWL_SKU_G|IWL_SKU_N,
1614 .ops = &iwl5000_ops, 478 .ops = &iwl5000_ops,
1615 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 479 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1616 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 480 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1617 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 481 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1618 .num_of_queues = IWL50_NUM_QUEUES, 482 .num_of_queues = IWLAGN_NUM_QUEUES,
1619 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 483 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1620 .mod_params = &iwl50_mod_params, 484 .mod_params = &iwlagn_mod_params,
1621 .valid_tx_ant = ANT_B, 485 .valid_tx_ant = ANT_B,
1622 .valid_rx_ant = ANT_AB, 486 .valid_rx_ant = ANT_AB,
1623 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 487 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1629,21 +493,26 @@ struct iwl_cfg iwl5100_bgn_cfg = {
1629 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 493 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1630 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 494 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1631 .chain_noise_scale = 1000, 495 .chain_noise_scale = 1000,
496 .monitor_recover_period = IWL_MONITORING_PERIOD,
497 .max_event_log_size = 512,
498 .ucode_tracing = true,
499 .sensitivity_calib_by_driver = true,
500 .chain_noise_calib_by_driver = true,
1632}; 501};
1633 502
1634struct iwl_cfg iwl5100_abg_cfg = { 503struct iwl_cfg iwl5100_abg_cfg = {
1635 .name = "5100ABG", 504 .name = "Intel(R) WiFi Link 5100 ABG",
1636 .fw_name_pre = IWL5000_FW_PRE, 505 .fw_name_pre = IWL5000_FW_PRE,
1637 .ucode_api_max = IWL5000_UCODE_API_MAX, 506 .ucode_api_max = IWL5000_UCODE_API_MAX,
1638 .ucode_api_min = IWL5000_UCODE_API_MIN, 507 .ucode_api_min = IWL5000_UCODE_API_MIN,
1639 .sku = IWL_SKU_A|IWL_SKU_G, 508 .sku = IWL_SKU_A|IWL_SKU_G,
1640 .ops = &iwl5000_ops, 509 .ops = &iwl5000_ops,
1641 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 510 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1642 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 511 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1643 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 512 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1644 .num_of_queues = IWL50_NUM_QUEUES, 513 .num_of_queues = IWLAGN_NUM_QUEUES,
1645 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 514 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1646 .mod_params = &iwl50_mod_params, 515 .mod_params = &iwlagn_mod_params,
1647 .valid_tx_ant = ANT_B, 516 .valid_tx_ant = ANT_B,
1648 .valid_rx_ant = ANT_AB, 517 .valid_rx_ant = ANT_AB,
1649 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 518 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1653,21 +522,26 @@ struct iwl_cfg iwl5100_abg_cfg = {
1653 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 522 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1654 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 523 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1655 .chain_noise_scale = 1000, 524 .chain_noise_scale = 1000,
525 .monitor_recover_period = IWL_MONITORING_PERIOD,
526 .max_event_log_size = 512,
527 .ucode_tracing = true,
528 .sensitivity_calib_by_driver = true,
529 .chain_noise_calib_by_driver = true,
1656}; 530};
1657 531
1658struct iwl_cfg iwl5100_agn_cfg = { 532struct iwl_cfg iwl5100_agn_cfg = {
1659 .name = "5100AGN", 533 .name = "Intel(R) WiFi Link 5100 AGN",
1660 .fw_name_pre = IWL5000_FW_PRE, 534 .fw_name_pre = IWL5000_FW_PRE,
1661 .ucode_api_max = IWL5000_UCODE_API_MAX, 535 .ucode_api_max = IWL5000_UCODE_API_MAX,
1662 .ucode_api_min = IWL5000_UCODE_API_MIN, 536 .ucode_api_min = IWL5000_UCODE_API_MIN,
1663 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 537 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1664 .ops = &iwl5000_ops, 538 .ops = &iwl5000_ops,
1665 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 539 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1666 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 540 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1667 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 541 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1668 .num_of_queues = IWL50_NUM_QUEUES, 542 .num_of_queues = IWLAGN_NUM_QUEUES,
1669 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 543 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1670 .mod_params = &iwl50_mod_params, 544 .mod_params = &iwlagn_mod_params,
1671 .valid_tx_ant = ANT_B, 545 .valid_tx_ant = ANT_B,
1672 .valid_rx_ant = ANT_AB, 546 .valid_rx_ant = ANT_AB,
1673 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 547 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1679,21 +553,26 @@ struct iwl_cfg iwl5100_agn_cfg = {
1679 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 553 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1680 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 554 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1681 .chain_noise_scale = 1000, 555 .chain_noise_scale = 1000,
556 .monitor_recover_period = IWL_MONITORING_PERIOD,
557 .max_event_log_size = 512,
558 .ucode_tracing = true,
559 .sensitivity_calib_by_driver = true,
560 .chain_noise_calib_by_driver = true,
1682}; 561};
1683 562
1684struct iwl_cfg iwl5350_agn_cfg = { 563struct iwl_cfg iwl5350_agn_cfg = {
1685 .name = "5350AGN", 564 .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
1686 .fw_name_pre = IWL5000_FW_PRE, 565 .fw_name_pre = IWL5000_FW_PRE,
1687 .ucode_api_max = IWL5000_UCODE_API_MAX, 566 .ucode_api_max = IWL5000_UCODE_API_MAX,
1688 .ucode_api_min = IWL5000_UCODE_API_MIN, 567 .ucode_api_min = IWL5000_UCODE_API_MIN,
1689 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 568 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1690 .ops = &iwl5000_ops, 569 .ops = &iwl5000_ops,
1691 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 570 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1692 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 571 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1693 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 572 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1694 .num_of_queues = IWL50_NUM_QUEUES, 573 .num_of_queues = IWLAGN_NUM_QUEUES,
1695 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 574 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1696 .mod_params = &iwl50_mod_params, 575 .mod_params = &iwlagn_mod_params,
1697 .valid_tx_ant = ANT_ABC, 576 .valid_tx_ant = ANT_ABC,
1698 .valid_rx_ant = ANT_ABC, 577 .valid_rx_ant = ANT_ABC,
1699 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 578 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1705,21 +584,26 @@ struct iwl_cfg iwl5350_agn_cfg = {
1705 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 584 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1706 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 585 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1707 .chain_noise_scale = 1000, 586 .chain_noise_scale = 1000,
587 .monitor_recover_period = IWL_MONITORING_PERIOD,
588 .max_event_log_size = 512,
589 .ucode_tracing = true,
590 .sensitivity_calib_by_driver = true,
591 .chain_noise_calib_by_driver = true,
1708}; 592};
1709 593
1710struct iwl_cfg iwl5150_agn_cfg = { 594struct iwl_cfg iwl5150_agn_cfg = {
1711 .name = "5150AGN", 595 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
1712 .fw_name_pre = IWL5150_FW_PRE, 596 .fw_name_pre = IWL5150_FW_PRE,
1713 .ucode_api_max = IWL5150_UCODE_API_MAX, 597 .ucode_api_max = IWL5150_UCODE_API_MAX,
1714 .ucode_api_min = IWL5150_UCODE_API_MIN, 598 .ucode_api_min = IWL5150_UCODE_API_MIN,
1715 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 599 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1716 .ops = &iwl5150_ops, 600 .ops = &iwl5150_ops,
1717 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 601 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1718 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 602 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1719 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 603 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1720 .num_of_queues = IWL50_NUM_QUEUES, 604 .num_of_queues = IWLAGN_NUM_QUEUES,
1721 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 605 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1722 .mod_params = &iwl50_mod_params, 606 .mod_params = &iwlagn_mod_params,
1723 .valid_tx_ant = ANT_A, 607 .valid_tx_ant = ANT_A,
1724 .valid_rx_ant = ANT_AB, 608 .valid_rx_ant = ANT_AB,
1725 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 609 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1731,21 +615,26 @@ struct iwl_cfg iwl5150_agn_cfg = {
1731 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 615 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1732 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 616 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1733 .chain_noise_scale = 1000, 617 .chain_noise_scale = 1000,
618 .monitor_recover_period = IWL_MONITORING_PERIOD,
619 .max_event_log_size = 512,
620 .ucode_tracing = true,
621 .sensitivity_calib_by_driver = true,
622 .chain_noise_calib_by_driver = true,
1734}; 623};
1735 624
1736struct iwl_cfg iwl5150_abg_cfg = { 625struct iwl_cfg iwl5150_abg_cfg = {
1737 .name = "5150ABG", 626 .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
1738 .fw_name_pre = IWL5150_FW_PRE, 627 .fw_name_pre = IWL5150_FW_PRE,
1739 .ucode_api_max = IWL5150_UCODE_API_MAX, 628 .ucode_api_max = IWL5150_UCODE_API_MAX,
1740 .ucode_api_min = IWL5150_UCODE_API_MIN, 629 .ucode_api_min = IWL5150_UCODE_API_MIN,
1741 .sku = IWL_SKU_A|IWL_SKU_G, 630 .sku = IWL_SKU_A|IWL_SKU_G,
1742 .ops = &iwl5150_ops, 631 .ops = &iwl5150_ops,
1743 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 632 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1744 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 633 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1745 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 634 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1746 .num_of_queues = IWL50_NUM_QUEUES, 635 .num_of_queues = IWLAGN_NUM_QUEUES,
1747 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 636 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1748 .mod_params = &iwl50_mod_params, 637 .mod_params = &iwlagn_mod_params,
1749 .valid_tx_ant = ANT_A, 638 .valid_tx_ant = ANT_A,
1750 .valid_rx_ant = ANT_AB, 639 .valid_rx_ant = ANT_AB,
1751 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 640 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1755,20 +644,12 @@ struct iwl_cfg iwl5150_abg_cfg = {
1755 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 644 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1756 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 645 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1757 .chain_noise_scale = 1000, 646 .chain_noise_scale = 1000,
647 .monitor_recover_period = IWL_MONITORING_PERIOD,
648 .max_event_log_size = 512,
649 .ucode_tracing = true,
650 .sensitivity_calib_by_driver = true,
651 .chain_noise_calib_by_driver = true,
1758}; 652};
1759 653
1760MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); 654MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
1761MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX)); 655MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
1762
1763module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, S_IRUGO);
1764MODULE_PARM_DESC(swcrypto50,
1765 "using software crypto engine (default 0 [hardware])\n");
1766module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, S_IRUGO);
1767MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
1768module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, S_IRUGO);
1769MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality");
1770module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K,
1771 int, S_IRUGO);
1772MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
1773module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, S_IRUGO);
1774MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index c4844adff92a..9fbf54cd3e1a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -42,18 +42,22 @@
42#include "iwl-core.h" 42#include "iwl-core.h"
43#include "iwl-io.h" 43#include "iwl-io.h"
44#include "iwl-sta.h" 44#include "iwl-sta.h"
45#include "iwl-agn.h"
45#include "iwl-helpers.h" 46#include "iwl-helpers.h"
46#include "iwl-5000-hw.h" 47#include "iwl-agn-hw.h"
47#include "iwl-6000-hw.h" 48#include "iwl-6000-hw.h"
48#include "iwl-agn-led.h" 49#include "iwl-agn-led.h"
50#include "iwl-agn-debugfs.h"
49 51
50/* Highest firmware API version supported */ 52/* Highest firmware API version supported */
51#define IWL6000_UCODE_API_MAX 4 53#define IWL6000_UCODE_API_MAX 4
52#define IWL6050_UCODE_API_MAX 4 54#define IWL6050_UCODE_API_MAX 4
55#define IWL6000G2_UCODE_API_MAX 4
53 56
54/* Lowest firmware API version supported */ 57/* Lowest firmware API version supported */
55#define IWL6000_UCODE_API_MIN 4 58#define IWL6000_UCODE_API_MIN 4
56#define IWL6050_UCODE_API_MIN 4 59#define IWL6050_UCODE_API_MIN 4
60#define IWL6000G2_UCODE_API_MIN 4
57 61
58#define IWL6000_FW_PRE "iwlwifi-6000-" 62#define IWL6000_FW_PRE "iwlwifi-6000-"
59#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" 63#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -63,6 +67,11 @@
63#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode" 67#define _IWL6050_MODULE_FIRMWARE(api) IWL6050_FW_PRE #api ".ucode"
64#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api) 68#define IWL6050_MODULE_FIRMWARE(api) _IWL6050_MODULE_FIRMWARE(api)
65 69
70#define IWL6000G2A_FW_PRE "iwlwifi-6000g2a-"
71#define _IWL6000G2A_MODULE_FIRMWARE(api) IWL6000G2A_FW_PRE #api ".ucode"
72#define IWL6000G2A_MODULE_FIRMWARE(api) _IWL6000G2A_MODULE_FIRMWARE(api)
73
74
66static void iwl6000_set_ct_threshold(struct iwl_priv *priv) 75static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
67{ 76{
68 /* want Celsius */ 77 /* want Celsius */
@@ -136,7 +145,7 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
136static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) 145static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
137{ 146{
138 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 147 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
139 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES) 148 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
140 priv->cfg->num_of_queues = 149 priv->cfg->num_of_queues =
141 priv->cfg->mod_params->num_of_queues; 150 priv->cfg->mod_params->num_of_queues;
142 151
@@ -144,7 +153,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
144 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 153 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
145 priv->hw_params.scd_bc_tbls_size = 154 priv->hw_params.scd_bc_tbls_size =
146 priv->cfg->num_of_queues * 155 priv->cfg->num_of_queues *
147 sizeof(struct iwl5000_scd_bc_tbl); 156 sizeof(struct iwlagn_scd_bc_tbl);
148 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 157 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
149 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 158 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
150 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 159 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
@@ -168,24 +177,56 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
168 /* Set initial sensitivity parameters */ 177 /* Set initial sensitivity parameters */
169 /* Set initial calibration set */ 178 /* Set initial calibration set */
170 priv->hw_params.sens = &iwl6000_sensitivity; 179 priv->hw_params.sens = &iwl6000_sensitivity;
171 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 180 priv->hw_params.calib_init_cfg =
172 case CSR_HW_REV_TYPE_6x50: 181 BIT(IWL_CALIB_XTAL) |
173 priv->hw_params.calib_init_cfg = 182 BIT(IWL_CALIB_LO) |
174 BIT(IWL_CALIB_XTAL) | 183 BIT(IWL_CALIB_TX_IQ) |
175 BIT(IWL_CALIB_DC) | 184 BIT(IWL_CALIB_BASE_BAND);
176 BIT(IWL_CALIB_LO) | 185
177 BIT(IWL_CALIB_TX_IQ) | 186 return 0;
178 BIT(IWL_CALIB_BASE_BAND); 187}
179 188
180 break; 189static int iwl6050_hw_set_hw_params(struct iwl_priv *priv)
181 default: 190{
182 priv->hw_params.calib_init_cfg = 191 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
183 BIT(IWL_CALIB_XTAL) | 192 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
184 BIT(IWL_CALIB_LO) | 193 priv->cfg->num_of_queues =
185 BIT(IWL_CALIB_TX_IQ) | 194 priv->cfg->mod_params->num_of_queues;
186 BIT(IWL_CALIB_BASE_BAND); 195
187 break; 196 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
188 } 197 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
198 priv->hw_params.scd_bc_tbls_size =
199 priv->cfg->num_of_queues *
200 sizeof(struct iwlagn_scd_bc_tbl);
201 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
202 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
203 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
204
205 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
206 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
207
208 priv->hw_params.max_bsm_size = 0;
209 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
210 BIT(IEEE80211_BAND_5GHZ);
211 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
212
213 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
214 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
215 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
216 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
217
218 if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
219 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
220
221 /* Set initial sensitivity parameters */
222 /* Set initial calibration set */
223 priv->hw_params.sens = &iwl6000_sensitivity;
224 priv->hw_params.calib_init_cfg =
225 BIT(IWL_CALIB_XTAL) |
226 BIT(IWL_CALIB_DC) |
227 BIT(IWL_CALIB_LO) |
228 BIT(IWL_CALIB_TX_IQ) |
229 BIT(IWL_CALIB_BASE_BAND);
189 230
190 return 0; 231 return 0;
191} 232}
@@ -225,25 +266,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
225 266
226static struct iwl_lib_ops iwl6000_lib = { 267static struct iwl_lib_ops iwl6000_lib = {
227 .set_hw_params = iwl6000_hw_set_hw_params, 268 .set_hw_params = iwl6000_hw_set_hw_params,
228 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 269 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
229 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 270 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
230 .txq_set_sched = iwl5000_txq_set_sched, 271 .txq_set_sched = iwlagn_txq_set_sched,
231 .txq_agg_enable = iwl5000_txq_agg_enable, 272 .txq_agg_enable = iwlagn_txq_agg_enable,
232 .txq_agg_disable = iwl5000_txq_agg_disable, 273 .txq_agg_disable = iwlagn_txq_agg_disable,
233 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 274 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
234 .txq_free_tfd = iwl_hw_txq_free_tfd, 275 .txq_free_tfd = iwl_hw_txq_free_tfd,
235 .txq_init = iwl_hw_tx_queue_init, 276 .txq_init = iwl_hw_tx_queue_init,
236 .rx_handler_setup = iwl5000_rx_handler_setup, 277 .rx_handler_setup = iwlagn_rx_handler_setup,
237 .setup_deferred_work = iwl5000_setup_deferred_work, 278 .setup_deferred_work = iwlagn_setup_deferred_work,
238 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 279 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
239 .load_ucode = iwl5000_load_ucode, 280 .load_ucode = iwlagn_load_ucode,
240 .dump_nic_event_log = iwl_dump_nic_event_log, 281 .dump_nic_event_log = iwl_dump_nic_event_log,
241 .dump_nic_error_log = iwl_dump_nic_error_log, 282 .dump_nic_error_log = iwl_dump_nic_error_log,
242 .dump_csr = iwl_dump_csr, 283 .dump_csr = iwl_dump_csr,
243 .dump_fh = iwl_dump_fh, 284 .dump_fh = iwl_dump_fh,
244 .init_alive_start = iwl5000_init_alive_start, 285 .init_alive_start = iwlagn_init_alive_start,
245 .alive_notify = iwl5000_alive_notify, 286 .alive_notify = iwlagn_alive_notify,
246 .send_tx_power = iwl5000_send_tx_power, 287 .send_tx_power = iwlagn_send_tx_power,
247 .update_chain_flags = iwl_update_chain_flags, 288 .update_chain_flags = iwl_update_chain_flags,
248 .set_channel_switch = iwl6000_hw_channel_switch, 289 .set_channel_switch = iwl6000_hw_channel_switch,
249 .apm_ops = { 290 .apm_ops = {
@@ -254,60 +295,67 @@ static struct iwl_lib_ops iwl6000_lib = {
254 }, 295 },
255 .eeprom_ops = { 296 .eeprom_ops = {
256 .regulatory_bands = { 297 .regulatory_bands = {
257 EEPROM_5000_REG_BAND_1_CHANNELS, 298 EEPROM_REG_BAND_1_CHANNELS,
258 EEPROM_5000_REG_BAND_2_CHANNELS, 299 EEPROM_REG_BAND_2_CHANNELS,
259 EEPROM_5000_REG_BAND_3_CHANNELS, 300 EEPROM_REG_BAND_3_CHANNELS,
260 EEPROM_5000_REG_BAND_4_CHANNELS, 301 EEPROM_REG_BAND_4_CHANNELS,
261 EEPROM_5000_REG_BAND_5_CHANNELS, 302 EEPROM_REG_BAND_5_CHANNELS,
262 EEPROM_5000_REG_BAND_24_HT40_CHANNELS, 303 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
263 EEPROM_5000_REG_BAND_52_HT40_CHANNELS 304 EEPROM_REG_BAND_52_HT40_CHANNELS
264 }, 305 },
265 .verify_signature = iwlcore_eeprom_verify_signature, 306 .verify_signature = iwlcore_eeprom_verify_signature,
266 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 307 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
267 .release_semaphore = iwlcore_eeprom_release_semaphore, 308 .release_semaphore = iwlcore_eeprom_release_semaphore,
268 .calib_version = iwl5000_eeprom_calib_version, 309 .calib_version = iwlagn_eeprom_calib_version,
269 .query_addr = iwl5000_eeprom_query_addr, 310 .query_addr = iwlagn_eeprom_query_addr,
270 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 311 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
271 }, 312 },
272 .post_associate = iwl_post_associate, 313 .post_associate = iwl_post_associate,
273 .isr = iwl_isr_ict, 314 .isr = iwl_isr_ict,
274 .config_ap = iwl_config_ap, 315 .config_ap = iwl_config_ap,
275 .temp_ops = { 316 .temp_ops = {
276 .temperature = iwl5000_temperature, 317 .temperature = iwlagn_temperature,
277 .set_ct_kill = iwl6000_set_ct_threshold, 318 .set_ct_kill = iwl6000_set_ct_threshold,
278 }, 319 },
279 .add_bcast_station = iwl_add_bcast_station, 320 .manage_ibss_station = iwlagn_manage_ibss_station,
321 .debugfs_ops = {
322 .rx_stats_read = iwl_ucode_rx_stats_read,
323 .tx_stats_read = iwl_ucode_tx_stats_read,
324 .general_stats_read = iwl_ucode_general_stats_read,
325 },
326 .recover_from_tx_stall = iwl_bg_monitor_recover,
327 .check_plcp_health = iwl_good_plcp_health,
328 .check_ack_health = iwl_good_ack_health,
280}; 329};
281 330
282static const struct iwl_ops iwl6000_ops = { 331static const struct iwl_ops iwl6000_ops = {
283 .ucode = &iwl5000_ucode,
284 .lib = &iwl6000_lib, 332 .lib = &iwl6000_lib,
285 .hcmd = &iwl5000_hcmd, 333 .hcmd = &iwlagn_hcmd,
286 .utils = &iwl5000_hcmd_utils, 334 .utils = &iwlagn_hcmd_utils,
287 .led = &iwlagn_led_ops, 335 .led = &iwlagn_led_ops,
288}; 336};
289 337
290static struct iwl_lib_ops iwl6050_lib = { 338static struct iwl_lib_ops iwl6050_lib = {
291 .set_hw_params = iwl6000_hw_set_hw_params, 339 .set_hw_params = iwl6050_hw_set_hw_params,
292 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 340 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
293 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 341 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
294 .txq_set_sched = iwl5000_txq_set_sched, 342 .txq_set_sched = iwlagn_txq_set_sched,
295 .txq_agg_enable = iwl5000_txq_agg_enable, 343 .txq_agg_enable = iwlagn_txq_agg_enable,
296 .txq_agg_disable = iwl5000_txq_agg_disable, 344 .txq_agg_disable = iwlagn_txq_agg_disable,
297 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 345 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
298 .txq_free_tfd = iwl_hw_txq_free_tfd, 346 .txq_free_tfd = iwl_hw_txq_free_tfd,
299 .txq_init = iwl_hw_tx_queue_init, 347 .txq_init = iwl_hw_tx_queue_init,
300 .rx_handler_setup = iwl5000_rx_handler_setup, 348 .rx_handler_setup = iwlagn_rx_handler_setup,
301 .setup_deferred_work = iwl5000_setup_deferred_work, 349 .setup_deferred_work = iwlagn_setup_deferred_work,
302 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 350 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
303 .load_ucode = iwl5000_load_ucode, 351 .load_ucode = iwlagn_load_ucode,
304 .dump_nic_event_log = iwl_dump_nic_event_log, 352 .dump_nic_event_log = iwl_dump_nic_event_log,
305 .dump_nic_error_log = iwl_dump_nic_error_log, 353 .dump_nic_error_log = iwl_dump_nic_error_log,
306 .dump_csr = iwl_dump_csr, 354 .dump_csr = iwl_dump_csr,
307 .dump_fh = iwl_dump_fh, 355 .dump_fh = iwl_dump_fh,
308 .init_alive_start = iwl5000_init_alive_start, 356 .init_alive_start = iwlagn_init_alive_start,
309 .alive_notify = iwl5000_alive_notify, 357 .alive_notify = iwlagn_alive_notify,
310 .send_tx_power = iwl5000_send_tx_power, 358 .send_tx_power = iwlagn_send_tx_power,
311 .update_chain_flags = iwl_update_chain_flags, 359 .update_chain_flags = iwl_update_chain_flags,
312 .set_channel_switch = iwl6000_hw_channel_switch, 360 .set_channel_switch = iwl6000_hw_channel_switch,
313 .apm_ops = { 361 .apm_ops = {
@@ -318,45 +366,90 @@ static struct iwl_lib_ops iwl6050_lib = {
318 }, 366 },
319 .eeprom_ops = { 367 .eeprom_ops = {
320 .regulatory_bands = { 368 .regulatory_bands = {
321 EEPROM_5000_REG_BAND_1_CHANNELS, 369 EEPROM_REG_BAND_1_CHANNELS,
322 EEPROM_5000_REG_BAND_2_CHANNELS, 370 EEPROM_REG_BAND_2_CHANNELS,
323 EEPROM_5000_REG_BAND_3_CHANNELS, 371 EEPROM_REG_BAND_3_CHANNELS,
324 EEPROM_5000_REG_BAND_4_CHANNELS, 372 EEPROM_REG_BAND_4_CHANNELS,
325 EEPROM_5000_REG_BAND_5_CHANNELS, 373 EEPROM_REG_BAND_5_CHANNELS,
326 EEPROM_5000_REG_BAND_24_HT40_CHANNELS, 374 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
327 EEPROM_5000_REG_BAND_52_HT40_CHANNELS 375 EEPROM_REG_BAND_52_HT40_CHANNELS
328 }, 376 },
329 .verify_signature = iwlcore_eeprom_verify_signature, 377 .verify_signature = iwlcore_eeprom_verify_signature,
330 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 378 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
331 .release_semaphore = iwlcore_eeprom_release_semaphore, 379 .release_semaphore = iwlcore_eeprom_release_semaphore,
332 .calib_version = iwl5000_eeprom_calib_version, 380 .calib_version = iwlagn_eeprom_calib_version,
333 .query_addr = iwl5000_eeprom_query_addr, 381 .query_addr = iwlagn_eeprom_query_addr,
334 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 382 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
335 }, 383 },
336 .post_associate = iwl_post_associate, 384 .post_associate = iwl_post_associate,
337 .isr = iwl_isr_ict, 385 .isr = iwl_isr_ict,
338 .config_ap = iwl_config_ap, 386 .config_ap = iwl_config_ap,
339 .temp_ops = { 387 .temp_ops = {
340 .temperature = iwl5000_temperature, 388 .temperature = iwlagn_temperature,
341 .set_ct_kill = iwl6000_set_ct_threshold, 389 .set_ct_kill = iwl6000_set_ct_threshold,
342 .set_calib_version = iwl6050_set_calib_version, 390 .set_calib_version = iwl6050_set_calib_version,
343 }, 391 },
344 .add_bcast_station = iwl_add_bcast_station, 392 .manage_ibss_station = iwlagn_manage_ibss_station,
393 .debugfs_ops = {
394 .rx_stats_read = iwl_ucode_rx_stats_read,
395 .tx_stats_read = iwl_ucode_tx_stats_read,
396 .general_stats_read = iwl_ucode_general_stats_read,
397 },
398 .recover_from_tx_stall = iwl_bg_monitor_recover,
399 .check_plcp_health = iwl_good_plcp_health,
400 .check_ack_health = iwl_good_ack_health,
345}; 401};
346 402
347static const struct iwl_ops iwl6050_ops = { 403static const struct iwl_ops iwl6050_ops = {
348 .ucode = &iwl5000_ucode,
349 .lib = &iwl6050_lib, 404 .lib = &iwl6050_lib,
350 .hcmd = &iwl5000_hcmd, 405 .hcmd = &iwlagn_hcmd,
351 .utils = &iwl5000_hcmd_utils, 406 .utils = &iwlagn_hcmd_utils,
352 .led = &iwlagn_led_ops, 407 .led = &iwlagn_led_ops,
353}; 408};
354 409
410
411struct iwl_cfg iwl6000g2a_2agn_cfg = {
412 .name = "6000 Series 2x2 AGN Gen2a",
413 .fw_name_pre = IWL6000G2A_FW_PRE,
414 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
415 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
416 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
417 .ops = &iwl6000_ops,
418 .eeprom_size = OTP_LOW_IMAGE_SIZE,
419 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
420 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
421 .num_of_queues = IWLAGN_NUM_QUEUES,
422 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
423 .mod_params = &iwlagn_mod_params,
424 .valid_tx_ant = ANT_AB,
425 .valid_rx_ant = ANT_AB,
426 .pll_cfg_val = 0,
427 .set_l0s = true,
428 .use_bsm = false,
429 .pa_type = IWL_PA_SYSTEM,
430 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
431 .shadow_ram_support = true,
432 .ht_greenfield_support = true,
433 .led_compensation = 51,
434 .use_rts_for_ht = true, /* use rts/cts protection */
435 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
436 .supports_idle = true,
437 .adv_thermal_throttle = true,
438 .support_ct_kill_exit = true,
439 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
440 .chain_noise_scale = 1000,
441 .monitor_recover_period = IWL_MONITORING_PERIOD,
442 .max_event_log_size = 512,
443 .ucode_tracing = true,
444 .sensitivity_calib_by_driver = true,
445 .chain_noise_calib_by_driver = true,
446};
447
355/* 448/*
356 * "i": Internal configuration, use internal Power Amplifier 449 * "i": Internal configuration, use internal Power Amplifier
357 */ 450 */
358struct iwl_cfg iwl6000i_2agn_cfg = { 451struct iwl_cfg iwl6000i_2agn_cfg = {
359 .name = "6000 Series 2x2 AGN", 452 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
360 .fw_name_pre = IWL6000_FW_PRE, 453 .fw_name_pre = IWL6000_FW_PRE,
361 .ucode_api_max = IWL6000_UCODE_API_MAX, 454 .ucode_api_max = IWL6000_UCODE_API_MAX,
362 .ucode_api_min = IWL6000_UCODE_API_MIN, 455 .ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -364,10 +457,10 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
364 .ops = &iwl6000_ops, 457 .ops = &iwl6000_ops,
365 .eeprom_size = OTP_LOW_IMAGE_SIZE, 458 .eeprom_size = OTP_LOW_IMAGE_SIZE,
366 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 459 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
367 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 460 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
368 .num_of_queues = IWL50_NUM_QUEUES, 461 .num_of_queues = IWLAGN_NUM_QUEUES,
369 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 462 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
370 .mod_params = &iwl50_mod_params, 463 .mod_params = &iwlagn_mod_params,
371 .valid_tx_ant = ANT_BC, 464 .valid_tx_ant = ANT_BC,
372 .valid_rx_ant = ANT_BC, 465 .valid_rx_ant = ANT_BC,
373 .pll_cfg_val = 0, 466 .pll_cfg_val = 0,
@@ -385,10 +478,15 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
385 .support_ct_kill_exit = true, 478 .support_ct_kill_exit = true,
386 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 479 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
387 .chain_noise_scale = 1000, 480 .chain_noise_scale = 1000,
481 .monitor_recover_period = IWL_MONITORING_PERIOD,
482 .max_event_log_size = 1024,
483 .ucode_tracing = true,
484 .sensitivity_calib_by_driver = true,
485 .chain_noise_calib_by_driver = true,
388}; 486};
389 487
390struct iwl_cfg iwl6000i_2abg_cfg = { 488struct iwl_cfg iwl6000i_2abg_cfg = {
391 .name = "6000 Series 2x2 ABG", 489 .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
392 .fw_name_pre = IWL6000_FW_PRE, 490 .fw_name_pre = IWL6000_FW_PRE,
393 .ucode_api_max = IWL6000_UCODE_API_MAX, 491 .ucode_api_max = IWL6000_UCODE_API_MAX,
394 .ucode_api_min = IWL6000_UCODE_API_MIN, 492 .ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -396,10 +494,10 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
396 .ops = &iwl6000_ops, 494 .ops = &iwl6000_ops,
397 .eeprom_size = OTP_LOW_IMAGE_SIZE, 495 .eeprom_size = OTP_LOW_IMAGE_SIZE,
398 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 496 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
399 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 497 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
400 .num_of_queues = IWL50_NUM_QUEUES, 498 .num_of_queues = IWLAGN_NUM_QUEUES,
401 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 499 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
402 .mod_params = &iwl50_mod_params, 500 .mod_params = &iwlagn_mod_params,
403 .valid_tx_ant = ANT_BC, 501 .valid_tx_ant = ANT_BC,
404 .valid_rx_ant = ANT_BC, 502 .valid_rx_ant = ANT_BC,
405 .pll_cfg_val = 0, 503 .pll_cfg_val = 0,
@@ -408,7 +506,6 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
408 .pa_type = IWL_PA_INTERNAL, 506 .pa_type = IWL_PA_INTERNAL,
409 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 507 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
410 .shadow_ram_support = true, 508 .shadow_ram_support = true,
411 .ht_greenfield_support = true,
412 .led_compensation = 51, 509 .led_compensation = 51,
413 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 510 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
414 .supports_idle = true, 511 .supports_idle = true,
@@ -416,10 +513,15 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
416 .support_ct_kill_exit = true, 513 .support_ct_kill_exit = true,
417 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 514 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
418 .chain_noise_scale = 1000, 515 .chain_noise_scale = 1000,
516 .monitor_recover_period = IWL_MONITORING_PERIOD,
517 .max_event_log_size = 1024,
518 .ucode_tracing = true,
519 .sensitivity_calib_by_driver = true,
520 .chain_noise_calib_by_driver = true,
419}; 521};
420 522
421struct iwl_cfg iwl6000i_2bg_cfg = { 523struct iwl_cfg iwl6000i_2bg_cfg = {
422 .name = "6000 Series 2x2 BG", 524 .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
423 .fw_name_pre = IWL6000_FW_PRE, 525 .fw_name_pre = IWL6000_FW_PRE,
424 .ucode_api_max = IWL6000_UCODE_API_MAX, 526 .ucode_api_max = IWL6000_UCODE_API_MAX,
425 .ucode_api_min = IWL6000_UCODE_API_MIN, 527 .ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -427,10 +529,10 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
427 .ops = &iwl6000_ops, 529 .ops = &iwl6000_ops,
428 .eeprom_size = OTP_LOW_IMAGE_SIZE, 530 .eeprom_size = OTP_LOW_IMAGE_SIZE,
429 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 531 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
430 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 532 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
431 .num_of_queues = IWL50_NUM_QUEUES, 533 .num_of_queues = IWLAGN_NUM_QUEUES,
432 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 534 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
433 .mod_params = &iwl50_mod_params, 535 .mod_params = &iwlagn_mod_params,
434 .valid_tx_ant = ANT_BC, 536 .valid_tx_ant = ANT_BC,
435 .valid_rx_ant = ANT_BC, 537 .valid_rx_ant = ANT_BC,
436 .pll_cfg_val = 0, 538 .pll_cfg_val = 0,
@@ -439,7 +541,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
439 .pa_type = IWL_PA_INTERNAL, 541 .pa_type = IWL_PA_INTERNAL,
440 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 542 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
441 .shadow_ram_support = true, 543 .shadow_ram_support = true,
442 .ht_greenfield_support = true,
443 .led_compensation = 51, 544 .led_compensation = 51,
444 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 545 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
445 .supports_idle = true, 546 .supports_idle = true,
@@ -447,10 +548,15 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
447 .support_ct_kill_exit = true, 548 .support_ct_kill_exit = true,
448 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 549 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
449 .chain_noise_scale = 1000, 550 .chain_noise_scale = 1000,
551 .monitor_recover_period = IWL_MONITORING_PERIOD,
552 .max_event_log_size = 1024,
553 .ucode_tracing = true,
554 .sensitivity_calib_by_driver = true,
555 .chain_noise_calib_by_driver = true,
450}; 556};
451 557
452struct iwl_cfg iwl6050_2agn_cfg = { 558struct iwl_cfg iwl6050_2agn_cfg = {
453 .name = "6050 Series 2x2 AGN", 559 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
454 .fw_name_pre = IWL6050_FW_PRE, 560 .fw_name_pre = IWL6050_FW_PRE,
455 .ucode_api_max = IWL6050_UCODE_API_MAX, 561 .ucode_api_max = IWL6050_UCODE_API_MAX,
456 .ucode_api_min = IWL6050_UCODE_API_MIN, 562 .ucode_api_min = IWL6050_UCODE_API_MIN,
@@ -458,10 +564,10 @@ struct iwl_cfg iwl6050_2agn_cfg = {
458 .ops = &iwl6050_ops, 564 .ops = &iwl6050_ops,
459 .eeprom_size = OTP_LOW_IMAGE_SIZE, 565 .eeprom_size = OTP_LOW_IMAGE_SIZE,
460 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, 566 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
461 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 567 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
462 .num_of_queues = IWL50_NUM_QUEUES, 568 .num_of_queues = IWLAGN_NUM_QUEUES,
463 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 569 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
464 .mod_params = &iwl50_mod_params, 570 .mod_params = &iwlagn_mod_params,
465 .valid_tx_ant = ANT_AB, 571 .valid_tx_ant = ANT_AB,
466 .valid_rx_ant = ANT_AB, 572 .valid_rx_ant = ANT_AB,
467 .pll_cfg_val = 0, 573 .pll_cfg_val = 0,
@@ -479,10 +585,15 @@ struct iwl_cfg iwl6050_2agn_cfg = {
479 .support_ct_kill_exit = true, 585 .support_ct_kill_exit = true,
480 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 586 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
481 .chain_noise_scale = 1500, 587 .chain_noise_scale = 1500,
588 .monitor_recover_period = IWL_MONITORING_PERIOD,
589 .max_event_log_size = 1024,
590 .ucode_tracing = true,
591 .sensitivity_calib_by_driver = true,
592 .chain_noise_calib_by_driver = true,
482}; 593};
483 594
484struct iwl_cfg iwl6050_2abg_cfg = { 595struct iwl_cfg iwl6050_2abg_cfg = {
485 .name = "6050 Series 2x2 ABG", 596 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
486 .fw_name_pre = IWL6050_FW_PRE, 597 .fw_name_pre = IWL6050_FW_PRE,
487 .ucode_api_max = IWL6050_UCODE_API_MAX, 598 .ucode_api_max = IWL6050_UCODE_API_MAX,
488 .ucode_api_min = IWL6050_UCODE_API_MIN, 599 .ucode_api_min = IWL6050_UCODE_API_MIN,
@@ -490,10 +601,10 @@ struct iwl_cfg iwl6050_2abg_cfg = {
490 .ops = &iwl6050_ops, 601 .ops = &iwl6050_ops,
491 .eeprom_size = OTP_LOW_IMAGE_SIZE, 602 .eeprom_size = OTP_LOW_IMAGE_SIZE,
492 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, 603 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
493 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 604 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
494 .num_of_queues = IWL50_NUM_QUEUES, 605 .num_of_queues = IWLAGN_NUM_QUEUES,
495 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 606 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
496 .mod_params = &iwl50_mod_params, 607 .mod_params = &iwlagn_mod_params,
497 .valid_tx_ant = ANT_AB, 608 .valid_tx_ant = ANT_AB,
498 .valid_rx_ant = ANT_AB, 609 .valid_rx_ant = ANT_AB,
499 .pll_cfg_val = 0, 610 .pll_cfg_val = 0,
@@ -502,7 +613,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
502 .pa_type = IWL_PA_SYSTEM, 613 .pa_type = IWL_PA_SYSTEM,
503 .max_ll_items = OTP_MAX_LL_ITEMS_6x50, 614 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
504 .shadow_ram_support = true, 615 .shadow_ram_support = true,
505 .ht_greenfield_support = true,
506 .led_compensation = 51, 616 .led_compensation = 51,
507 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 617 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
508 .supports_idle = true, 618 .supports_idle = true,
@@ -510,10 +620,15 @@ struct iwl_cfg iwl6050_2abg_cfg = {
510 .support_ct_kill_exit = true, 620 .support_ct_kill_exit = true,
511 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 621 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
512 .chain_noise_scale = 1500, 622 .chain_noise_scale = 1500,
623 .monitor_recover_period = IWL_MONITORING_PERIOD,
624 .max_event_log_size = 1024,
625 .ucode_tracing = true,
626 .sensitivity_calib_by_driver = true,
627 .chain_noise_calib_by_driver = true,
513}; 628};
514 629
515struct iwl_cfg iwl6000_3agn_cfg = { 630struct iwl_cfg iwl6000_3agn_cfg = {
516 .name = "6000 Series 3x3 AGN", 631 .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
517 .fw_name_pre = IWL6000_FW_PRE, 632 .fw_name_pre = IWL6000_FW_PRE,
518 .ucode_api_max = IWL6000_UCODE_API_MAX, 633 .ucode_api_max = IWL6000_UCODE_API_MAX,
519 .ucode_api_min = IWL6000_UCODE_API_MIN, 634 .ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -521,10 +636,10 @@ struct iwl_cfg iwl6000_3agn_cfg = {
521 .ops = &iwl6000_ops, 636 .ops = &iwl6000_ops,
522 .eeprom_size = OTP_LOW_IMAGE_SIZE, 637 .eeprom_size = OTP_LOW_IMAGE_SIZE,
523 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 638 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
524 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 639 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
525 .num_of_queues = IWL50_NUM_QUEUES, 640 .num_of_queues = IWLAGN_NUM_QUEUES,
526 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 641 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
527 .mod_params = &iwl50_mod_params, 642 .mod_params = &iwlagn_mod_params,
528 .valid_tx_ant = ANT_ABC, 643 .valid_tx_ant = ANT_ABC,
529 .valid_rx_ant = ANT_ABC, 644 .valid_rx_ant = ANT_ABC,
530 .pll_cfg_val = 0, 645 .pll_cfg_val = 0,
@@ -542,7 +657,13 @@ struct iwl_cfg iwl6000_3agn_cfg = {
542 .support_ct_kill_exit = true, 657 .support_ct_kill_exit = true,
543 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 658 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
544 .chain_noise_scale = 1000, 659 .chain_noise_scale = 1000,
660 .monitor_recover_period = IWL_MONITORING_PERIOD,
661 .max_event_log_size = 1024,
662 .ucode_tracing = true,
663 .sensitivity_calib_by_driver = true,
664 .chain_noise_calib_by_driver = true,
545}; 665};
546 666
547MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 667MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
548MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); 668MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
669MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
new file mode 100644
index 000000000000..48c023b4ca36
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -0,0 +1,850 @@
1/******************************************************************************
2*
3* GPL LICENSE SUMMARY
4*
5* Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6*
7* This program is free software; you can redistribute it and/or modify
8* it under the terms of version 2 of the GNU General Public License as
9* published by the Free Software Foundation.
10*
11* This program is distributed in the hope that it will be useful, but
12* WITHOUT ANY WARRANTY; without even the implied warranty of
13* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14* General Public License for more details.
15*
16* You should have received a copy of the GNU General Public License
17* along with this program; if not, write to the Free Software
18* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19* USA
20*
21* The full GNU General Public License is included in this distribution
22* in the file called LICENSE.GPL.
23*
24* Contact Information:
25* Intel Linux Wireless <ilw@linux.intel.com>
26* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27*****************************************************************************/
28
29#include "iwl-agn-debugfs.h"
30
31ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
32 size_t count, loff_t *ppos)
33 {
34 struct iwl_priv *priv = file->private_data;
35 int pos = 0;
36 char *buf;
37 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
38 sizeof(struct statistics_rx_non_phy) * 40 +
39 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
40 ssize_t ret;
41 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
42 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
43 struct statistics_rx_non_phy *general, *accum_general;
44 struct statistics_rx_non_phy *delta_general, *max_general;
45 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
46
47 if (!iwl_is_alive(priv))
48 return -EAGAIN;
49
50 buf = kzalloc(bufsz, GFP_KERNEL);
51 if (!buf) {
52 IWL_ERR(priv, "Can not allocate Buffer\n");
53 return -ENOMEM;
54 }
55
56 /*
57 * the statistic information display here is based on
58 * the last statistics notification from uCode
59 * might not reflect the current uCode activity
60 */
61 ofdm = &priv->statistics.rx.ofdm;
62 cck = &priv->statistics.rx.cck;
63 general = &priv->statistics.rx.general;
64 ht = &priv->statistics.rx.ofdm_ht;
65 accum_ofdm = &priv->accum_statistics.rx.ofdm;
66 accum_cck = &priv->accum_statistics.rx.cck;
67 accum_general = &priv->accum_statistics.rx.general;
68 accum_ht = &priv->accum_statistics.rx.ofdm_ht;
69 delta_ofdm = &priv->delta_statistics.rx.ofdm;
70 delta_cck = &priv->delta_statistics.rx.cck;
71 delta_general = &priv->delta_statistics.rx.general;
72 delta_ht = &priv->delta_statistics.rx.ofdm_ht;
73 max_ofdm = &priv->max_delta.rx.ofdm;
74 max_cck = &priv->max_delta.rx.cck;
75 max_general = &priv->max_delta.rx.general;
76 max_ht = &priv->max_delta.rx.ofdm_ht;
77
78 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
79 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
80 "acumulative delta max\n",
81 "Statistics_Rx - OFDM:");
82 pos += scnprintf(buf + pos, bufsz - pos,
83 " %-30s %10u %10u %10u %10u\n",
84 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
85 accum_ofdm->ina_cnt,
86 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
87 pos += scnprintf(buf + pos, bufsz - pos,
88 " %-30s %10u %10u %10u %10u\n",
89 "fina_cnt:",
90 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
91 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
92 pos += scnprintf(buf + pos, bufsz - pos,
93 " %-30s %10u %10u %10u %10u\n",
94 "plcp_err:",
95 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
96 delta_ofdm->plcp_err, max_ofdm->plcp_err);
97 pos += scnprintf(buf + pos, bufsz - pos,
98 " %-30s %10u %10u %10u %10u\n", "crc32_err:",
99 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
100 delta_ofdm->crc32_err, max_ofdm->crc32_err);
101 pos += scnprintf(buf + pos, bufsz - pos,
102 " %-30s %10u %10u %10u %10u\n", "overrun_err:",
103 le32_to_cpu(ofdm->overrun_err),
104 accum_ofdm->overrun_err, delta_ofdm->overrun_err,
105 max_ofdm->overrun_err);
106 pos += scnprintf(buf + pos, bufsz - pos,
107 " %-30s %10u %10u %10u %10u\n",
108 "early_overrun_err:",
109 le32_to_cpu(ofdm->early_overrun_err),
110 accum_ofdm->early_overrun_err,
111 delta_ofdm->early_overrun_err,
112 max_ofdm->early_overrun_err);
113 pos += scnprintf(buf + pos, bufsz - pos,
114 " %-30s %10u %10u %10u %10u\n",
115 "crc32_good:", le32_to_cpu(ofdm->crc32_good),
116 accum_ofdm->crc32_good, delta_ofdm->crc32_good,
117 max_ofdm->crc32_good);
118 pos += scnprintf(buf + pos, bufsz - pos,
119 " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
120 le32_to_cpu(ofdm->false_alarm_cnt),
121 accum_ofdm->false_alarm_cnt,
122 delta_ofdm->false_alarm_cnt,
123 max_ofdm->false_alarm_cnt);
124 pos += scnprintf(buf + pos, bufsz - pos,
125 " %-30s %10u %10u %10u %10u\n",
126 "fina_sync_err_cnt:",
127 le32_to_cpu(ofdm->fina_sync_err_cnt),
128 accum_ofdm->fina_sync_err_cnt,
129 delta_ofdm->fina_sync_err_cnt,
130 max_ofdm->fina_sync_err_cnt);
131 pos += scnprintf(buf + pos, bufsz - pos,
132 " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
133 le32_to_cpu(ofdm->sfd_timeout),
134 accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
135 max_ofdm->sfd_timeout);
136 pos += scnprintf(buf + pos, bufsz - pos,
137 " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
138 le32_to_cpu(ofdm->fina_timeout),
139 accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
140 max_ofdm->fina_timeout);
141 pos += scnprintf(buf + pos, bufsz - pos,
142 " %-30s %10u %10u %10u %10u\n",
143 "unresponded_rts:",
144 le32_to_cpu(ofdm->unresponded_rts),
145 accum_ofdm->unresponded_rts,
146 delta_ofdm->unresponded_rts,
147 max_ofdm->unresponded_rts);
148 pos += scnprintf(buf + pos, bufsz - pos,
149 " %-30s %10u %10u %10u %10u\n",
150 "rxe_frame_lmt_ovrun:",
151 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
152 accum_ofdm->rxe_frame_limit_overrun,
153 delta_ofdm->rxe_frame_limit_overrun,
154 max_ofdm->rxe_frame_limit_overrun);
155 pos += scnprintf(buf + pos, bufsz - pos,
156 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
157 le32_to_cpu(ofdm->sent_ack_cnt),
158 accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
159 max_ofdm->sent_ack_cnt);
160 pos += scnprintf(buf + pos, bufsz - pos,
161 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
162 le32_to_cpu(ofdm->sent_cts_cnt),
163 accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
164 max_ofdm->sent_cts_cnt);
165 pos += scnprintf(buf + pos, bufsz - pos,
166 " %-30s %10u %10u %10u %10u\n",
167 "sent_ba_rsp_cnt:",
168 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
169 accum_ofdm->sent_ba_rsp_cnt,
170 delta_ofdm->sent_ba_rsp_cnt,
171 max_ofdm->sent_ba_rsp_cnt);
172 pos += scnprintf(buf + pos, bufsz - pos,
173 " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:",
174 le32_to_cpu(ofdm->dsp_self_kill),
175 accum_ofdm->dsp_self_kill,
176 delta_ofdm->dsp_self_kill,
177 max_ofdm->dsp_self_kill);
178 pos += scnprintf(buf + pos, bufsz - pos,
179 " %-30s %10u %10u %10u %10u\n",
180 "mh_format_err:",
181 le32_to_cpu(ofdm->mh_format_err),
182 accum_ofdm->mh_format_err,
183 delta_ofdm->mh_format_err,
184 max_ofdm->mh_format_err);
185 pos += scnprintf(buf + pos, bufsz - pos,
186 " %-30s %10u %10u %10u %10u\n",
187 "re_acq_main_rssi_sum:",
188 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
189 accum_ofdm->re_acq_main_rssi_sum,
190 delta_ofdm->re_acq_main_rssi_sum,
191 max_ofdm->re_acq_main_rssi_sum);
192
193 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
194 "acumulative delta max\n",
195 "Statistics_Rx - CCK:");
196 pos += scnprintf(buf + pos, bufsz - pos,
197 " %-30s %10u %10u %10u %10u\n",
198 "ina_cnt:",
199 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
200 delta_cck->ina_cnt, max_cck->ina_cnt);
201 pos += scnprintf(buf + pos, bufsz - pos,
202 " %-30s %10u %10u %10u %10u\n",
203 "fina_cnt:",
204 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
205 delta_cck->fina_cnt, max_cck->fina_cnt);
206 pos += scnprintf(buf + pos, bufsz - pos,
207 " %-30s %10u %10u %10u %10u\n",
208 "plcp_err:",
209 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
210 delta_cck->plcp_err, max_cck->plcp_err);
211 pos += scnprintf(buf + pos, bufsz - pos,
212 " %-30s %10u %10u %10u %10u\n",
213 "crc32_err:",
214 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
215 delta_cck->crc32_err, max_cck->crc32_err);
216 pos += scnprintf(buf + pos, bufsz - pos,
217 " %-30s %10u %10u %10u %10u\n",
218 "overrun_err:",
219 le32_to_cpu(cck->overrun_err),
220 accum_cck->overrun_err, delta_cck->overrun_err,
221 max_cck->overrun_err);
222 pos += scnprintf(buf + pos, bufsz - pos,
223 " %-30s %10u %10u %10u %10u\n",
224 "early_overrun_err:",
225 le32_to_cpu(cck->early_overrun_err),
226 accum_cck->early_overrun_err,
227 delta_cck->early_overrun_err,
228 max_cck->early_overrun_err);
229 pos += scnprintf(buf + pos, bufsz - pos,
230 " %-30s %10u %10u %10u %10u\n",
231 "crc32_good:",
232 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
233 delta_cck->crc32_good, max_cck->crc32_good);
234 pos += scnprintf(buf + pos, bufsz - pos,
235 " %-30s %10u %10u %10u %10u\n",
236 "false_alarm_cnt:",
237 le32_to_cpu(cck->false_alarm_cnt),
238 accum_cck->false_alarm_cnt,
239 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
240 pos += scnprintf(buf + pos, bufsz - pos,
241 " %-30s %10u %10u %10u %10u\n",
242 "fina_sync_err_cnt:",
243 le32_to_cpu(cck->fina_sync_err_cnt),
244 accum_cck->fina_sync_err_cnt,
245 delta_cck->fina_sync_err_cnt,
246 max_cck->fina_sync_err_cnt);
247 pos += scnprintf(buf + pos, bufsz - pos,
248 " %-30s %10u %10u %10u %10u\n",
249 "sfd_timeout:",
250 le32_to_cpu(cck->sfd_timeout),
251 accum_cck->sfd_timeout, delta_cck->sfd_timeout,
252 max_cck->sfd_timeout);
253 pos += scnprintf(buf + pos, bufsz - pos,
254 " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
255 le32_to_cpu(cck->fina_timeout),
256 accum_cck->fina_timeout, delta_cck->fina_timeout,
257 max_cck->fina_timeout);
258 pos += scnprintf(buf + pos, bufsz - pos,
259 " %-30s %10u %10u %10u %10u\n",
260 "unresponded_rts:",
261 le32_to_cpu(cck->unresponded_rts),
262 accum_cck->unresponded_rts, delta_cck->unresponded_rts,
263 max_cck->unresponded_rts);
264 pos += scnprintf(buf + pos, bufsz - pos,
265 " %-30s %10u %10u %10u %10u\n",
266 "rxe_frame_lmt_ovrun:",
267 le32_to_cpu(cck->rxe_frame_limit_overrun),
268 accum_cck->rxe_frame_limit_overrun,
269 delta_cck->rxe_frame_limit_overrun,
270 max_cck->rxe_frame_limit_overrun);
271 pos += scnprintf(buf + pos, bufsz - pos,
272 " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
273 le32_to_cpu(cck->sent_ack_cnt),
274 accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
275 max_cck->sent_ack_cnt);
276 pos += scnprintf(buf + pos, bufsz - pos,
277 " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
278 le32_to_cpu(cck->sent_cts_cnt),
279 accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
280 max_cck->sent_cts_cnt);
281 pos += scnprintf(buf + pos, bufsz - pos,
282 " %-30s %10u %10u %10u %10u\n", "sent_ba_rsp_cnt:",
283 le32_to_cpu(cck->sent_ba_rsp_cnt),
284 accum_cck->sent_ba_rsp_cnt,
285 delta_cck->sent_ba_rsp_cnt,
286 max_cck->sent_ba_rsp_cnt);
287 pos += scnprintf(buf + pos, bufsz - pos,
288 " %-30s %10u %10u %10u %10u\n", "dsp_self_kill:",
289 le32_to_cpu(cck->dsp_self_kill),
290 accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
291 max_cck->dsp_self_kill);
292 pos += scnprintf(buf + pos, bufsz - pos,
293 " %-30s %10u %10u %10u %10u\n", "mh_format_err:",
294 le32_to_cpu(cck->mh_format_err),
295 accum_cck->mh_format_err, delta_cck->mh_format_err,
296 max_cck->mh_format_err);
297 pos += scnprintf(buf + pos, bufsz - pos,
298 " %-30s %10u %10u %10u %10u\n",
299 "re_acq_main_rssi_sum:",
300 le32_to_cpu(cck->re_acq_main_rssi_sum),
301 accum_cck->re_acq_main_rssi_sum,
302 delta_cck->re_acq_main_rssi_sum,
303 max_cck->re_acq_main_rssi_sum);
304
305 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
306 "acumulative delta max\n",
307 "Statistics_Rx - GENERAL:");
308 pos += scnprintf(buf + pos, bufsz - pos,
309 " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
310 le32_to_cpu(general->bogus_cts),
311 accum_general->bogus_cts, delta_general->bogus_cts,
312 max_general->bogus_cts);
313 pos += scnprintf(buf + pos, bufsz - pos,
314 " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
315 le32_to_cpu(general->bogus_ack),
316 accum_general->bogus_ack, delta_general->bogus_ack,
317 max_general->bogus_ack);
318 pos += scnprintf(buf + pos, bufsz - pos,
319 " %-30s %10u %10u %10u %10u\n",
320 "non_bssid_frames:",
321 le32_to_cpu(general->non_bssid_frames),
322 accum_general->non_bssid_frames,
323 delta_general->non_bssid_frames,
324 max_general->non_bssid_frames);
325 pos += scnprintf(buf + pos, bufsz - pos,
326 " %-30s %10u %10u %10u %10u\n",
327 "filtered_frames:",
328 le32_to_cpu(general->filtered_frames),
329 accum_general->filtered_frames,
330 delta_general->filtered_frames,
331 max_general->filtered_frames);
332 pos += scnprintf(buf + pos, bufsz - pos,
333 " %-30s %10u %10u %10u %10u\n",
334 "non_channel_beacons:",
335 le32_to_cpu(general->non_channel_beacons),
336 accum_general->non_channel_beacons,
337 delta_general->non_channel_beacons,
338 max_general->non_channel_beacons);
339 pos += scnprintf(buf + pos, bufsz - pos,
340 " %-30s %10u %10u %10u %10u\n",
341 "channel_beacons:",
342 le32_to_cpu(general->channel_beacons),
343 accum_general->channel_beacons,
344 delta_general->channel_beacons,
345 max_general->channel_beacons);
346 pos += scnprintf(buf + pos, bufsz - pos,
347 " %-30s %10u %10u %10u %10u\n",
348 "num_missed_bcon:",
349 le32_to_cpu(general->num_missed_bcon),
350 accum_general->num_missed_bcon,
351 delta_general->num_missed_bcon,
352 max_general->num_missed_bcon);
353 pos += scnprintf(buf + pos, bufsz - pos,
354 " %-30s %10u %10u %10u %10u\n",
355 "adc_rx_saturation_time:",
356 le32_to_cpu(general->adc_rx_saturation_time),
357 accum_general->adc_rx_saturation_time,
358 delta_general->adc_rx_saturation_time,
359 max_general->adc_rx_saturation_time);
360 pos += scnprintf(buf + pos, bufsz - pos,
361 " %-30s %10u %10u %10u %10u\n",
362 "ina_detect_search_tm:",
363 le32_to_cpu(general->ina_detection_search_time),
364 accum_general->ina_detection_search_time,
365 delta_general->ina_detection_search_time,
366 max_general->ina_detection_search_time);
367 pos += scnprintf(buf + pos, bufsz - pos,
368 " %-30s %10u %10u %10u %10u\n",
369 "beacon_silence_rssi_a:",
370 le32_to_cpu(general->beacon_silence_rssi_a),
371 accum_general->beacon_silence_rssi_a,
372 delta_general->beacon_silence_rssi_a,
373 max_general->beacon_silence_rssi_a);
374 pos += scnprintf(buf + pos, bufsz - pos,
375 " %-30s %10u %10u %10u %10u\n",
376 "beacon_silence_rssi_b:",
377 le32_to_cpu(general->beacon_silence_rssi_b),
378 accum_general->beacon_silence_rssi_b,
379 delta_general->beacon_silence_rssi_b,
380 max_general->beacon_silence_rssi_b);
381 pos += scnprintf(buf + pos, bufsz - pos,
382 " %-30s %10u %10u %10u %10u\n",
383 "beacon_silence_rssi_c:",
384 le32_to_cpu(general->beacon_silence_rssi_c),
385 accum_general->beacon_silence_rssi_c,
386 delta_general->beacon_silence_rssi_c,
387 max_general->beacon_silence_rssi_c);
388 pos += scnprintf(buf + pos, bufsz - pos,
389 " %-30s %10u %10u %10u %10u\n",
390 "interference_data_flag:",
391 le32_to_cpu(general->interference_data_flag),
392 accum_general->interference_data_flag,
393 delta_general->interference_data_flag,
394 max_general->interference_data_flag);
395 pos += scnprintf(buf + pos, bufsz - pos,
396 " %-30s %10u %10u %10u %10u\n",
397 "channel_load:",
398 le32_to_cpu(general->channel_load),
399 accum_general->channel_load,
400 delta_general->channel_load,
401 max_general->channel_load);
402 pos += scnprintf(buf + pos, bufsz - pos,
403 " %-30s %10u %10u %10u %10u\n",
404 "dsp_false_alarms:",
405 le32_to_cpu(general->dsp_false_alarms),
406 accum_general->dsp_false_alarms,
407 delta_general->dsp_false_alarms,
408 max_general->dsp_false_alarms);
409 pos += scnprintf(buf + pos, bufsz - pos,
410 " %-30s %10u %10u %10u %10u\n",
411 "beacon_rssi_a:",
412 le32_to_cpu(general->beacon_rssi_a),
413 accum_general->beacon_rssi_a,
414 delta_general->beacon_rssi_a,
415 max_general->beacon_rssi_a);
416 pos += scnprintf(buf + pos, bufsz - pos,
417 " %-30s %10u %10u %10u %10u\n",
418 "beacon_rssi_b:",
419 le32_to_cpu(general->beacon_rssi_b),
420 accum_general->beacon_rssi_b,
421 delta_general->beacon_rssi_b,
422 max_general->beacon_rssi_b);
423 pos += scnprintf(buf + pos, bufsz - pos,
424 " %-30s %10u %10u %10u %10u\n",
425 "beacon_rssi_c:",
426 le32_to_cpu(general->beacon_rssi_c),
427 accum_general->beacon_rssi_c,
428 delta_general->beacon_rssi_c,
429 max_general->beacon_rssi_c);
430 pos += scnprintf(buf + pos, bufsz - pos,
431 " %-30s %10u %10u %10u %10u\n",
432 "beacon_energy_a:",
433 le32_to_cpu(general->beacon_energy_a),
434 accum_general->beacon_energy_a,
435 delta_general->beacon_energy_a,
436 max_general->beacon_energy_a);
437 pos += scnprintf(buf + pos, bufsz - pos,
438 " %-30s %10u %10u %10u %10u\n",
439 "beacon_energy_b:",
440 le32_to_cpu(general->beacon_energy_b),
441 accum_general->beacon_energy_b,
442 delta_general->beacon_energy_b,
443 max_general->beacon_energy_b);
444 pos += scnprintf(buf + pos, bufsz - pos,
445 " %-30s %10u %10u %10u %10u\n",
446 "beacon_energy_c:",
447 le32_to_cpu(general->beacon_energy_c),
448 accum_general->beacon_energy_c,
449 delta_general->beacon_energy_c,
450 max_general->beacon_energy_c);
451
452 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
453 pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
454 "acumulative delta max\n",
455 "Statistics_Rx - OFDM_HT:");
456 pos += scnprintf(buf + pos, bufsz - pos,
457 " %-30s %10u %10u %10u %10u\n",
458 "plcp_err:",
459 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
460 delta_ht->plcp_err, max_ht->plcp_err);
461 pos += scnprintf(buf + pos, bufsz - pos,
462 " %-30s %10u %10u %10u %10u\n",
463 "overrun_err:",
464 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
465 delta_ht->overrun_err, max_ht->overrun_err);
466 pos += scnprintf(buf + pos, bufsz - pos,
467 " %-30s %10u %10u %10u %10u\n",
468 "early_overrun_err:",
469 le32_to_cpu(ht->early_overrun_err),
470 accum_ht->early_overrun_err,
471 delta_ht->early_overrun_err,
472 max_ht->early_overrun_err);
473 pos += scnprintf(buf + pos, bufsz - pos,
474 " %-30s %10u %10u %10u %10u\n",
475 "crc32_good:",
476 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
477 delta_ht->crc32_good, max_ht->crc32_good);
478 pos += scnprintf(buf + pos, bufsz - pos,
479 " %-30s %10u %10u %10u %10u\n",
480 "crc32_err:",
481 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
482 delta_ht->crc32_err, max_ht->crc32_err);
483 pos += scnprintf(buf + pos, bufsz - pos,
484 " %-30s %10u %10u %10u %10u\n",
485 "mh_format_err:",
486 le32_to_cpu(ht->mh_format_err),
487 accum_ht->mh_format_err,
488 delta_ht->mh_format_err, max_ht->mh_format_err);
489 pos += scnprintf(buf + pos, bufsz - pos,
490 " %-30s %10u %10u %10u %10u\n",
491 "agg_crc32_good:",
492 le32_to_cpu(ht->agg_crc32_good),
493 accum_ht->agg_crc32_good,
494 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
495 pos += scnprintf(buf + pos, bufsz - pos,
496 " %-30s %10u %10u %10u %10u\n",
497 "agg_mpdu_cnt:",
498 le32_to_cpu(ht->agg_mpdu_cnt),
499 accum_ht->agg_mpdu_cnt,
500 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
501 pos += scnprintf(buf + pos, bufsz - pos,
502 " %-30s %10u %10u %10u %10u\n",
503 "agg_cnt:",
504 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
505 delta_ht->agg_cnt, max_ht->agg_cnt);
506 pos += scnprintf(buf + pos, bufsz - pos,
507 " %-30s %10u %10u %10u %10u\n",
508 "unsupport_mcs:",
509 le32_to_cpu(ht->unsupport_mcs),
510 accum_ht->unsupport_mcs,
511 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
512
513 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
514 kfree(buf);
515 return ret;
516}
517
/*
 * iwl_ucode_tx_stats_read - debugfs read handler for uCode Tx statistics
 *
 * Formats the driver's cached Tx statistics as a text table with four
 * columns per counter: current, accumulated, delta since the previous
 * statistics notification, and maximum delta observed.  The aggregation
 * ("agg ...") rows come from the nested tx->agg sub-struct.
 *
 * Returns the number of bytes copied to user space, -EAGAIN if the uCode
 * is not alive, or -ENOMEM if the output buffer cannot be allocated.
 *
 * NOTE(review): "acumulative" in the header is a long-standing typo in
 * the emitted text; left untouched here since debugfs output may be
 * parsed by external tools.
 */
ssize_t iwl_ucode_tx_stats_read(struct file *file,
				char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	char *buf;
	/* generous worst-case sizing for the formatted table */
	int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
	ssize_t ret;
	struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	/* the statistic information display here is based on
	 * the last statistics notification from uCode
	 * might not reflect the current uCode activity
	 */
	tx = &priv->statistics.tx;
	accum_tx = &priv->accum_statistics.tx;
	delta_tx = &priv->delta_statistics.tx;
	max_tx = &priv->max_delta.tx;
	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
	/* table header: counter name + 4 value columns */
	pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
			 "acumulative delta max\n",
			 "Statistics_Tx:");
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "preamble:",
			 le32_to_cpu(tx->preamble_cnt),
			 accum_tx->preamble_cnt,
			 delta_tx->preamble_cnt, max_tx->preamble_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "rx_detected_cnt:",
			 le32_to_cpu(tx->rx_detected_cnt),
			 accum_tx->rx_detected_cnt,
			 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "bt_prio_defer_cnt:",
			 le32_to_cpu(tx->bt_prio_defer_cnt),
			 accum_tx->bt_prio_defer_cnt,
			 delta_tx->bt_prio_defer_cnt,
			 max_tx->bt_prio_defer_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "bt_prio_kill_cnt:",
			 le32_to_cpu(tx->bt_prio_kill_cnt),
			 accum_tx->bt_prio_kill_cnt,
			 delta_tx->bt_prio_kill_cnt,
			 max_tx->bt_prio_kill_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "few_bytes_cnt:",
			 le32_to_cpu(tx->few_bytes_cnt),
			 accum_tx->few_bytes_cnt,
			 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "cts_timeout:",
			 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
			 delta_tx->cts_timeout, max_tx->cts_timeout);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "ack_timeout:",
			 le32_to_cpu(tx->ack_timeout),
			 accum_tx->ack_timeout,
			 delta_tx->ack_timeout, max_tx->ack_timeout);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "expected_ack_cnt:",
			 le32_to_cpu(tx->expected_ack_cnt),
			 accum_tx->expected_ack_cnt,
			 delta_tx->expected_ack_cnt,
			 max_tx->expected_ack_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "actual_ack_cnt:",
			 le32_to_cpu(tx->actual_ack_cnt),
			 accum_tx->actual_ack_cnt,
			 delta_tx->actual_ack_cnt,
			 max_tx->actual_ack_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "dump_msdu_cnt:",
			 le32_to_cpu(tx->dump_msdu_cnt),
			 accum_tx->dump_msdu_cnt,
			 delta_tx->dump_msdu_cnt,
			 max_tx->dump_msdu_cnt);
	/* labels below are abbreviated to fit the 30-char column */
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "abort_nxt_frame_mismatch:",
			 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
			 accum_tx->burst_abort_next_frame_mismatch_cnt,
			 delta_tx->burst_abort_next_frame_mismatch_cnt,
			 max_tx->burst_abort_next_frame_mismatch_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "abort_missing_nxt_frame:",
			 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
			 accum_tx->burst_abort_missing_next_frame_cnt,
			 delta_tx->burst_abort_missing_next_frame_cnt,
			 max_tx->burst_abort_missing_next_frame_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "cts_timeout_collision:",
			 le32_to_cpu(tx->cts_timeout_collision),
			 accum_tx->cts_timeout_collision,
			 delta_tx->cts_timeout_collision,
			 max_tx->cts_timeout_collision);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "ack_ba_timeout_collision:",
			 le32_to_cpu(tx->ack_or_ba_timeout_collision),
			 accum_tx->ack_or_ba_timeout_collision,
			 delta_tx->ack_or_ba_timeout_collision,
			 max_tx->ack_or_ba_timeout_collision);
	/* aggregation (block-ack / scheduler) counters */
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "agg ba_timeout:",
			 le32_to_cpu(tx->agg.ba_timeout),
			 accum_tx->agg.ba_timeout,
			 delta_tx->agg.ba_timeout,
			 max_tx->agg.ba_timeout);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "agg ba_resched_frames:",
			 le32_to_cpu(tx->agg.ba_reschedule_frames),
			 accum_tx->agg.ba_reschedule_frames,
			 delta_tx->agg.ba_reschedule_frames,
			 max_tx->agg.ba_reschedule_frames);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "agg scd_query_agg_frame:",
			 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
			 accum_tx->agg.scd_query_agg_frame_cnt,
			 delta_tx->agg.scd_query_agg_frame_cnt,
			 max_tx->agg.scd_query_agg_frame_cnt);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "agg scd_query_no_agg:",
			 le32_to_cpu(tx->agg.scd_query_no_agg),
			 accum_tx->agg.scd_query_no_agg,
			 delta_tx->agg.scd_query_no_agg,
			 max_tx->agg.scd_query_no_agg);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "agg scd_query_agg:",
			 le32_to_cpu(tx->agg.scd_query_agg),
			 accum_tx->agg.scd_query_agg,
			 delta_tx->agg.scd_query_agg,
			 max_tx->agg.scd_query_agg);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "agg scd_query_mismatch:",
			 le32_to_cpu(tx->agg.scd_query_mismatch),
			 accum_tx->agg.scd_query_mismatch,
			 delta_tx->agg.scd_query_mismatch,
			 max_tx->agg.scd_query_mismatch);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "agg frame_not_ready:",
			 le32_to_cpu(tx->agg.frame_not_ready),
			 accum_tx->agg.frame_not_ready,
			 delta_tx->agg.frame_not_ready,
			 max_tx->agg.frame_not_ready);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "agg underrun:",
			 le32_to_cpu(tx->agg.underrun),
			 accum_tx->agg.underrun,
			 delta_tx->agg.underrun, max_tx->agg.underrun);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "agg bt_prio_kill:",
			 le32_to_cpu(tx->agg.bt_prio_kill),
			 accum_tx->agg.bt_prio_kill,
			 delta_tx->agg.bt_prio_kill,
			 max_tx->agg.bt_prio_kill);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "agg rx_ba_rsp_cnt:",
			 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
			 accum_tx->agg.rx_ba_rsp_cnt,
			 delta_tx->agg.rx_ba_rsp_cnt,
			 max_tx->agg.rx_ba_rsp_cnt);

	/*
	 * Per-antenna tx power, printed only for antennas the hardware
	 * config declares valid AND that reported a non-zero value.
	 * Values are in half-dB steps per the uCode statistics format.
	 */
	if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "tx power: (1/2 dB step)\n");
		if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tantenna A: 0x%X\n",
					 tx->tx_power.ant_a);
		if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tantenna B: 0x%X\n",
					 tx->tx_power.ant_b);
		if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
			pos += scnprintf(buf + pos, bufsz - pos,
					 "\tantenna C: 0x%X\n",
					 tx->tx_power.ant_c);
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
732
/*
 * iwl_ucode_general_stats_read - debugfs read handler for "general"
 * uCode statistics (temperature, debug counters, slot accounting and
 * antenna-diversity counters).
 *
 * Most counters are printed with four columns (current, accumulated,
 * delta, max delta); temperature, temperature_m and ttl_timestamp are
 * instantaneous readings and print only the current value.
 *
 * Returns the number of bytes copied to user space, -EAGAIN if the uCode
 * is not alive, or -ENOMEM if the output buffer cannot be allocated.
 */
ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	int pos = 0;
	char *buf;
	/* generous worst-case sizing for the formatted table */
	int bufsz = sizeof(struct statistics_general) * 10 + 300;
	ssize_t ret;
	struct statistics_general *general, *accum_general;
	struct statistics_general *delta_general, *max_general;
	struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
	struct statistics_div *div, *accum_div, *delta_div, *max_div;

	if (!iwl_is_alive(priv))
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf) {
		IWL_ERR(priv, "Can not allocate Buffer\n");
		return -ENOMEM;
	}

	/* the statistic information display here is based on
	 * the last statistics notification from uCode
	 * might not reflect the current uCode activity
	 */
	general = &priv->statistics.general;
	dbg = &priv->statistics.general.dbg;
	div = &priv->statistics.general.div;
	accum_general = &priv->accum_statistics.general;
	delta_general = &priv->delta_statistics.general;
	max_general = &priv->max_delta.general;
	accum_dbg = &priv->accum_statistics.general.dbg;
	delta_dbg = &priv->delta_statistics.general.dbg;
	max_dbg = &priv->max_delta.general.dbg;
	accum_div = &priv->accum_statistics.general.div;
	delta_div = &priv->delta_statistics.general.div;
	max_div = &priv->max_delta.general.div;
	pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
	/* table header: counter name + 4 value columns */
	pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
			 "acumulative delta max\n",
			 "Statistics_General:");
	/* instantaneous readings: current value only */
	pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
			 "temperature:",
			 le32_to_cpu(general->temperature));
	pos += scnprintf(buf + pos, bufsz - pos, " %-30s %10u\n",
			 "temperature_m:",
			 le32_to_cpu(general->temperature_m));
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "burst_check:",
			 le32_to_cpu(dbg->burst_check),
			 accum_dbg->burst_check,
			 delta_dbg->burst_check, max_dbg->burst_check);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "burst_count:",
			 le32_to_cpu(dbg->burst_count),
			 accum_dbg->burst_count,
			 delta_dbg->burst_count, max_dbg->burst_count);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "sleep_time:",
			 le32_to_cpu(general->sleep_time),
			 accum_general->sleep_time,
			 delta_general->sleep_time, max_general->sleep_time);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "slots_out:",
			 le32_to_cpu(general->slots_out),
			 accum_general->slots_out,
			 delta_general->slots_out, max_general->slots_out);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "slots_idle:",
			 le32_to_cpu(general->slots_idle),
			 accum_general->slots_idle,
			 delta_general->slots_idle, max_general->slots_idle);
	pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
			 le32_to_cpu(general->ttl_timestamp));
	/* antenna-diversity counters */
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "tx_on_a:",
			 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
			 delta_div->tx_on_a, max_div->tx_on_a);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "tx_on_b:",
			 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
			 delta_div->tx_on_b, max_div->tx_on_b);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "exec_time:",
			 le32_to_cpu(div->exec_time), accum_div->exec_time,
			 delta_div->exec_time, max_div->exec_time);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "probe_time:",
			 le32_to_cpu(div->probe_time), accum_div->probe_time,
			 delta_div->probe_time, max_div->probe_time);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "rx_enable_counter:",
			 le32_to_cpu(general->rx_enable_counter),
			 accum_general->rx_enable_counter,
			 delta_general->rx_enable_counter,
			 max_general->rx_enable_counter);
	pos += scnprintf(buf + pos, bufsz - pos,
			 " %-30s %10u %10u %10u %10u\n",
			 "num_of_sos_states:",
			 le32_to_cpu(general->num_of_sos_states),
			 accum_general->num_of_sos_states,
			 delta_general->num_of_sos_states,
			 max_general->num_of_sos_states);
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
new file mode 100644
index 000000000000..59b1f25f0d85
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.h
@@ -0,0 +1,56 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include "iwl-dev.h"
30#include "iwl-core.h"
31#include "iwl-debug.h"
32
33#ifdef CONFIG_IWLWIFI_DEBUGFS
34ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
35 size_t count, loff_t *ppos);
36ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
37 size_t count, loff_t *ppos);
38ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
39 size_t count, loff_t *ppos);
40#else
41static ssize_t iwl_ucode_rx_stats_read(struct file *file, char __user *user_buf,
42 size_t count, loff_t *ppos)
43{
44 return 0;
45}
46static ssize_t iwl_ucode_tx_stats_read(struct file *file, char __user *user_buf,
47 size_t count, loff_t *ppos)
48{
49 return 0;
50}
51static ssize_t iwl_ucode_general_stats_read(struct file *file, char __user *user_buf,
52 size_t count, loff_t *ppos)
53{
54 return 0;
55}
56#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
new file mode 100644
index 000000000000..44ef5d93befc
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -0,0 +1,276 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-agn.h"
39
40static int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
41{
42 int ret = 0;
43 struct iwl5000_rxon_assoc_cmd rxon_assoc;
44 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
45 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
46
47 if ((rxon1->flags == rxon2->flags) &&
48 (rxon1->filter_flags == rxon2->filter_flags) &&
49 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
50 (rxon1->ofdm_ht_single_stream_basic_rates ==
51 rxon2->ofdm_ht_single_stream_basic_rates) &&
52 (rxon1->ofdm_ht_dual_stream_basic_rates ==
53 rxon2->ofdm_ht_dual_stream_basic_rates) &&
54 (rxon1->ofdm_ht_triple_stream_basic_rates ==
55 rxon2->ofdm_ht_triple_stream_basic_rates) &&
56 (rxon1->acquisition_data == rxon2->acquisition_data) &&
57 (rxon1->rx_chain == rxon2->rx_chain) &&
58 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
59 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
60 return 0;
61 }
62
63 rxon_assoc.flags = priv->staging_rxon.flags;
64 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
65 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
66 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
67 rxon_assoc.reserved1 = 0;
68 rxon_assoc.reserved2 = 0;
69 rxon_assoc.reserved3 = 0;
70 rxon_assoc.ofdm_ht_single_stream_basic_rates =
71 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
72 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
73 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
74 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
75 rxon_assoc.ofdm_ht_triple_stream_basic_rates =
76 priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
77 rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
78
79 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
80 sizeof(rxon_assoc), &rxon_assoc, NULL);
81 if (ret)
82 return ret;
83
84 return ret;
85}
86
87static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
88{
89 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
90 .valid = cpu_to_le32(valid_tx_ant),
91 };
92
93 if (IWL_UCODE_API(priv->ucode_ver) > 1) {
94 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
95 return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
96 sizeof(struct iwl_tx_ant_config_cmd),
97 &tx_ant_cmd);
98 } else {
99 IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
100 return -EOPNOTSUPP;
101 }
102}
103
104/* Currently this is the superset of everything */
105static u16 iwlagn_get_hcmd_size(u8 cmd_id, u16 len)
106{
107 return len;
108}
109
110static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
111{
112 u16 size = (u16)sizeof(struct iwl_addsta_cmd);
113 struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
114 memcpy(addsta, cmd, size);
115 /* resrved in 5000 */
116 addsta->rate_n_flags = cpu_to_le16(0);
117 return size;
118}
119
120static void iwlagn_gain_computation(struct iwl_priv *priv,
121 u32 average_noise[NUM_RX_CHAINS],
122 u16 min_average_noise_antenna_i,
123 u32 min_average_noise,
124 u8 default_chain)
125{
126 int i;
127 s32 delta_g;
128 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
129
130 /*
131 * Find Gain Code for the chains based on "default chain"
132 */
133 for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
134 if ((data->disconn_array[i])) {
135 data->delta_gain_code[i] = 0;
136 continue;
137 }
138
139 delta_g = (priv->cfg->chain_noise_scale *
140 ((s32)average_noise[default_chain] -
141 (s32)average_noise[i])) / 1500;
142
143 /* bound gain by 2 bits value max, 3rd bit is sign */
144 data->delta_gain_code[i] =
145 min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
146
147 if (delta_g < 0)
148 /*
149 * set negative sign ...
150 * note to Intel developers: This is uCode API format,
151 * not the format of any internal device registers.
152 * Do not change this format for e.g. 6050 or similar
153 * devices. Change format only if more resolution
154 * (i.e. more than 2 bits magnitude) is needed.
155 */
156 data->delta_gain_code[i] |= (1 << 2);
157 }
158
159 IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
160 data->delta_gain_code[1], data->delta_gain_code[2]);
161
162 if (!data->radio_write) {
163 struct iwl_calib_chain_noise_gain_cmd cmd;
164
165 memset(&cmd, 0, sizeof(cmd));
166
167 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
168 cmd.hdr.first_group = 0;
169 cmd.hdr.groups_num = 1;
170 cmd.hdr.data_valid = 1;
171 cmd.delta_gain_1 = data->delta_gain_code[1];
172 cmd.delta_gain_2 = data->delta_gain_code[2];
173 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
174 sizeof(cmd), &cmd, NULL);
175
176 data->radio_write = 1;
177 data->state = IWL_CHAIN_NOISE_CALIBRATED;
178 }
179
180 data->chain_noise_a = 0;
181 data->chain_noise_b = 0;
182 data->chain_noise_c = 0;
183 data->chain_signal_a = 0;
184 data->chain_signal_b = 0;
185 data->chain_signal_c = 0;
186 data->beacon_count = 0;
187}
188
189static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
190{
191 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
192 int ret;
193
194 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
195 struct iwl_calib_chain_noise_reset_cmd cmd;
196 memset(&cmd, 0, sizeof(cmd));
197
198 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
199 cmd.hdr.first_group = 0;
200 cmd.hdr.groups_num = 1;
201 cmd.hdr.data_valid = 1;
202 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
203 sizeof(cmd), &cmd);
204 if (ret)
205 IWL_ERR(priv,
206 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
207 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
208 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
209 }
210}
211
212static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
213 __le32 *tx_flags)
214{
215 if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
216 (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
217 *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
218 else
219 *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
220}
221
222/* Calc max signal level (dBm) among 3 possible receivers */
223static int iwlagn_calc_rssi(struct iwl_priv *priv,
224 struct iwl_rx_phy_res *rx_resp)
225{
226 /* data from PHY/DSP regarding signal strength, etc.,
227 * contents are always there, not configurable by host
228 */
229 struct iwl5000_non_cfg_phy *ncphy =
230 (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
231 u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
232 u8 agc;
233
234 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
235 agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
236
237 /* Find max rssi among 3 possible receivers.
238 * These values are measured by the digital signal processor (DSP).
239 * They should stay fairly constant even as the signal strength varies,
240 * if the radio's automatic gain control (AGC) is working right.
241 * AGC value (see below) will provide the "interesting" info.
242 */
243 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
244 rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
245 rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
246 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
247 rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
248
249 max_rssi = max_t(u32, rssi_a, rssi_b);
250 max_rssi = max_t(u32, max_rssi, rssi_c);
251
252 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
253 rssi_a, rssi_b, rssi_c, max_rssi, agc);
254
255 /* dBm = max_rssi dB - agc dB - constant.
256 * Higher AGC (higher radio gain) means lower signal. */
257 return max_rssi - agc - IWLAGN_RSSI_OFFSET;
258}
259
260struct iwl_hcmd_ops iwlagn_hcmd = {
261 .rxon_assoc = iwlagn_send_rxon_assoc,
262 .commit_rxon = iwl_commit_rxon,
263 .set_rxon_chain = iwl_set_rxon_chain,
264 .set_tx_ant = iwlagn_send_tx_ant_config,
265 .send_bt_config = iwl_send_bt_config,
266};
267
268struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
269 .get_hcmd_size = iwlagn_get_hcmd_size,
270 .build_addsta_hcmd = iwlagn_build_addsta_hcmd,
271 .gain_computation = iwlagn_gain_computation,
272 .chain_noise_reset = iwlagn_chain_noise_reset,
273 .rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag,
274 .calc_rssi = iwlagn_calc_rssi,
275 .request_scan = iwlagn_request_scan,
276};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
new file mode 100644
index 000000000000..f9a3fbb6338f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -0,0 +1,118 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-agn-hw.h) only for hardware-related definitions.
65 */
66
67#ifndef __iwl_agn_hw_h__
68#define __iwl_agn_hw_h__
69
70#define IWLAGN_RTC_INST_LOWER_BOUND (0x000000)
71#define IWLAGN_RTC_INST_UPPER_BOUND (0x020000)
72
73#define IWLAGN_RTC_DATA_LOWER_BOUND (0x800000)
74#define IWLAGN_RTC_DATA_UPPER_BOUND (0x80C000)
75
76#define IWLAGN_RTC_INST_SIZE (IWLAGN_RTC_INST_UPPER_BOUND - \
77 IWLAGN_RTC_INST_LOWER_BOUND)
78#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \
79 IWLAGN_RTC_DATA_LOWER_BOUND)
80
81/* RSSI to dBm */
82#define IWLAGN_RSSI_OFFSET 44
83
84/* PCI registers */
85#define PCI_CFG_RETRY_TIMEOUT 0x041
86
87/* PCI register values */
88#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
89#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
90
91#define IWLAGN_DEFAULT_TX_RETRY 15
92
93/* Limit range of txpower output target to be between these values */
94#define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
95#define IWLAGN_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
96
97/* EEPROM */
98#define IWLAGN_EEPROM_IMG_SIZE 2048
99
100#define IWLAGN_CMD_FIFO_NUM 7
101#define IWLAGN_NUM_QUEUES 20
102#define IWLAGN_NUM_AMPDU_QUEUES 10
103#define IWLAGN_FIRST_AMPDU_QUEUE 10
104
105/* Fixed (non-configurable) rx data from phy */
106
107/**
108 * struct iwlagn_schedq_bc_tbl scheduler byte count table
109 * base physical address provided by SCD_DRAM_BASE_ADDR
110 * @tfd_offset 0-12 - tx command byte count
111 * 12-16 - station index
112 */
113struct iwlagn_scd_bc_tbl {
114 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
115} __attribute__ ((packed));
116
117
118#endif /* __iwl_agn_hw_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
new file mode 100644
index 000000000000..a273e373b7b0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -0,0 +1,307 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <net/mac80211.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-agn.h"
38#include "iwl-helpers.h"
39
40#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
41
42/* Free dram table */
43void iwl_free_isr_ict(struct iwl_priv *priv)
44{
45 if (priv->_agn.ict_tbl_vir) {
46 dma_free_coherent(&priv->pci_dev->dev,
47 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
48 priv->_agn.ict_tbl_vir,
49 priv->_agn.ict_tbl_dma);
50 priv->_agn.ict_tbl_vir = NULL;
51 }
52}
53
54
55/* allocate dram shared table it is a PAGE_SIZE aligned
56 * also reset all data related to ICT table interrupt.
57 */
58int iwl_alloc_isr_ict(struct iwl_priv *priv)
59{
60
61 if (priv->cfg->use_isr_legacy)
62 return 0;
63 /* allocate shrared data table */
64 priv->_agn.ict_tbl_vir =
65 dma_alloc_coherent(&priv->pci_dev->dev,
66 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
67 &priv->_agn.ict_tbl_dma, GFP_KERNEL);
68 if (!priv->_agn.ict_tbl_vir)
69 return -ENOMEM;
70
71 /* align table to PAGE_SIZE boundry */
72 priv->_agn.aligned_ict_tbl_dma = ALIGN(priv->_agn.ict_tbl_dma, PAGE_SIZE);
73
74 IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
75 (unsigned long long)priv->_agn.ict_tbl_dma,
76 (unsigned long long)priv->_agn.aligned_ict_tbl_dma,
77 (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
78
79 priv->_agn.ict_tbl = priv->_agn.ict_tbl_vir +
80 (priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma);
81
82 IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
83 priv->_agn.ict_tbl, priv->_agn.ict_tbl_vir,
84 (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
85
86 /* reset table and index to all 0 */
87 memset(priv->_agn.ict_tbl_vir,0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
88 priv->_agn.ict_index = 0;
89
90 /* add periodic RX interrupt */
91 priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
92 return 0;
93}
94
95/* Device is going up inform it about using ICT interrupt table,
96 * also we need to tell the driver to start using ICT interrupt.
97 */
98int iwl_reset_ict(struct iwl_priv *priv)
99{
100 u32 val;
101 unsigned long flags;
102
103 if (!priv->_agn.ict_tbl_vir)
104 return 0;
105
106 spin_lock_irqsave(&priv->lock, flags);
107 iwl_disable_interrupts(priv);
108
109 memset(&priv->_agn.ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
110
111 val = priv->_agn.aligned_ict_tbl_dma >> PAGE_SHIFT;
112
113 val |= CSR_DRAM_INT_TBL_ENABLE;
114 val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
115
116 IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
117 "aligned dma address %Lx\n",
118 val, (unsigned long long)priv->_agn.aligned_ict_tbl_dma);
119
120 iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
121 priv->_agn.use_ict = true;
122 priv->_agn.ict_index = 0;
123 iwl_write32(priv, CSR_INT, priv->inta_mask);
124 iwl_enable_interrupts(priv);
125 spin_unlock_irqrestore(&priv->lock, flags);
126
127 return 0;
128}
129
130/* Device is going down disable ict interrupt usage */
131void iwl_disable_ict(struct iwl_priv *priv)
132{
133 unsigned long flags;
134
135 spin_lock_irqsave(&priv->lock, flags);
136 priv->_agn.use_ict = false;
137 spin_unlock_irqrestore(&priv->lock, flags);
138}
139
140static irqreturn_t iwl_isr(int irq, void *data)
141{
142 struct iwl_priv *priv = data;
143 u32 inta, inta_mask;
144 unsigned long flags;
145#ifdef CONFIG_IWLWIFI_DEBUG
146 u32 inta_fh;
147#endif
148 if (!priv)
149 return IRQ_NONE;
150
151 spin_lock_irqsave(&priv->lock, flags);
152
153 /* Disable (but don't clear!) interrupts here to avoid
154 * back-to-back ISRs and sporadic interrupts from our NIC.
155 * If we have something to service, the tasklet will re-enable ints.
156 * If we *don't* have something, we'll re-enable before leaving here. */
157 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
158 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
159
160 /* Discover which interrupts are active/pending */
161 inta = iwl_read32(priv, CSR_INT);
162
163 /* Ignore interrupt if there's nothing in NIC to service.
164 * This may be due to IRQ shared with another device,
165 * or due to sporadic interrupts thrown from our NIC. */
166 if (!inta) {
167 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
168 goto none;
169 }
170
171 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
172 /* Hardware disappeared. It might have already raised
173 * an interrupt */
174 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
175 goto unplugged;
176 }
177
178#ifdef CONFIG_IWLWIFI_DEBUG
179 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
180 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
181 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
182 "fh 0x%08x\n", inta, inta_mask, inta_fh);
183 }
184#endif
185
186 priv->_agn.inta |= inta;
187 /* iwl_irq_tasklet() will service interrupts and re-enable them */
188 if (likely(inta))
189 tasklet_schedule(&priv->irq_tasklet);
190 else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
191 iwl_enable_interrupts(priv);
192
193 unplugged:
194 spin_unlock_irqrestore(&priv->lock, flags);
195 return IRQ_HANDLED;
196
197 none:
198 /* re-enable interrupts here since we don't have anything to service. */
199 /* only Re-enable if diabled by irq and no schedules tasklet. */
200 if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
201 iwl_enable_interrupts(priv);
202
203 spin_unlock_irqrestore(&priv->lock, flags);
204 return IRQ_NONE;
205}
206
207/* interrupt handler using ict table, with this interrupt driver will
208 * stop using INTA register to get device's interrupt, reading this register
209 * is expensive, device will write interrupts in ICT dram table, increment
210 * index then will fire interrupt to driver, driver will OR all ICT table
211 * entries from current index up to table entry with 0 value. the result is
212 * the interrupt we need to service, driver will set the entries back to 0 and
213 * set index.
214 */
215irqreturn_t iwl_isr_ict(int irq, void *data)
216{
217 struct iwl_priv *priv = data;
218 u32 inta, inta_mask;
219 u32 val = 0;
220 unsigned long flags;
221
222 if (!priv)
223 return IRQ_NONE;
224
225 /* dram interrupt table not set yet,
226 * use legacy interrupt.
227 */
228 if (!priv->_agn.use_ict)
229 return iwl_isr(irq, data);
230
231 spin_lock_irqsave(&priv->lock, flags);
232
233 /* Disable (but don't clear!) interrupts here to avoid
234 * back-to-back ISRs and sporadic interrupts from our NIC.
235 * If we have something to service, the tasklet will re-enable ints.
236 * If we *don't* have something, we'll re-enable before leaving here.
237 */
238 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
239 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
240
241
242 /* Ignore interrupt if there's nothing in NIC to service.
243 * This may be due to IRQ shared with another device,
244 * or due to sporadic interrupts thrown from our NIC. */
245 if (!priv->_agn.ict_tbl[priv->_agn.ict_index]) {
246 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
247 goto none;
248 }
249
250 /* read all entries that not 0 start with ict_index */
251 while (priv->_agn.ict_tbl[priv->_agn.ict_index]) {
252
253 val |= le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]);
254 IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
255 priv->_agn.ict_index,
256 le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]));
257 priv->_agn.ict_tbl[priv->_agn.ict_index] = 0;
258 priv->_agn.ict_index = iwl_queue_inc_wrap(priv->_agn.ict_index,
259 ICT_COUNT);
260
261 }
262
263 /* We should not get this value, just ignore it. */
264 if (val == 0xffffffff)
265 val = 0;
266
267 /*
268 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
269 * (bit 15 before shifting it to 31) to clear when using interrupt
270 * coalescing. fortunately, bits 18 and 19 stay set when this happens
271 * so we use them to decide on the real state of the Rx bit.
272 * In order words, bit 15 is set if bit 18 or bit 19 are set.
273 */
274 if (val & 0xC0000)
275 val |= 0x8000;
276
277 inta = (0xff & val) | ((0xff00 & val) << 16);
278 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
279 inta, inta_mask, val);
280
281 inta &= priv->inta_mask;
282 priv->_agn.inta |= inta;
283
284 /* iwl_irq_tasklet() will service interrupts and re-enable them */
285 if (likely(inta))
286 tasklet_schedule(&priv->irq_tasklet);
287 else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) {
288 /* Allow interrupt if was disabled by this handler and
289 * no tasklet was schedules, We should not enable interrupt,
290 * tasklet will enable it.
291 */
292 iwl_enable_interrupts(priv);
293 }
294
295 spin_unlock_irqrestore(&priv->lock, flags);
296 return IRQ_HANDLED;
297
298 none:
299 /* re-enable interrupts here since we don't have anything to service.
300 * only Re-enable if disabled by irq.
301 */
302 if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
303 iwl_enable_interrupts(priv);
304
305 spin_unlock_irqrestore(&priv->lock, flags);
306 return IRQ_NONE;
307}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
new file mode 100644
index 000000000000..1004cfc403b1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -0,0 +1,1530 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-agn-hw.h"
40#include "iwl-agn.h"
41#include "iwl-sta.h"
42
/*
 * iwlagn_get_scd_ssn - extract the scheduler SSN from a Tx response
 *
 * The uCode appends a 32-bit scheduler sequence number immediately after
 * the per-frame agg_tx_status array; frame_count entries precede it, so
 * indexing &tx_resp->status by frame_count lands on that trailing word.
 * Masked with MAX_SN to yield just the sequence number bits.
 */
static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
{
	return le32_to_cpup((__le32 *)&tx_resp->status +
			    tx_resp->frame_count) & MAX_SN;
}
48
/*
 * iwlagn_tx_status_reply_tx - update aggregation state from a Tx response
 *
 * @agg:       per-TID aggregation state to update
 * @tx_resp:   uCode Tx response containing per-frame status entries
 * @txq_id:    hardware Tx queue the response arrived on
 * @start_idx: queue index of the first frame covered by this response
 *
 * For a single-frame "aggregate" no block-ack will arrive, so the frame's
 * status is reported to mac80211 immediately.  For multi-frame aggregates
 * this builds agg->bitmap, a 64-bit map of frames still awaiting block-ack,
 * and sets agg->wait_for_ba when any bit is pending.
 *
 * Returns 0 on success, -1 if a frame index fails sanity checks.
 */
static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
				      struct iwl_ht_agg *agg,
				      struct iwl5000_tx_resp *tx_resp,
				      int txq_id, u16 start_idx)
{
	u16 status;
	struct agg_tx_status *frame_status = &tx_resp->status;
	struct ieee80211_tx_info *info = NULL;
	struct ieee80211_hdr *hdr = NULL;
	u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
	int i, sh, idx;
	u16 seq;

	if (agg->wait_for_ba)
		IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");

	agg->frame_count = tx_resp->frame_count;
	agg->start_idx = start_idx;
	agg->rate_n_flags = rate_n_flags;
	agg->bitmap = 0;

	/* # frames attempted by Tx command */
	if (agg->frame_count == 1) {
		/* Only one frame was attempted; no block-ack will arrive */
		status = le16_to_cpu(frame_status[0].status);
		idx = start_idx;

		/* FIXME: code repetition */
		IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
				   agg->frame_count, agg->start_idx, idx);

		info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
		info->flags |= iwl_tx_status_to_mac80211(status);
		iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);

		/* FIXME: code repetition end */

		IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
				   status & 0xff, tx_resp->failure_frame);
		IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);

		agg->wait_for_ba = 0;
	} else {
		/* Two or more frames were attempted; expect block-ack */
		u64 bitmap = 0;
		int start = agg->start_idx;

		/* Construct bit-map of pending frames within Tx window */
		for (i = 0; i < agg->frame_count; i++) {
			u16 sc;
			status = le16_to_cpu(frame_status[i].status);
			seq = le16_to_cpu(frame_status[i].sequence);
			/* each status entry carries the frame's queue/index */
			idx = SEQ_TO_INDEX(seq);
			txq_id = SEQ_TO_QUEUE(seq);

			/* skip frames the scheduler aborted before the air */
			if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
				      AGG_TX_STATE_ABORT_MSK))
				continue;

			IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
					   agg->frame_count, txq_id, idx);

			hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
			if (!hdr) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't point to valid skb"
					" idx=%d, txq_id=%d\n", idx, txq_id);
				return -1;
			}

			/* cross-check the queue index against the frame's own
			 * 802.11 sequence number */
			sc = le16_to_cpu(hdr->seq_ctrl);
			if (idx != (SEQ_TO_SN(sc) & 0xff)) {
				IWL_ERR(priv,
					"BUG_ON idx doesn't match seq control"
					" idx=%d, seq_idx=%d, seq=%d\n",
					idx, SEQ_TO_SN(sc),
					hdr->seq_ctrl);
				return -1;
			}

			IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
					   i, idx, SEQ_TO_SN(sc));

			/* slide the 64-bit window so the bit for this frame
			 * fits; indices wrap modulo 256 (0xff adjustments) */
			sh = idx - start;
			if (sh > 64) {
				sh = (start - idx) + 0xff;
				bitmap = bitmap << sh;
				sh = 0;
				start = idx;
			} else if (sh < -64)
				sh = 0xff - (start - idx);
			else if (sh < 0) {
				sh = start - idx;
				start = idx;
				bitmap = bitmap << sh;
				sh = 0;
			}
			bitmap |= 1ULL << sh;
			IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
					   start, (unsigned long long)bitmap);
		}

		agg->bitmap = bitmap;
		agg->start_idx = start;
		IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
				   agg->frame_count, agg->start_idx,
				   (unsigned long long)agg->bitmap);

		if (bitmap)
			agg->wait_for_ba = 1;
	}
	return 0;
}
164
165void iwl_check_abort_status(struct iwl_priv *priv,
166 u8 frame_count, u32 status)
167{
168 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
169 IWL_ERR(priv, "TODO: Implement Tx flush command!!!\n");
170 }
171}
172
/*
 * iwlagn_rx_reply_tx - handle a REPLY_TX notification from the uCode
 *
 * Validates the reported queue index, reports frame status to mac80211,
 * reclaims completed TFDs from the Tx queue and, if the queue has drained
 * below its low-mark, wakes it again.  Aggregated (sched_retry) queues
 * additionally update the per-TID aggregation state via
 * iwlagn_tx_status_reply_tx() and reclaim up to the scheduler SSN.
 */
static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
				struct iwl_rx_mem_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	struct iwl_tx_queue *txq = &priv->txq[txq_id];
	struct ieee80211_tx_info *info;
	struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
	u32 status = le16_to_cpu(tx_resp->status.status);
	int tid;
	int sta_id;
	int freed;

	/* reject indices outside the ring or not currently in use */
	if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
		IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
			  "is out of range [0-%d] %d %d\n", txq_id,
			  index, txq->q.n_bd, txq->q.write_ptr,
			  txq->q.read_ptr);
		return;
	}

	info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
	memset(&info->status, 0, sizeof(info->status));

	/* station id and TID are packed into ra_tid by the Tx path */
	tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
	sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;

	if (txq->sched_retry) {
		const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
		struct iwl_ht_agg *agg = NULL;

		agg = &priv->stations[sta_id].tid[tid].agg;

		iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);

		/* check if BAR is needed */
		if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;

		/* reclaim everything up to (but excluding) the scheduler's
		 * reported SSN */
		if (txq->q.read_ptr != (scd_ssn & 0xff)) {
			index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
			IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
					"scd_ssn=%d idx=%d txq=%d swq=%d\n",
					scd_ssn , index, txq_id, txq->swq_id);

			freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
			iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

			if (priv->mac80211_registered &&
			    (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
			    (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
				if (agg->state == IWL_AGG_OFF)
					iwl_wake_queue(priv, txq_id);
				else
					iwl_wake_queue(priv, txq->swq_id);
			}
		}
	} else {
		/* non-aggregated queue: hw queue id must equal the sw one */
		BUG_ON(txq_id != txq->swq_id);

		info->status.rates[0].count = tx_resp->failure_frame + 1;
		info->flags |= iwl_tx_status_to_mac80211(status);
		iwlagn_hwrate_to_tx_control(priv,
					le32_to_cpu(tx_resp->rate_n_flags),
					info);

		IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
				   "0x%x retries %d\n",
				   txq_id,
				   iwl_get_tx_fail_reason(status), status,
				   le32_to_cpu(tx_resp->rate_n_flags),
				   tx_resp->failure_frame);

		freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);

		if (priv->mac80211_registered &&
		    (iwl_queue_space(&txq->q) > txq->q.low_mark))
			iwl_wake_queue(priv, txq_id);
	}

	iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);

	iwl_check_abort_status(priv, tx_resp->frame_count, status);
}
260
/*
 * iwlagn_rx_handler_setup - install agn-specific Rx notification handlers
 *
 * Registers handlers for calibration results/completion and for Tx
 * responses in the priv->rx_handlers dispatch table.
 */
void iwlagn_rx_handler_setup(struct iwl_priv *priv)
{
	/* init calibration handlers */
	priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
					iwlagn_rx_calib_result;
	priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
					iwlagn_rx_calib_complete;
	priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
}
270
/*
 * iwlagn_setup_deferred_work - agn-specific deferred-work setup
 *
 * Only disables the driver-side Tx power calibration here; on agn
 * devices that calibration is performed by the uCode itself.
 */
void iwlagn_setup_deferred_work(struct iwl_priv *priv)
{
	/* in agn, the tx power calibration is done in uCode */
	priv->disable_tx_power_cal = 1;
}
276
277int iwlagn_hw_valid_rtc_data_addr(u32 addr)
278{
279 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
280 (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
281}
282
/*
 * iwlagn_send_tx_power - send the current Tx power limit to the uCode
 *
 * Builds an iwl5000_tx_power_dbm_cmd from the user-requested limit
 * (converted to half-dBm units), optionally clamps it to the EEPROM
 * limit, and sends it asynchronously with the command id matching the
 * uCode API version.  Returns the result of iwl_send_cmd_pdu_async().
 */
int iwlagn_send_tx_power(struct iwl_priv *priv)
{
	struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
	u8 tx_ant_cfg_cmd;

	/* half dBm need to multiply */
	tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);

	if (priv->tx_power_lmt_in_half_dbm &&
	    priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
		/*
		 * For the newer devices which using enhanced/extend tx power
		 * table in EEPROM, the format is in half dBm. driver need to
		 * convert to dBm format before report to mac80211.
		 * By doing so, there is a possibility of 1/2 dBm resolution
		 * lost. driver will perform "round-up" operation before
		 * reporting, but it will cause 1/2 dBm tx power over the
		 * regulatory limit. Perform the checking here, if the
		 * "tx_power_user_lmt" is higher than EEPROM value (in
		 * half-dBm format), lower the tx power based on EEPROM
		 */
		tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
	}
	tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
	tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;

	/* older uCode API uses a different command id for the same payload */
	if (IWL_UCODE_API(priv->ucode_ver) == 1)
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
	else
		tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;

	return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
				       sizeof(tx_power_cmd), &tx_power_cmd,
				       NULL);
}
318
/*
 * iwlagn_temperature - latch temperature from statistics and run thermal mgmt
 *
 * Copies the temperature reported in the latest statistics notification
 * into priv->temperature, then invokes the thermal-throttling handler.
 */
void iwlagn_temperature(struct iwl_priv *priv)
{
	/* store temperature from statistics (in Celsius) */
	priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
	iwl_tt_handler(priv);
}
325
/*
 * iwlagn_eeprom_calib_version - read the calibration version from EEPROM
 *
 * Overlays a local header struct on the EEPROM calibration section and
 * returns its version byte.
 *
 * NOTE(review): the pointer returned by iwl_eeprom_query_addr() is
 * dereferenced without a NULL check — presumably it cannot fail for
 * EEPROM_CALIB_ALL; confirm against that helper's contract.
 */
u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
{
	/* layout of the calibration section header within the EEPROM image */
	struct iwl_eeprom_calib_hdr {
		u8 version;
		u8 pa_type;
		u16 voltage;
	} *hdr;

	hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
							EEPROM_CALIB_ALL);
	return hdr->version;

}
339
340/*
341 * EEPROM
342 */
343static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
344{
345 u16 offset = 0;
346
347 if ((address & INDIRECT_ADDRESS) == 0)
348 return address;
349
350 switch (address & INDIRECT_TYPE_MSK) {
351 case INDIRECT_HOST:
352 offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
353 break;
354 case INDIRECT_GENERAL:
355 offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
356 break;
357 case INDIRECT_REGULATORY:
358 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
359 break;
360 case INDIRECT_CALIBRATION:
361 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
362 break;
363 case INDIRECT_PROCESS_ADJST:
364 offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
365 break;
366 case INDIRECT_OTHERS:
367 offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
368 break;
369 default:
370 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
371 address & INDIRECT_TYPE_MSK);
372 break;
373 }
374
375 /* translate the offset from words to byte */
376 return (address & ADDRESS_MSK) + (offset << 1);
377}
378
/*
 * iwlagn_eeprom_query_addr - return a pointer into the cached EEPROM image
 *
 * @offset may be an indirect address; it is resolved first.  The resolved
 * address is bounds-checked against the device's EEPROM size (BUG on
 * overflow) before indexing into the in-memory copy.
 */
const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
					   size_t offset)
{
	u32 address = eeprom_indirect_address(priv, offset);
	BUG_ON(address >= priv->cfg->eeprom_size);
	return &priv->eeprom[address];
}
386
/* Default module parameters for agn devices: 8K A-MSDU receive buffers
 * and automatic firmware restart are enabled; all other knobs start at 0. */
struct iwl_mod_params iwlagn_mod_params = {
	.amsdu_size_8K = 1,
	.restart_fw = 1,
	/* the rest are 0 by default */
};
392
/*
 * iwlagn_rx_queue_reset - return an allocated Rx queue to its initial state
 *
 * Frees/unmaps any pages still attached to pool entries, moves every pool
 * entry onto rx_used, clears the RBD queue slots, and zeroes the
 * read/write/free counters.  The whole operation runs under rxq->lock.
 */
void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	unsigned long flags;
	int i;
	spin_lock_irqsave(&rxq->lock, flags);
	INIT_LIST_HEAD(&rxq->rx_free);
	INIT_LIST_HEAD(&rxq->rx_used);
	/* Fill the rx_used queue with _all_ of the Rx buffers */
	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
		/* In the reset function, these buffers may have been allocated
		 * to an SKB, so we need to unmap and free potential storage */
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
	}

	/* detach every RBD slot from its buffer */
	for (i = 0; i < RX_QUEUE_SIZE; i++)
		rxq->queue[i] = NULL;

	/* Set us so that we have processed and used all buffers, but have
	 * not restocked the Rx queue with fresh buffers */
	rxq->read = rxq->write = 0;
	rxq->write_actual = 0;
	rxq->free_count = 0;
	spin_unlock_irqrestore(&rxq->lock, flags);
}
424
/*
 * iwlagn_rx_init - program the device's Rx DMA engine for this queue
 *
 * Stops Rx DMA, points the hardware at the RBD ring and status area in
 * DRAM, then re-enables DMA with the buffer size (4K or 8K depending on
 * the amsdu_size_8K module parameter), RB timeout and ring-size settings.
 * Always returns 0.
 */
int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
	u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */

	/* legacy-ISR devices run without an RB timeout */
	if (!priv->cfg->use_isr_legacy)
		rb_timeout = RX_RB_TIMEOUT;

	if (priv->cfg->mod_params->amsdu_size_8K)
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
	else
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;

	/* Stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);

	/* Reset driver's Rx queue write index */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
			   (u32)(rxq->dma_addr >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
			   rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
			   FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
			   rb_size|
			   (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	return 0;
}
475
/*
 * iwlagn_hw_nic_init - bring up the NIC's Rx/Tx machinery
 *
 * Initializes the APM and device config, allocates (or resets) the Rx
 * queue and replenishes it, programs the Rx DMA engine, then allocates
 * (or resets) all Tx/command queues.  Sets STATUS_INIT on success.
 *
 * Returns 0 on success, -ENOMEM if the Rx queue cannot be allocated, or
 * the Tx-queue allocation error code.
 */
int iwlagn_hw_nic_init(struct iwl_priv *priv)
{
	unsigned long flags;
	struct iwl_rx_queue *rxq = &priv->rxq;
	int ret;

	/* nic_init */
	spin_lock_irqsave(&priv->lock, flags);
	priv->cfg->ops->lib->apm_ops.init(priv);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* NOTE(review): the return value of set_pwr_src() is assigned but
	 * never checked before being overwritten below — confirm whether a
	 * failure here should abort initialization. */
	ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);

	priv->cfg->ops->lib->apm_ops.config(priv);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (!rxq->bd) {
		ret = iwl_rx_queue_alloc(priv);
		if (ret) {
			IWL_ERR(priv, "Unable to initialize Rx queue\n");
			return -ENOMEM;
		}
	} else
		iwlagn_rx_queue_reset(priv, rxq);

	iwlagn_rx_replenish(priv);

	iwlagn_rx_init(priv, rxq);

	spin_lock_irqsave(&priv->lock, flags);

	rxq->need_update = 1;
	iwl_rx_queue_update_write_ptr(priv, rxq);

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Allocate or reset and init all Tx and Command queues */
	if (!priv->txq) {
		ret = iwlagn_txq_ctx_alloc(priv);
		if (ret)
			return ret;
	} else
		iwlagn_txq_ctx_reset(priv);

	set_bit(STATUS_INIT, &priv->status);

	return 0;
}
528
529/**
530 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
531 */
static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
					  dma_addr_t dma_addr)
{
	/* hardware RBD pointers are the DMA address shifted right by 8,
	 * stored little-endian (implies 256-byte alignment of the buffer) */
	return cpu_to_le32((u32)(dma_addr >> 8));
}
537
538/**
539 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
540 *
541 * If there are slots in the RX queue that need to be restocked,
542 * and we have free pre-allocated buffers, fill the ranks as much
543 * as we can, pulling from rx_free.
544 *
545 * This moves the 'write' index forward to catch up with 'processed', and
546 * also updates the memory address in the firmware to reference the new
547 * target buffer.
548 */
/*
 * iwlagn_rx_queue_restock - move buffers from rx_free into the RBD ring
 *
 * Runs under rxq->lock while transferring buffers; the write pointer is
 * only pushed to the device (in multiples of 8) after the lock is
 * dropped.  Schedules the replenish worker if the free pool runs low.
 */
void iwlagn_rx_queue_restock(struct iwl_priv *priv)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	unsigned long flags;

	spin_lock_irqsave(&rxq->lock, flags);
	while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		element = rxq->rx_free.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		/* Point to Rx buffer via next RBD in circular buffer */
		rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
							      rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_irqrestore(&rxq->lock, flags);
	/* If the pre-allocated buffer pool is dropping low, schedule to
	 * refill it */
	if (rxq->free_count <= RX_LOW_WATERMARK)
		queue_work(priv->workqueue, &priv->rx_replenish);


	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_irqsave(&rxq->lock, flags);
		rxq->need_update = 1;
		spin_unlock_irqrestore(&rxq->lock, flags);
		iwl_rx_queue_update_write_ptr(priv, rxq);
	}
}
590
/**
 * iwlagn_rx_allocate - allocate pages for used Rx buffers, move them to rx_free
 *
 * A receive page is allocated and DMA-mapped for each buffer taken from
 * rx_used; the buffer is then placed on rx_free.  The RBD ring itself is
 * restocked separately via iwlagn_rx_queue_restock (see
 * iwlagn_rx_replenish, which runs as a scheduled work item except during
 * initialization).
 */
/*
 * iwlagn_rx_allocate - attach freshly allocated pages to used Rx buffers
 *
 * @priority: GFP flags for the page allocation (GFP_KERNEL from the
 *            replenish worker, GFP_ATOMIC from interrupt context)
 *
 * Loops until rx_used is empty.  The lock is intentionally dropped
 * around alloc_pages() (which may sleep) and rx_used is re-checked after
 * re-acquiring it, so allocation failures or races only abandon the
 * current page, never a list entry.
 */
void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
{
	struct iwl_rx_queue *rxq = &priv->rxq;
	struct list_head *element;
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;
	unsigned long flags;
	gfp_t gfp_mask = priority;

	while (1) {
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			return;
		}
		spin_unlock_irqrestore(&rxq->lock, flags);

		/* plenty of buffers left: failures are not worth a warning */
		if (rxq->free_count > RX_LOW_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		if (priv->hw_params.rx_page_order > 0)
			gfp_mask |= __GFP_COMP;

		/* Alloc a new receive buffer */
		page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
		if (!page) {
			if (net_ratelimit())
				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
					       "order: %d\n",
					       priv->hw_params.rx_page_order);

			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
			    net_ratelimit())
				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
			/* We don't reschedule replenish work here -- we will
			 * call the restock method and if it still needs
			 * more buffers it will schedule replenish */
			return;
		}

		spin_lock_irqsave(&rxq->lock, flags);

		/* rx_used may have been drained while the lock was dropped */
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			__free_pages(page, priv->hw_params.rx_page_order);
			return;
		}
		element = rxq->rx_used.next;
		rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
		list_del(element);

		spin_unlock_irqrestore(&rxq->lock, flags);

		BUG_ON(rxb->page);
		rxb->page = page;
		/* Get physical address of the RB */
		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
		/* dma address must be no more than 36 bits */
		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
		/* and also 256 byte aligned! */
		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));

		spin_lock_irqsave(&rxq->lock, flags);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;
		priv->alloc_rxb_page++;

		spin_unlock_irqrestore(&rxq->lock, flags);
	}
}
674
/*
 * iwlagn_rx_replenish - allocate Rx pages (may sleep) and restock the ring
 *
 * Process-context variant: allocates with GFP_KERNEL, then restocks the
 * RBD ring under priv->lock.
 */
void iwlagn_rx_replenish(struct iwl_priv *priv)
{
	unsigned long flags;

	iwlagn_rx_allocate(priv, GFP_KERNEL);

	spin_lock_irqsave(&priv->lock, flags);
	iwlagn_rx_queue_restock(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
685
/*
 * iwlagn_rx_replenish_now - atomic-context replenish
 *
 * Same as iwlagn_rx_replenish() but allocates with GFP_ATOMIC and
 * restocks without taking priv->lock (caller context differs — used
 * where sleeping is not allowed).
 */
void iwlagn_rx_replenish_now(struct iwl_priv *priv)
{
	iwlagn_rx_allocate(priv, GFP_ATOMIC);

	iwlagn_rx_queue_restock(priv);
}
692
/* Assumes that the page field of the buffers in 'pool' is kept accurate.
 * If a page has been detached, the pool entry must have its page set to NULL.
 * This free routine walks the list of pool entries and, for each entry whose
 * page is non-NULL, unmaps and frees it.
 */
/*
 * iwlagn_rx_queue_free - release all memory held by an Rx queue
 *
 * Unmaps and frees every page still attached to a pool entry, then frees
 * the coherent RBD ring and Rx status block, NULLing the pointers so a
 * later reset cannot touch freed memory.
 */
void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
{
	int i;
	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
		if (rxq->pool[i].page != NULL) {
			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
				PAGE_SIZE << priv->hw_params.rx_page_order,
				PCI_DMA_FROMDEVICE);
			__iwl_free_pages(priv, rxq->pool[i].page);
			rxq->pool[i].page = NULL;
		}
	}

	/* 4 bytes per RBD entry in the circular buffer */
	dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
			  rxq->dma_addr);
	dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
			  rxq->rb_stts, rxq->rb_stts_dma);
	rxq->bd = NULL;
	rxq->rb_stts  = NULL;
}
718
/*
 * iwlagn_rxq_stop - halt the Rx DMA channel
 *
 * Writes 0 to the channel-0 config register and polls (up to 1000 us)
 * for the idle status bit.  Always returns 0 — the poll result is not
 * propagated.
 */
int iwlagn_rxq_stop(struct iwl_priv *priv)
{

	/* stop Rx DMA */
	iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
			    FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);

	return 0;
}
729
730int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
731{
732 int idx = 0;
733 int band_offset = 0;
734
735 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
736 if (rate_n_flags & RATE_MCS_HT_MSK) {
737 idx = (rate_n_flags & 0xff);
738 return idx;
739 /* Legacy rate format, search for match in table */
740 } else {
741 if (band == IEEE80211_BAND_5GHZ)
742 band_offset = IWL_FIRST_OFDM_RATE;
743 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
744 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
745 return idx - band_offset;
746 }
747
748 return -1;
749}
750
751/* Calc max signal level (dBm) among 3 possible receivers */
/* Calc max signal level (dBm) among 3 possible receivers —
 * delegates to the device-specific calc_rssi implementation. */
static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
				   struct iwl_rx_phy_res *rx_resp)
{
	return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
}
757
758#ifdef CONFIG_IWLWIFI_DEBUG
759/**
760 * iwlagn_dbg_report_frame - dump frame to syslog during debug sessions
761 *
762 * You may hack this function to show different aspects of received frames,
763 * including selective frame dumps.
764 * group100 parameter selects whether to show 1 out of 100 good data frames.
765 * All beacon and probe response frames are printed.
766 */
/*
 * iwlagn_dbg_report_frame - print a one-line summary (and optional hex
 * dump) of a received frame when IWL_DL_RX debugging is enabled.
 *
 * @group100: when non-zero, data frames addressed to us are summarized
 *            only once per 100 frames; all other frame types always get
 *            a summary.  Beacons and probe responses are also hex-dumped.
 */
static void iwlagn_dbg_report_frame(struct iwl_priv *priv,
		      struct iwl_rx_phy_res *phy_res, u16 length,
		      struct ieee80211_hdr *header, int group100)
{
	u32 to_us;
	u32 print_summary = 0;
	u32 print_dump = 0;	/* set to 1 to dump all frames' contents */
	u32 hundred = 0;
	u32 dataframe = 0;
	__le16 fc;
	u16 seq_ctl;
	u16 channel;
	u16 phy_flags;
	u32 rate_n_flags;
	u32 tsf_low;
	int rssi;

	if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
		return;

	/* MAC header */
	fc = header->frame_control;
	seq_ctl = le16_to_cpu(header->seq_ctrl);

	/* metadata */
	channel = le16_to_cpu(phy_res->channel);
	phy_flags = le16_to_cpu(phy_res->phy_flags);
	rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);

	/* signal statistics */
	rssi = iwlagn_calc_rssi(priv, phy_res);
	tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;

	to_us = !compare_ether_addr(header->addr1, priv->mac_addr);

	/* if data frame is to us and all is good,
	 *   (optionally) print summary for only 1 out of every 100 */
	if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
	    cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
		dataframe = 1;
		if (!group100)
			print_summary = 1;	/* print each frame */
		else if (priv->framecnt_to_us < 100) {
			priv->framecnt_to_us++;
			print_summary = 0;
		} else {
			priv->framecnt_to_us = 0;
			print_summary = 1;
			hundred = 1;
		}
	} else {
		/* print summary for all other frames */
		print_summary = 1;
	}

	if (print_summary) {
		char *title;
		int rate_idx;
		u32 bitrate;

		/* pick a label from the frame type */
		if (hundred)
			title = "100Frames";
		else if (ieee80211_has_retry(fc))
			title = "Retry";
		else if (ieee80211_is_assoc_resp(fc))
			title = "AscRsp";
		else if (ieee80211_is_reassoc_resp(fc))
			title = "RasRsp";
		else if (ieee80211_is_probe_resp(fc)) {
			title = "PrbRsp";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_beacon(fc)) {
			title = "Beacon";
			print_dump = 1;	/* dump frame contents */
		} else if (ieee80211_is_atim(fc))
			title = "ATIM";
		else if (ieee80211_is_auth(fc))
			title = "Auth";
		else if (ieee80211_is_deauth(fc))
			title = "DeAuth";
		else if (ieee80211_is_disassoc(fc))
			title = "DisAssoc";
		else
			title = "Frame";

		rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
		if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
			bitrate = 0;
			WARN_ON_ONCE(1);
		} else {
			bitrate = iwl_rates[rate_idx].ieee / 2;
		}

		/* print frame summary.
		 * MAC addresses show just the last byte (for brevity),
		 *    but you can hack it to show more, if you'd like to. */
		if (dataframe)
			IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
				     "len=%u, rssi=%d, chnl=%d, rate=%u,\n",
				     title, le16_to_cpu(fc), header->addr1[5],
				     length, rssi, channel, bitrate);
		else {
			/* src/dst addresses assume managed mode */
			IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
				     "len=%u, rssi=%d, tim=%lu usec, "
				     "phy=0x%02x, chnl=%d\n",
				     title, le16_to_cpu(fc), header->addr1[5],
				     header->addr3[5], length, rssi,
				     tsf_low - priv->scan_start_tsf,
				     phy_flags, channel);
		}
	}
	if (print_dump)
		iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
}
882#endif
883
/*
 * iwlagn_translate_rx_status - normalize hardware decryption status bits
 *
 * Converts the raw MPDU decryption status (@decrypt_in) into the
 * RX_RES_STATUS_* form used by the Rx path: unencrypted / unknown-alg /
 * not-HW-decrypted packets pass through with only the security-type and
 * station bits set, otherwise the MIC/TTAK/ICV checks decide between
 * DECRYPT_OK and the corresponding BAD_* error.  The order of the early
 * returns mirrors the hardware's reporting priority.
 */
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
{
	u32 decrypt_out = 0;

	if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
					RX_RES_STATUS_STATION_FOUND)
		decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
				RX_RES_STATUS_NO_STATION_INFO_MISMATCH);

	decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);

	/* packet was not encrypted */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_NONE)
		return decrypt_out;

	/* packet was encrypted with unknown alg */
	if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
					RX_RES_STATUS_SEC_TYPE_ERR)
		return decrypt_out;

	/* decryption was not done in HW */
	if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
					RX_MPDU_RES_STATUS_DEC_DONE_MSK)
		return decrypt_out;

	switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {

	case RX_RES_STATUS_SEC_TYPE_CCMP:
		/* alg is CCM: check MIC only */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
			/* Bad MIC */
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;

		break;

	case RX_RES_STATUS_SEC_TYPE_TKIP:
		if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
			/* Bad TTAK */
			decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
			break;
		}
		/* fall through if TTAK OK */
	default:
		/* WEP and TKIP-with-good-TTAK: check the ICV */
		if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
			decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
		else
			decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
		break;
	}

	IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
					decrypt_in, decrypt_out);

	return decrypt_out;
}
942
/*
 * iwlagn_pass_packet_to_mac80211 - hand a received frame to the stack
 *
 * Drops the frame if the interface is closed or (for HW crypto) the
 * decryption status is bad.  Otherwise wraps the receive page in a small
 * skb via skb_add_rx_frag and passes it to mac80211.  On success the
 * page's ownership moves to the skb: rxb->page is NULLed so the Rx pool
 * will not free or reuse it.
 */
static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
					struct ieee80211_hdr *hdr,
					u16 len,
					u32 ampdu_status,
					struct iwl_rx_mem_buffer *rxb,
					struct ieee80211_rx_status *stats)
{
	struct sk_buff *skb;
	__le16 fc = hdr->frame_control;

	/* We only process data packets if the interface is open */
	if (unlikely(!priv->is_open)) {
		IWL_DEBUG_DROP_LIMIT(priv,
		    "Dropping packet while interface is not open.\n");
		return;
	}

	/* In case of HW accelerated crypto and bad decryption, drop */
	if (!priv->cfg->mod_params->sw_crypto &&
	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
		return;

	/* small skb: the payload lives in the page fragment, not linear data */
	skb = dev_alloc_skb(128);
	if (!skb) {
		IWL_ERR(priv, "dev_alloc_skb failed\n");
		return;
	}

	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);

	iwl_update_stats(priv, false, fc, len);
	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));

	ieee80211_rx(priv->hw, skb);
	priv->alloc_rxb_page--;
	rxb->page = NULL;
}
980
981/* Called for REPLY_RX (legacy ABG frames), or
982 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
983void iwlagn_rx_reply_rx(struct iwl_priv *priv,
984 struct iwl_rx_mem_buffer *rxb)
985{
986 struct ieee80211_hdr *header;
987 struct ieee80211_rx_status rx_status;
988 struct iwl_rx_packet *pkt = rxb_addr(rxb);
989 struct iwl_rx_phy_res *phy_res;
990 __le32 rx_pkt_status;
991 struct iwl4965_rx_mpdu_res_start *amsdu;
992 u32 len;
993 u32 ampdu_status;
994 u32 rate_n_flags;
995
996 /**
997 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
998 * REPLY_RX: physical layer info is in this buffer
999 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
1000 * command and cached in priv->last_phy_res
1001 *
1002 * Here we set up local variables depending on which command is
1003 * received.
1004 */
1005 if (pkt->hdr.cmd == REPLY_RX) {
1006 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
1007 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
1008 + phy_res->cfg_phy_cnt);
1009
1010 len = le16_to_cpu(phy_res->byte_count);
1011 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
1012 phy_res->cfg_phy_cnt + len);
1013 ampdu_status = le32_to_cpu(rx_pkt_status);
1014 } else {
1015 if (!priv->_agn.last_phy_res_valid) {
1016 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
1017 return;
1018 }
1019 phy_res = &priv->_agn.last_phy_res;
1020 amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
1021 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
1022 len = le16_to_cpu(amsdu->byte_count);
1023 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
1024 ampdu_status = iwlagn_translate_rx_status(priv,
1025 le32_to_cpu(rx_pkt_status));
1026 }
1027
1028 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
1029 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
1030 phy_res->cfg_phy_cnt);
1031 return;
1032 }
1033
1034 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
1035 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1036 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
1037 le32_to_cpu(rx_pkt_status));
1038 return;
1039 }
1040
1041 /* This will be used in several places later */
1042 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
1043
1044 /* rx_status carries information about the packet to mac80211 */
1045 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
1046 rx_status.freq =
1047 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
1048 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
1049 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1050 rx_status.rate_idx =
1051 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
1052 rx_status.flag = 0;
1053
1054 /* TSF isn't reliable. In order to allow smooth user experience,
1055 * this W/A doesn't propagate it to the mac80211 */
1056 /*rx_status.flag |= RX_FLAG_TSFT;*/
1057
1058 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
1059
1060 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1061 rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
1062
1063#ifdef CONFIG_IWLWIFI_DEBUG
1064 /* Set "1" to report good data frames in groups of 100 */
1065 if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
1066 iwlagn_dbg_report_frame(priv, phy_res, len, header, 1);
1067#endif
1068 iwl_dbg_log_rx_data_frame(priv, len, header);
1069 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
1070 rx_status.signal, (unsigned long long)rx_status.mactime);
1071
1072 /*
1073 * "antenna number"
1074 *
1075 * It seems that the antenna field in the phy flags value
1076 * is actually a bit field. This is undefined by radiotap,
1077 * it wants an actual antenna number but I always get "7"
1078 * for most legacy frames I receive indicating that the
1079 * same frame was received on all three RX chains.
1080 *
1081 * I think this field should be removed in favor of a
1082 * new 802.11n radiotap field "RX chains" that is defined
1083 * as a bitmask.
1084 */
1085 rx_status.antenna =
1086 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
1087 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
1088
1089 /* set the preamble flag if appropriate */
1090 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1091 rx_status.flag |= RX_FLAG_SHORTPRE;
1092
1093 /* Set up the HT phy flags */
1094 if (rate_n_flags & RATE_MCS_HT_MSK)
1095 rx_status.flag |= RX_FLAG_HT;
1096 if (rate_n_flags & RATE_MCS_HT40_MSK)
1097 rx_status.flag |= RX_FLAG_40MHZ;
1098 if (rate_n_flags & RATE_MCS_SGI_MSK)
1099 rx_status.flag |= RX_FLAG_SHORT_GI;
1100
1101 iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1102 rxb, &rx_status);
1103}
1104
1105/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
1106 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
1107void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
1108 struct iwl_rx_mem_buffer *rxb)
1109{
1110 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1111 priv->_agn.last_phy_res_valid = true;
1112 memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
1113 sizeof(struct iwl_rx_phy_res));
1114}
1115
1116static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
1117 struct ieee80211_vif *vif,
1118 enum ieee80211_band band,
1119 struct iwl_scan_channel *scan_ch)
1120{
1121 const struct ieee80211_supported_band *sband;
1122 const struct iwl_channel_info *ch_info;
1123 u16 passive_dwell = 0;
1124 u16 active_dwell = 0;
1125 int i, added = 0;
1126 u16 channel = 0;
1127
1128 sband = iwl_get_hw_mode(priv, band);
1129 if (!sband) {
1130 IWL_ERR(priv, "invalid band\n");
1131 return added;
1132 }
1133
1134 active_dwell = iwl_get_active_dwell_time(priv, band, 0);
1135 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
1136
1137 if (passive_dwell <= active_dwell)
1138 passive_dwell = active_dwell + 1;
1139
1140 /* only scan single channel, good enough to reset the RF */
1141 /* pick the first valid not in-use channel */
1142 if (band == IEEE80211_BAND_5GHZ) {
1143 for (i = 14; i < priv->channel_count; i++) {
1144 if (priv->channel_info[i].channel !=
1145 le16_to_cpu(priv->staging_rxon.channel)) {
1146 channel = priv->channel_info[i].channel;
1147 ch_info = iwl_get_channel_info(priv,
1148 band, channel);
1149 if (is_channel_valid(ch_info))
1150 break;
1151 }
1152 }
1153 } else {
1154 for (i = 0; i < 14; i++) {
1155 if (priv->channel_info[i].channel !=
1156 le16_to_cpu(priv->staging_rxon.channel)) {
1157 channel =
1158 priv->channel_info[i].channel;
1159 ch_info = iwl_get_channel_info(priv,
1160 band, channel);
1161 if (is_channel_valid(ch_info))
1162 break;
1163 }
1164 }
1165 }
1166 if (channel) {
1167 scan_ch->channel = cpu_to_le16(channel);
1168 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
1169 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1170 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1171 /* Set txpower levels to defaults */
1172 scan_ch->dsp_atten = 110;
1173 if (band == IEEE80211_BAND_5GHZ)
1174 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1175 else
1176 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1177 added++;
1178 } else
1179 IWL_ERR(priv, "no valid channel found\n");
1180 return added;
1181}
1182
1183static int iwl_get_channels_for_scan(struct iwl_priv *priv,
1184 struct ieee80211_vif *vif,
1185 enum ieee80211_band band,
1186 u8 is_active, u8 n_probes,
1187 struct iwl_scan_channel *scan_ch)
1188{
1189 struct ieee80211_channel *chan;
1190 const struct ieee80211_supported_band *sband;
1191 const struct iwl_channel_info *ch_info;
1192 u16 passive_dwell = 0;
1193 u16 active_dwell = 0;
1194 int added, i;
1195 u16 channel;
1196
1197 sband = iwl_get_hw_mode(priv, band);
1198 if (!sband)
1199 return 0;
1200
1201 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
1202 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
1203
1204 if (passive_dwell <= active_dwell)
1205 passive_dwell = active_dwell + 1;
1206
1207 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
1208 chan = priv->scan_request->channels[i];
1209
1210 if (chan->band != band)
1211 continue;
1212
1213 channel = ieee80211_frequency_to_channel(chan->center_freq);
1214 scan_ch->channel = cpu_to_le16(channel);
1215
1216 ch_info = iwl_get_channel_info(priv, band, channel);
1217 if (!is_channel_valid(ch_info)) {
1218 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
1219 channel);
1220 continue;
1221 }
1222
1223 if (!is_active || is_channel_passive(ch_info) ||
1224 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
1225 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
1226 else
1227 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
1228
1229 if (n_probes)
1230 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
1231
1232 scan_ch->active_dwell = cpu_to_le16(active_dwell);
1233 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
1234
1235 /* Set txpower levels to defaults */
1236 scan_ch->dsp_atten = 110;
1237
1238 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
1239 * power level:
1240 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
1241 */
1242 if (band == IEEE80211_BAND_5GHZ)
1243 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
1244 else
1245 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
1246
1247 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
1248 channel, le32_to_cpu(scan_ch->type),
1249 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
1250 "ACTIVE" : "PASSIVE",
1251 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
1252 active_dwell : passive_dwell);
1253
1254 scan_ch++;
1255 added++;
1256 }
1257
1258 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
1259 return added;
1260}
1261
/*
 * iwlagn_request_scan - build and send a REPLY_SCAN_CMD to the uCode
 *
 * Validates driver and scan state, lazily allocates the scan command
 * buffer, fills dwell times, probe-request SSIDs, TX parameters and
 * channel list, then sends the command synchronously.  On any failure
 * control jumps to "done", which clears the scanning status bits and
 * notifies mac80211 that the scan completed/aborted.
 */
void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
{
	struct iwl_host_cmd cmd = {
		.id = REPLY_SCAN_CMD,
		.len = sizeof(struct iwl_scan_cmd),
		.flags = CMD_SIZE_HUGE,
	};
	struct iwl_scan_cmd *scan;
	struct ieee80211_conf *conf = NULL;
	u32 rate_flags = 0;
	u16 cmd_len;
	u16 rx_chain = 0;
	enum ieee80211_band band;
	u8 n_probes = 0;
	u8 rx_ant = priv->hw_params.valid_rx_ant;
	u8 rate;
	bool is_active = false;
	int chan_mod;
	u8 active_chains;

	/* NOTE(review): conf is fetched here but never read below —
	 * looks like dead code; confirm before removing. */
	conf = ieee80211_get_hw_conf(priv->hw);

	cancel_delayed_work(&priv->scan_check);

	/* ---- state checks: every early exit must go through "done" ---- */
	if (!iwl_is_ready(priv)) {
		IWL_WARN(priv, "request scan called when driver not ready.\n");
		goto done;
	}

	/* Make sure the scan wasn't canceled before this queued work
	 * was given the chance to run... */
	if (!test_bit(STATUS_SCANNING, &priv->status))
		goto done;

	/* This should never be called or scheduled if there is currently
	 * a scan active in the hardware. */
	if (test_bit(STATUS_SCAN_HW, &priv->status)) {
		IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
			       "Ignoring second request.\n");
		goto done;
	}

	if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
		IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
		goto done;
	}

	if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
		IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
		goto done;
	}

	if (iwl_is_rfkill(priv)) {
		IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
		goto done;
	}

	if (!test_bit(STATUS_READY, &priv->status)) {
		IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
		goto done;
	}

	/* Lazily allocate the (huge) scan command buffer; it is kept in
	 * priv->scan_cmd and reused for subsequent scans. */
	if (!priv->scan_cmd) {
		priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
					 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
		if (!priv->scan_cmd) {
			IWL_DEBUG_SCAN(priv,
				       "fail to allocate memory for scan\n");
			goto done;
		}
	}
	scan = priv->scan_cmd;
	memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);

	scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
	scan->quiet_time = IWL_ACTIVE_QUIET_TIME;

	/* While associated, limit time off-channel and encode the
	 * suspend/resume cadence relative to the beacon interval so we
	 * do not miss beacons from the AP. */
	if (iwl_is_associated(priv)) {
		u16 interval = 0;
		u32 extra;
		u32 suspend_time = 100;
		u32 scan_suspend_time = 100;
		unsigned long flags;

		IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
		/* lock protects vif's bss_conf against concurrent update */
		spin_lock_irqsave(&priv->lock, flags);
		interval = vif ? vif->bss_conf.beacon_int : 0;
		spin_unlock_irqrestore(&priv->lock, flags);

		scan->suspend_time = 0;
		scan->max_out_time = cpu_to_le32(200 * 1024);
		if (!interval)
			interval = suspend_time;

		/* suspend_time format: upper bits = beacon count,
		 * lower bits = remainder in uSec (x1024) */
		extra = (suspend_time / interval) << 22;
		scan_suspend_time = (extra |
		    ((suspend_time % interval) * 1024));
		scan->suspend_time = cpu_to_le32(scan_suspend_time);
		IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
			       scan_suspend_time, interval);
	}

	/* Copy requested SSIDs into the direct-scan list; any non-empty
	 * SSID makes this an active scan. Internal short scans stay passive
	 * and never touch priv->scan_request (which may be stale/NULL). */
	if (priv->is_internal_short_scan) {
		IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
	} else if (priv->scan_request->n_ssids) {
		int i, p = 0;
		IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
		for (i = 0; i < priv->scan_request->n_ssids; i++) {
			/* always does wildcard anyway */
			if (!priv->scan_request->ssids[i].ssid_len)
				continue;
			scan->direct_scan[p].id = WLAN_EID_SSID;
			scan->direct_scan[p].len =
				priv->scan_request->ssids[i].ssid_len;
			memcpy(scan->direct_scan[p].ssid,
			       priv->scan_request->ssids[i].ssid,
			       priv->scan_request->ssids[i].ssid_len);
			n_probes++;
			p++;
		}
		is_active = true;
	} else
		IWL_DEBUG_SCAN(priv, "Start passive scan.\n");

	scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
	scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
	scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;

	/* Per-band probe rate and good-CRC-threshold selection. */
	switch (priv->scan_band) {
	case IEEE80211_BAND_2GHZ:
		scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
		chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
				       >> RXON_FLG_CHANNEL_MODE_POS;
		/* in pure-40MHz mode CCK rates are unavailable */
		if (chan_mod == CHANNEL_MODE_PURE_40) {
			rate = IWL_RATE_6M_PLCP;
		} else {
			rate = IWL_RATE_1M_PLCP;
			rate_flags = RATE_MCS_CCK_MSK;
		}
		scan->good_CRC_th = IWL_GOOD_CRC_TH_DISABLED;
		break;
	case IEEE80211_BAND_5GHZ:
		rate = IWL_RATE_6M_PLCP;
		/*
		 * If active scanning is requested but a certain channel is
		 * marked passive, we can do active scanning if we detect
		 * transmissions.
		 *
		 * There is an issue with some firmware versions that triggers
		 * a sysassert on a "good CRC threshold" of zero (== disabled),
		 * on a radar channel even though this means that we should NOT
		 * send probes.
		 *
		 * The "good CRC threshold" is the number of frames that we
		 * need to receive during our dwell time on a channel before
		 * sending out probes -- setting this to a huge value will
		 * mean we never reach it, but at the same time work around
		 * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
		 * here instead of IWL_GOOD_CRC_TH_DISABLED.
		 */
		scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
						IWL_GOOD_CRC_TH_NEVER;
		break;
	default:
		IWL_WARN(priv, "Invalid scan band count\n");
		goto done;
	}

	band = priv->scan_band;

	if (priv->cfg->scan_antennas[band])
		rx_ant = priv->cfg->scan_antennas[band];

	/* alternate TX antenna between scans to even out wear */
	priv->scan_tx_ant[band] =
			 iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
	rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
	scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);

	/* In power save mode use one chain, otherwise use all chains */
	if (test_bit(STATUS_POWER_PMI, &priv->status)) {
		/* rx_ant has been set to all valid chains previously */
		active_chains = rx_ant &
				((u8)(priv->chain_noise_data.active_chains));
		if (!active_chains)
			active_chains = rx_ant;

		IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
				priv->chain_noise_data.active_chains);

		rx_ant = first_antenna(active_chains);
	}
	/* MIMO is not used here, but value is required */
	rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
	scan->rx_chain = cpu_to_le16(rx_chain);
	/* Build the probe request; internal scans carry no extra IEs. */
	if (!priv->is_internal_short_scan) {
		cmd_len = iwl_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					priv->scan_request->ie,
					priv->scan_request->ie_len,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));
	} else {
		cmd_len = iwl_fill_probe_req(priv,
					(struct ieee80211_mgmt *)scan->data,
					NULL, 0,
					IWL_MAX_SCAN_SIZE - sizeof(*scan));

	}
	scan->tx_cmd.len = cpu_to_le16(cmd_len);

	scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
			       RXON_FILTER_BCON_AWARE_MSK);

	/* Channel entries are appended right after the probe request
	 * inside scan->data, hence the tx_cmd.len offset. */
	if (priv->is_internal_short_scan) {
		scan->channel_count =
			iwl_get_single_channel_for_scan(priv, vif, band,
				(void *)&scan->data[le16_to_cpu(
				scan->tx_cmd.len)]);
	} else {
		scan->channel_count =
			iwl_get_channels_for_scan(priv, vif, band,
					is_active, n_probes,
					(void *)&scan->data[le16_to_cpu(
					scan->tx_cmd.len)]);
	}
	if (scan->channel_count == 0) {
		IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
		goto done;
	}

	/* Total command length = fixed header + probe req + channel list. */
	cmd.len += le16_to_cpu(scan->tx_cmd.len) +
		scan->channel_count * sizeof(struct iwl_scan_channel);
	cmd.data = scan;
	scan->len = cpu_to_le16(cmd.len);

	set_bit(STATUS_SCAN_HW, &priv->status);
	if (iwl_send_cmd_sync(priv, &cmd))
		goto done;

	/* Arm the watchdog that recovers from a scan that never completes. */
	queue_delayed_work(priv->workqueue, &priv->scan_check,
			   IWL_SCAN_CHECK_WATCHDOG);

	return;

 done:
	/* Cannot perform scan. Make sure we clear scanning
	 * bits from status so next scan request can be performed.
	 * If we don't clear scanning status bit here all next scan
	 * will fail
	 */
	clear_bit(STATUS_SCAN_HW, &priv->status);
	clear_bit(STATUS_SCANNING, &priv->status);
	/* inform mac80211 scan aborted */
	queue_work(priv->workqueue, &priv->scan_completed);
}
1519
1520int iwlagn_manage_ibss_station(struct iwl_priv *priv,
1521 struct ieee80211_vif *vif, bool add)
1522{
1523 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1524
1525 if (add)
1526 return iwl_add_bssid_station(priv, vif->bss_conf.bssid, true,
1527 &vif_priv->ibss_bssid_sta_id);
1528 return iwl_remove_station(priv, vif_priv->ibss_bssid_sta_id,
1529 vif->bss_conf.bssid);
1530}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 1460116d329f..cf4a95bae4ff 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -295,11 +295,11 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
295 return tl->total; 295 return tl->total;
296} 296}
297 297
298static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv, 298static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
299 struct iwl_lq_sta *lq_data, u8 tid, 299 struct iwl_lq_sta *lq_data, u8 tid,
300 struct ieee80211_sta *sta) 300 struct ieee80211_sta *sta)
301{ 301{
302 int ret; 302 int ret = -EAGAIN;
303 303
304 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) { 304 if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
305 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 305 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
@@ -313,29 +313,29 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
313 */ 313 */
314 IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n", 314 IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
315 tid); 315 tid);
316 ret = ieee80211_stop_tx_ba_session(sta, tid, 316 ieee80211_stop_tx_ba_session(sta, tid,
317 WLAN_BACK_INITIATOR); 317 WLAN_BACK_INITIATOR);
318 } 318 }
319 } 319 } else
320 IWL_ERR(priv, "Fail finding valid aggregation tid: %d\n", tid);
321 return ret;
320} 322}
321 323
322static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, 324static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
323 struct iwl_lq_sta *lq_data, 325 struct iwl_lq_sta *lq_data,
324 struct ieee80211_sta *sta) 326 struct ieee80211_sta *sta)
325{ 327{
326 if ((tid < TID_MAX_LOAD_COUNT)) 328 if ((tid < TID_MAX_LOAD_COUNT) &&
327 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); 329 !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) {
328 else if (tid == IWL_AGG_ALL_TID) 330 if (priv->cfg->use_rts_for_ht) {
329 for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++) 331 /*
330 rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); 332 * switch to RTS/CTS if it is the prefer protection
331 if (priv->cfg->use_rts_for_ht) { 333 * method for HT traffic
332 /* 334 */
333 * switch to RTS/CTS if it is the prefer protection method 335 IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
334 * for HT traffic 336 priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
335 */ 337 iwlcore_commit_rxon(priv);
336 IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n"); 338 }
337 priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
338 iwlcore_commit_rxon(priv);
339 } 339 }
340} 340}
341 341
@@ -611,10 +611,6 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
611 struct ieee80211_hdr *hdr, 611 struct ieee80211_hdr *hdr,
612 enum iwl_table_type rate_type) 612 enum iwl_table_type rate_type)
613{ 613{
614 if (hdr && is_multicast_ether_addr(hdr->addr1) &&
615 lq_sta->active_rate_basic)
616 return lq_sta->active_rate_basic;
617
618 if (is_legacy(rate_type)) { 614 if (is_legacy(rate_type)) {
619 return lq_sta->active_legacy_rate; 615 return lq_sta->active_legacy_rate;
620 } else { 616 } else {
@@ -775,6 +771,15 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
775 771
776 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); 772 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
777 773
774 /* Treat uninitialized rate scaling data same as non-existing. */
775 if (!lq_sta) {
776 IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
777 return;
778 } else if (!lq_sta->drv) {
779 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
780 return;
781 }
782
778 if (!ieee80211_is_data(hdr->frame_control) || 783 if (!ieee80211_is_data(hdr->frame_control) ||
779 info->flags & IEEE80211_TX_CTL_NO_ACK) 784 info->flags & IEEE80211_TX_CTL_NO_ACK)
780 return; 785 return;
@@ -784,10 +789,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
784 !(info->flags & IEEE80211_TX_STAT_AMPDU)) 789 !(info->flags & IEEE80211_TX_STAT_AMPDU))
785 return; 790 return;
786 791
787 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
788 !lq_sta->ibss_sta_added)
789 return;
790
791 /* 792 /*
792 * Ignore this Tx frame response if its initial rate doesn't match 793 * Ignore this Tx frame response if its initial rate doesn't match
793 * that of latest Link Quality command. There may be stragglers 794 * that of latest Link Quality command. There may be stragglers
@@ -833,7 +834,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
833 lq_sta->missed_rate_counter++; 834 lq_sta->missed_rate_counter++;
834 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { 835 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
835 lq_sta->missed_rate_counter = 0; 836 lq_sta->missed_rate_counter = 0;
836 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 837 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
837 } 838 }
838 /* Regardless, ignore this status info for outdated rate */ 839 /* Regardless, ignore this status info for outdated rate */
839 return; 840 return;
@@ -867,14 +868,14 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
867 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, 868 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
868 &rs_index); 869 &rs_index);
869 rs_collect_tx_data(curr_tbl, rs_index, 870 rs_collect_tx_data(curr_tbl, rs_index,
870 info->status.ampdu_ack_len, 871 info->status.ampdu_len,
871 info->status.ampdu_ack_map); 872 info->status.ampdu_ack_len);
872 873
873 /* Update success/fail counts if not searching for new mode */ 874 /* Update success/fail counts if not searching for new mode */
874 if (lq_sta->stay_in_tbl) { 875 if (lq_sta->stay_in_tbl) {
875 lq_sta->total_success += info->status.ampdu_ack_map; 876 lq_sta->total_success += info->status.ampdu_ack_len;
876 lq_sta->total_failed += (info->status.ampdu_ack_len - 877 lq_sta->total_failed += (info->status.ampdu_len -
877 info->status.ampdu_ack_map); 878 info->status.ampdu_ack_len);
878 } 879 }
879 } else { 880 } else {
880 /* 881 /*
@@ -1913,7 +1914,7 @@ static u32 rs_update_rate_tbl(struct iwl_priv *priv,
1913 /* Update uCode's rate table. */ 1914 /* Update uCode's rate table. */
1914 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); 1915 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
1915 rs_fill_link_cmd(priv, lq_sta, rate); 1916 rs_fill_link_cmd(priv, lq_sta, rate);
1916 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 1917 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
1917 1918
1918 return rate; 1919 return rate;
1919} 1920}
@@ -2002,7 +2003,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2002 /* rates available for this association, and for modulation mode */ 2003 /* rates available for this association, and for modulation mode */
2003 rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); 2004 rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
2004 2005
2005 IWL_DEBUG_RATE(priv, "mask 0x%04X \n", rate_mask); 2006 IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
2006 2007
2007 /* mask with station rate restriction */ 2008 /* mask with station rate restriction */
2008 if (is_legacy(tbl->lq_type)) { 2009 if (is_legacy(tbl->lq_type)) {
@@ -2077,10 +2078,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2077 } 2078 }
2078 /* Else we have enough samples; calculate estimate of 2079 /* Else we have enough samples; calculate estimate of
2079 * actual average throughput */ 2080 * actual average throughput */
2080 2081 if (window->average_tpt != ((window->success_ratio *
2081 /* Sanity-check TPT calculations */ 2082 tbl->expected_tpt[index] + 64) / 128)) {
2082 BUG_ON(window->average_tpt != ((window->success_ratio * 2083 IWL_ERR(priv, "expected_tpt should have been calculated by now\n");
2083 tbl->expected_tpt[index] + 64) / 128)); 2084 window->average_tpt = ((window->success_ratio *
2085 tbl->expected_tpt[index] + 64) / 128);
2086 }
2084 2087
2085 /* If we are searching for better modulation mode, check success. */ 2088 /* If we are searching for better modulation mode, check success. */
2086 if (lq_sta->search_better_tbl && 2089 if (lq_sta->search_better_tbl &&
@@ -2289,7 +2292,7 @@ lq_update:
2289 IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n", 2292 IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
2290 tbl->current_rate, index); 2293 tbl->current_rate, index);
2291 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate); 2294 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
2292 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2295 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
2293 } else 2296 } else
2294 done_search = 1; 2297 done_search = 1;
2295 } 2298 }
@@ -2334,11 +2337,22 @@ out:
2334 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); 2337 tbl->current_rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
2335 i = index; 2338 i = index;
2336 lq_sta->last_txrate_idx = i; 2339 lq_sta->last_txrate_idx = i;
2337
2338 return;
2339} 2340}
2340 2341
2341 2342/**
2343 * rs_initialize_lq - Initialize a station's hardware rate table
2344 *
2345 * The uCode's station table contains a table of fallback rates
2346 * for automatic fallback during transmission.
2347 *
2348 * NOTE: This sets up a default set of values. These will be replaced later
2349 * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
2350 * rc80211_simple.
2351 *
2352 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2353 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2354 * which requires station table entry to exist).
2355 */
2342static void rs_initialize_lq(struct iwl_priv *priv, 2356static void rs_initialize_lq(struct iwl_priv *priv,
2343 struct ieee80211_conf *conf, 2357 struct ieee80211_conf *conf,
2344 struct ieee80211_sta *sta, 2358 struct ieee80211_sta *sta,
@@ -2357,10 +2371,6 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2357 2371
2358 i = lq_sta->last_txrate_idx; 2372 i = lq_sta->last_txrate_idx;
2359 2373
2360 if ((lq_sta->lq.sta_id == 0xff) &&
2361 (priv->iw_mode == NL80211_IFTYPE_ADHOC))
2362 goto out;
2363
2364 valid_tx_ant = priv->hw_params.valid_tx_ant; 2374 valid_tx_ant = priv->hw_params.valid_tx_ant;
2365 2375
2366 if (!lq_sta->search_better_tbl) 2376 if (!lq_sta->search_better_tbl)
@@ -2388,7 +2398,8 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2388 tbl->current_rate = rate; 2398 tbl->current_rate = rate;
2389 rs_set_expected_tpt_table(lq_sta, tbl); 2399 rs_set_expected_tpt_table(lq_sta, tbl);
2390 rs_fill_link_cmd(NULL, lq_sta, rate); 2400 rs_fill_link_cmd(NULL, lq_sta, rate);
2391 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2401 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2402 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_SYNC, true);
2392 out: 2403 out:
2393 return; 2404 return;
2394} 2405}
@@ -2399,10 +2410,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2399 2410
2400 struct sk_buff *skb = txrc->skb; 2411 struct sk_buff *skb = txrc->skb;
2401 struct ieee80211_supported_band *sband = txrc->sband; 2412 struct ieee80211_supported_band *sband = txrc->sband;
2402 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 2413 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
2403 struct ieee80211_conf *conf = &priv->hw->conf;
2404 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2405 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2406 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2414 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2407 struct iwl_lq_sta *lq_sta = priv_sta; 2415 struct iwl_lq_sta *lq_sta = priv_sta;
2408 int rate_idx; 2416 int rate_idx;
@@ -2420,30 +2428,18 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2420 lq_sta->max_rate_idx = -1; 2428 lq_sta->max_rate_idx = -1;
2421 } 2429 }
2422 2430
2431 /* Treat uninitialized rate scaling data same as non-existing. */
2432 if (lq_sta && !lq_sta->drv) {
2433 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
2434 priv_sta = NULL;
2435 }
2436
2423 /* Send management frames and NO_ACK data using lowest rate. */ 2437 /* Send management frames and NO_ACK data using lowest rate. */
2424 if (rate_control_send_low(sta, priv_sta, txrc)) 2438 if (rate_control_send_low(sta, priv_sta, txrc))
2425 return; 2439 return;
2426 2440
2427 rate_idx = lq_sta->last_txrate_idx; 2441 rate_idx = lq_sta->last_txrate_idx;
2428 2442
2429 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
2430 !lq_sta->ibss_sta_added) {
2431 u8 sta_id = iwl_find_station(priv, hdr->addr1);
2432
2433 if (sta_id == IWL_INVALID_STATION) {
2434 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
2435 hdr->addr1);
2436 sta_id = iwl_add_station(priv, hdr->addr1,
2437 false, CMD_ASYNC, ht_cap);
2438 }
2439 if ((sta_id != IWL_INVALID_STATION)) {
2440 lq_sta->lq.sta_id = sta_id;
2441 lq_sta->lq.rs_table[0].rate_n_flags = 0;
2442 lq_sta->ibss_sta_added = 1;
2443 rs_initialize_lq(priv, conf, sta, lq_sta);
2444 }
2445 }
2446
2447 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { 2443 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
2448 rate_idx -= IWL_FIRST_OFDM_RATE; 2444 rate_idx -= IWL_FIRST_OFDM_RATE;
2449 /* 6M and 9M shared same MCS index */ 2445 /* 6M and 9M shared same MCS index */
@@ -2493,16 +2489,25 @@ static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2493 return lq_sta; 2489 return lq_sta;
2494} 2490}
2495 2491
2496static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband, 2492/*
2497 struct ieee80211_sta *sta, void *priv_sta) 2493 * Called after adding a new station to initialize rate scaling
2494 */
2495void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
2498{ 2496{
2499 int i, j; 2497 int i, j;
2500 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 2498 struct ieee80211_hw *hw = priv->hw;
2501 struct ieee80211_conf *conf = &priv->hw->conf; 2499 struct ieee80211_conf *conf = &priv->hw->conf;
2502 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2500 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2503 struct iwl_lq_sta *lq_sta = priv_sta; 2501 struct iwl_station_priv *sta_priv;
2502 struct iwl_lq_sta *lq_sta;
2503 struct ieee80211_supported_band *sband;
2504
2505 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2506 lq_sta = &sta_priv->lq_sta;
2507 sband = hw->wiphy->bands[conf->channel->band];
2504 2508
2505 lq_sta->lq.sta_id = 0xff; 2509
2510 lq_sta->lq.sta_id = sta_id;
2506 2511
2507 for (j = 0; j < LQ_SIZE; j++) 2512 for (j = 0; j < LQ_SIZE; j++)
2508 for (i = 0; i < IWL_RATE_COUNT; i++) 2513 for (i = 0; i < IWL_RATE_COUNT; i++)
@@ -2514,39 +2519,18 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2514 for (i = 0; i < IWL_RATE_COUNT; i++) 2519 for (i = 0; i < IWL_RATE_COUNT; i++)
2515 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); 2520 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2516 2521
2517 IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init ***\n"); 2522 IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
2523 sta_id);
2518 /* TODO: what is a good starting rate for STA? About middle? Maybe not 2524 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2519 * the lowest or the highest rate.. Could consider using RSSI from 2525 * the lowest or the highest rate.. Could consider using RSSI from
2520 * previous packets? Need to have IEEE 802.1X auth succeed immediately 2526 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2521 * after assoc.. */ 2527 * after assoc.. */
2522 2528
2523 lq_sta->ibss_sta_added = 0;
2524 if (priv->iw_mode == NL80211_IFTYPE_AP) {
2525 u8 sta_id = iwl_find_station(priv,
2526 sta->addr);
2527
2528 /* for IBSS the call are from tasklet */
2529 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
2530
2531 if (sta_id == IWL_INVALID_STATION) {
2532 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
2533 sta_id = iwl_add_station(priv, sta->addr, false,
2534 CMD_ASYNC, ht_cap);
2535 }
2536 if ((sta_id != IWL_INVALID_STATION)) {
2537 lq_sta->lq.sta_id = sta_id;
2538 lq_sta->lq.rs_table[0].rate_n_flags = 0;
2539 }
2540 /* FIXME: this is w/a remove it later */
2541 priv->assoc_station_added = 1;
2542 }
2543
2544 lq_sta->is_dup = 0; 2529 lq_sta->is_dup = 0;
2545 lq_sta->max_rate_idx = -1; 2530 lq_sta->max_rate_idx = -1;
2546 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; 2531 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2547 lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config); 2532 lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config);
2548 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); 2533 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2549 lq_sta->active_rate_basic = priv->active_rate_basic;
2550 lq_sta->band = priv->band; 2534 lq_sta->band = priv->band;
2551 /* 2535 /*
2552 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), 2536 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
@@ -2574,8 +2558,17 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2574 lq_sta->active_mimo3_rate); 2558 lq_sta->active_mimo3_rate);
2575 2559
2576 /* These values will be overridden later */ 2560 /* These values will be overridden later */
2577 lq_sta->lq.general_params.single_stream_ant_msk = ANT_A; 2561 lq_sta->lq.general_params.single_stream_ant_msk =
2578 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB; 2562 first_antenna(priv->hw_params.valid_tx_ant);
2563 lq_sta->lq.general_params.dual_stream_ant_msk =
2564 priv->hw_params.valid_tx_ant &
2565 ~first_antenna(priv->hw_params.valid_tx_ant);
2566 if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
2567 lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
2568 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
2569 lq_sta->lq.general_params.dual_stream_ant_msk =
2570 priv->hw_params.valid_tx_ant;
2571 }
2579 2572
2580 /* as default allow aggregation for all tids */ 2573 /* as default allow aggregation for all tids */
2581 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID; 2574 lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
@@ -2794,7 +2787,7 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2794 2787
2795 if (lq_sta->dbg_fixed_rate) { 2788 if (lq_sta->dbg_fixed_rate) {
2796 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); 2789 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2797 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC); 2790 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
2798 } 2791 }
2799 2792
2800 return count; 2793 return count;
@@ -2950,12 +2943,6 @@ static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2950 desc += sprintf(buff+desc, 2943 desc += sprintf(buff+desc,
2951 "Bit Rate= %d Mb/s\n", 2944 "Bit Rate= %d Mb/s\n",
2952 iwl_rates[lq_sta->last_txrate_idx].ieee >> 1); 2945 iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
2953 desc += sprintf(buff+desc,
2954 "Signal Level= %d dBm\tNoise Level= %d dBm\n",
2955 priv->last_rx_rssi, priv->last_rx_noise);
2956 desc += sprintf(buff+desc,
2957 "Tsf= 0x%llx\tBeacon time= 0x%08X\n",
2958 priv->last_tsf, priv->last_beacon_time);
2959 2946
2960 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); 2947 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2961 return ret; 2948 return ret;
@@ -2995,12 +2982,21 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
2995} 2982}
2996#endif 2983#endif
2997 2984
2985/*
2986 * Initialization of rate scaling information is done by driver after
2987 * the station is added. Since mac80211 calls this function before a
2988 * station is added we ignore it.
2989 */
2990static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
2991 struct ieee80211_sta *sta, void *priv_sta)
2992{
2993}
2998static struct rate_control_ops rs_ops = { 2994static struct rate_control_ops rs_ops = {
2999 .module = NULL, 2995 .module = NULL,
3000 .name = RS_NAME, 2996 .name = RS_NAME,
3001 .tx_status = rs_tx_status, 2997 .tx_status = rs_tx_status,
3002 .get_rate = rs_get_rate, 2998 .get_rate = rs_get_rate,
3003 .rate_init = rs_rate_init, 2999 .rate_init = rs_rate_init_stub,
3004 .alloc = rs_alloc, 3000 .alloc = rs_alloc,
3005 .free = rs_free, 3001 .free = rs_free,
3006 .alloc_sta = rs_alloc_sta, 3002 .alloc_sta = rs_alloc_sta,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index e71923961e69..8292f6d48ec6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -403,7 +403,6 @@ struct iwl_lq_sta {
403 u8 is_green; 403 u8 is_green;
404 u8 is_dup; 404 u8 is_dup;
405 enum ieee80211_band band; 405 enum ieee80211_band band;
406 u8 ibss_sta_added;
407 406
408 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 407 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
409 u32 supp_rates; 408 u32 supp_rates;
@@ -411,7 +410,6 @@ struct iwl_lq_sta {
411 u16 active_siso_rate; 410 u16 active_siso_rate;
412 u16 active_mimo2_rate; 411 u16 active_mimo2_rate;
413 u16 active_mimo3_rate; 412 u16 active_mimo3_rate;
414 u16 active_rate_basic;
415 s8 max_rate_idx; /* Max rate set by user */ 413 s8 max_rate_idx; /* Max rate set by user */
416 u8 missed_rate_counter; 414 u8 missed_rate_counter;
417 415
@@ -479,6 +477,12 @@ static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
479 */ 477 */
480extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id); 478extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
481 479
480/* Initialize station's rate scaling information after adding station */
481extern void iwl_rs_rate_init(struct iwl_priv *priv,
482 struct ieee80211_sta *sta, u8 sta_id);
483extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
484 struct ieee80211_sta *sta, u8 sta_id);
485
482/** 486/**
483 * iwl_rate_control_register - Register the rate control algorithm callbacks 487 * iwl_rate_control_register - Register the rate control algorithm callbacks
484 * 488 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
new file mode 100644
index 000000000000..c402bfc83f36
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -0,0 +1,1340 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40#include "iwl-agn-hw.h"
41#include "iwl-agn.h"
42
43/*
44 * mac80211 queues, ACs, hardware queues, FIFOs.
45 *
46 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
47 *
48 * Mac80211 uses the following numbers, which we get as from it
49 * by way of skb_get_queue_mapping(skb):
50 *
51 * VO 0
52 * VI 1
53 * BE 2
54 * BK 3
55 *
56 *
57 * Regular (not A-MPDU) frames are put into hardware queues corresponding
58 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
59 * own queue per aggregation session (RA/TID combination), such queues are
60 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
61 * order to map frames to the right queue, we also need an AC->hw queue
62 * mapping. This is implemented here.
63 *
64 * Due to the way hw queues are set up (by the hw specific modules like
65 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
66 * mapping.
67 */
68
69static const u8 tid_to_ac[] = {
70 /* this matches the mac80211 numbers */
71 2, 3, 3, 2, 1, 1, 0, 0
72};
73
74static const u8 ac_to_fifo[] = {
75 IWL_TX_FIFO_VO,
76 IWL_TX_FIFO_VI,
77 IWL_TX_FIFO_BE,
78 IWL_TX_FIFO_BK,
79};
80
81static inline int get_fifo_from_ac(u8 ac)
82{
83 return ac_to_fifo[ac];
84}
85
86static inline int get_ac_from_tid(u16 tid)
87{
88 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
89 return tid_to_ac[tid];
90
91 /* no support for TIDs 8-15 yet */
92 return -EINVAL;
93}
94
95static inline int get_fifo_from_tid(u16 tid)
96{
97 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
98 return get_fifo_from_ac(tid_to_ac[tid]);
99
100 /* no support for TIDs 8-15 yet */
101 return -EINVAL;
102}
103
104/**
105 * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
106 */
107void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
108 struct iwl_tx_queue *txq,
109 u16 byte_cnt)
110{
111 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
112 int write_ptr = txq->q.write_ptr;
113 int txq_id = txq->q.id;
114 u8 sec_ctl = 0;
115 u8 sta_id = 0;
116 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
117 __le16 bc_ent;
118
119 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
120
121 if (txq_id != IWL_CMD_QUEUE_NUM) {
122 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
123 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
124
125 switch (sec_ctl & TX_CMD_SEC_MSK) {
126 case TX_CMD_SEC_CCM:
127 len += CCMP_MIC_LEN;
128 break;
129 case TX_CMD_SEC_TKIP:
130 len += TKIP_ICV_LEN;
131 break;
132 case TX_CMD_SEC_WEP:
133 len += WEP_IV_LEN + WEP_ICV_LEN;
134 break;
135 }
136 }
137
138 bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
139
140 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
141
142 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
143 scd_bc_tbl[txq_id].
144 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
145}
146
147void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
148 struct iwl_tx_queue *txq)
149{
150 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
151 int txq_id = txq->q.id;
152 int read_ptr = txq->q.read_ptr;
153 u8 sta_id = 0;
154 __le16 bc_ent;
155
156 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
157
158 if (txq_id != IWL_CMD_QUEUE_NUM)
159 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
160
161 bc_ent = cpu_to_le16(1 | (sta_id << 12));
162 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
163
164 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
165 scd_bc_tbl[txq_id].
166 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
167}
168
169static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
170 u16 txq_id)
171{
172 u32 tbl_dw_addr;
173 u32 tbl_dw;
174 u16 scd_q2ratid;
175
176 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
177
178 tbl_dw_addr = priv->scd_base_addr +
179 IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
180
181 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
182
183 if (txq_id & 0x1)
184 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
185 else
186 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
187
188 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
189
190 return 0;
191}
192
193static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
194{
195 /* Simply stop the queue, but don't change any configuration;
196 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
197 iwl_write_prph(priv,
198 IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
199 (0 << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
200 (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
201}
202
203void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
204 int txq_id, u32 index)
205{
206 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
207 (index & 0xff) | (txq_id << 8));
208 iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(txq_id), index);
209}
210
211void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
212 struct iwl_tx_queue *txq,
213 int tx_fifo_id, int scd_retry)
214{
215 int txq_id = txq->q.id;
216 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
217
218 iwl_write_prph(priv, IWLAGN_SCD_QUEUE_STATUS_BITS(txq_id),
219 (active << IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
220 (tx_fifo_id << IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF) |
221 (1 << IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL) |
222 IWLAGN_SCD_QUEUE_STTS_REG_MSK);
223
224 txq->sched_retry = scd_retry;
225
226 IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
227 active ? "Activate" : "Deactivate",
228 scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
229}
230
231int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
232 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
233{
234 unsigned long flags;
235 u16 ra_tid;
236
237 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
238 (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
239 <= txq_id)) {
240 IWL_WARN(priv,
241 "queue number out of range: %d, must be %d to %d\n",
242 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
243 IWLAGN_FIRST_AMPDU_QUEUE +
244 priv->cfg->num_of_ampdu_queues - 1);
245 return -EINVAL;
246 }
247
248 ra_tid = BUILD_RAxTID(sta_id, tid);
249
250 /* Modify device's station table to Tx this TID */
251 iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
252
253 spin_lock_irqsave(&priv->lock, flags);
254
255 /* Stop this Tx queue before configuring it */
256 iwlagn_tx_queue_stop_scheduler(priv, txq_id);
257
258 /* Map receiver-address / traffic-ID to this queue */
259 iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
260
261 /* Set this queue as a chain-building queue */
262 iwl_set_bits_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL, (1<<txq_id));
263
264 /* enable aggregations for the queue */
265 iwl_set_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1<<txq_id));
266
267 /* Place first TFD at index corresponding to start sequence number.
268 * Assumes that ssn_idx is valid (!= 0xFFF) */
269 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
270 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
271 iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
272
273 /* Set up Tx window size and frame limit for this queue */
274 iwl_write_targ_mem(priv, priv->scd_base_addr +
275 IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
276 sizeof(u32),
277 ((SCD_WIN_SIZE <<
278 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
279 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
280 ((SCD_FRAME_LIMIT <<
281 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
282 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
283
284 iwl_set_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
285
286 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
287 iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
288
289 spin_unlock_irqrestore(&priv->lock, flags);
290
291 return 0;
292}
293
294int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
295 u16 ssn_idx, u8 tx_fifo)
296{
297 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
298 (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
299 <= txq_id)) {
300 IWL_ERR(priv,
301 "queue number out of range: %d, must be %d to %d\n",
302 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
303 IWLAGN_FIRST_AMPDU_QUEUE +
304 priv->cfg->num_of_ampdu_queues - 1);
305 return -EINVAL;
306 }
307
308 iwlagn_tx_queue_stop_scheduler(priv, txq_id);
309
310 iwl_clear_bits_prph(priv, IWLAGN_SCD_AGGR_SEL, (1 << txq_id));
311
312 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
313 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
314 /* supposes that ssn_idx is valid (!= 0xFFF) */
315 iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
316
317 iwl_clear_bits_prph(priv, IWLAGN_SCD_INTERRUPT_MASK, (1 << txq_id));
318 iwl_txq_ctx_deactivate(priv, txq_id);
319 iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
320
321 return 0;
322}
323
324/*
325 * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
326 * must be called under priv->lock and mac access
327 */
328void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
329{
330 iwl_write_prph(priv, IWLAGN_SCD_TXFACT, mask);
331}
332
333static inline int get_queue_from_ac(u16 ac)
334{
335 return ac;
336}
337
338/*
339 * handle build REPLY_TX command notification.
340 */
341static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
342 struct iwl_tx_cmd *tx_cmd,
343 struct ieee80211_tx_info *info,
344 struct ieee80211_hdr *hdr,
345 u8 std_id)
346{
347 __le16 fc = hdr->frame_control;
348 __le32 tx_flags = tx_cmd->tx_flags;
349
350 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
351 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
352 tx_flags |= TX_CMD_FLG_ACK_MSK;
353 if (ieee80211_is_mgmt(fc))
354 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
355 if (ieee80211_is_probe_resp(fc) &&
356 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
357 tx_flags |= TX_CMD_FLG_TSF_MSK;
358 } else {
359 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
360 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
361 }
362
363 if (ieee80211_is_back_req(fc))
364 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
365
366
367 tx_cmd->sta_id = std_id;
368 if (ieee80211_has_morefrags(fc))
369 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
370
371 if (ieee80211_is_data_qos(fc)) {
372 u8 *qc = ieee80211_get_qos_ctl(hdr);
373 tx_cmd->tid_tspec = qc[0] & 0xf;
374 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
375 } else {
376 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
377 }
378
379 priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
380
381 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
382 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
383
384 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
385 if (ieee80211_is_mgmt(fc)) {
386 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
387 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
388 else
389 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
390 } else {
391 tx_cmd->timeout.pm_frame_timeout = 0;
392 }
393
394 tx_cmd->driver_txop = 0;
395 tx_cmd->tx_flags = tx_flags;
396 tx_cmd->next_frame_len = 0;
397}
398
399#define RTS_DFAULT_RETRY_LIMIT 60
400
401static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
402 struct iwl_tx_cmd *tx_cmd,
403 struct ieee80211_tx_info *info,
404 __le16 fc)
405{
406 u32 rate_flags;
407 int rate_idx;
408 u8 rts_retry_limit;
409 u8 data_retry_limit;
410 u8 rate_plcp;
411
412 /* Set retry limit on DATA packets and Probe Responses*/
413 if (ieee80211_is_probe_resp(fc))
414 data_retry_limit = 3;
415 else
416 data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
417 tx_cmd->data_retry_limit = data_retry_limit;
418
419 /* Set retry limit on RTS packets */
420 rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
421 if (data_retry_limit < rts_retry_limit)
422 rts_retry_limit = data_retry_limit;
423 tx_cmd->rts_retry_limit = rts_retry_limit;
424
425 /* DATA packets will use the uCode station table for rate/antenna
426 * selection */
427 if (ieee80211_is_data(fc)) {
428 tx_cmd->initial_rate_index = 0;
429 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
430 return;
431 }
432
433 /**
434 * If the current TX rate stored in mac80211 has the MCS bit set, it's
435 * not really a TX rate. Thus, we use the lowest supported rate for
436 * this band. Also use the lowest supported rate if the stored rate
437 * index is invalid.
438 */
439 rate_idx = info->control.rates[0].idx;
440 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
441 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
442 rate_idx = rate_lowest_index(&priv->bands[info->band],
443 info->control.sta);
444 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
445 if (info->band == IEEE80211_BAND_5GHZ)
446 rate_idx += IWL_FIRST_OFDM_RATE;
447 /* Get PLCP rate for tx_cmd->rate_n_flags */
448 rate_plcp = iwl_rates[rate_idx].plcp;
449 /* Zero out flags for this packet */
450 rate_flags = 0;
451
452 /* Set CCK flag as needed */
453 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
454 rate_flags |= RATE_MCS_CCK_MSK;
455
456 /* Set up RTS and CTS flags for certain packets */
457 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
458 case cpu_to_le16(IEEE80211_STYPE_AUTH):
459 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
460 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
461 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
462 if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
463 tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
464 tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
465 }
466 break;
467 default:
468 break;
469 }
470
471 /* Set up antennas */
472 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
473 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
474
475 /* Set the rate in the TX cmd */
476 tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
477}
478
479static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
480 struct ieee80211_tx_info *info,
481 struct iwl_tx_cmd *tx_cmd,
482 struct sk_buff *skb_frag,
483 int sta_id)
484{
485 struct ieee80211_key_conf *keyconf = info->control.hw_key;
486
487 switch (keyconf->alg) {
488 case ALG_CCMP:
489 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
490 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
491 if (info->flags & IEEE80211_TX_CTL_AMPDU)
492 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
493 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
494 break;
495
496 case ALG_TKIP:
497 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
498 ieee80211_get_tkip_key(keyconf, skb_frag,
499 IEEE80211_TKIP_P2_KEY, tx_cmd->key);
500 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
501 break;
502
503 case ALG_WEP:
504 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
505 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
506
507 if (keyconf->keylen == WEP_KEY_LEN_128)
508 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
509
510 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
511
512 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
513 "with key %d\n", keyconf->keyidx);
514 break;
515
516 default:
517 IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
518 break;
519 }
520}
521
522/*
523 * start REPLY_TX command process
524 */
525int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
526{
527 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
528 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
529 struct ieee80211_sta *sta = info->control.sta;
530 struct iwl_station_priv *sta_priv = NULL;
531 struct iwl_tx_queue *txq;
532 struct iwl_queue *q;
533 struct iwl_device_cmd *out_cmd;
534 struct iwl_cmd_meta *out_meta;
535 struct iwl_tx_cmd *tx_cmd;
536 int swq_id, txq_id;
537 dma_addr_t phys_addr;
538 dma_addr_t txcmd_phys;
539 dma_addr_t scratch_phys;
540 u16 len, len_org, firstlen, secondlen;
541 u16 seq_number = 0;
542 __le16 fc;
543 u8 hdr_len;
544 u8 sta_id;
545 u8 wait_write_ptr = 0;
546 u8 tid = 0;
547 u8 *qc = NULL;
548 unsigned long flags;
549
550 spin_lock_irqsave(&priv->lock, flags);
551 if (iwl_is_rfkill(priv)) {
552 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
553 goto drop_unlock;
554 }
555
556 fc = hdr->frame_control;
557
558#ifdef CONFIG_IWLWIFI_DEBUG
559 if (ieee80211_is_auth(fc))
560 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
561 else if (ieee80211_is_assoc_req(fc))
562 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
563 else if (ieee80211_is_reassoc_req(fc))
564 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
565#endif
566
567 hdr_len = ieee80211_hdrlen(fc);
568
569 /* Find index into station table for destination station */
570 if (!info->control.sta)
571 sta_id = priv->hw_params.bcast_sta_id;
572 else
573 sta_id = iwl_sta_id(info->control.sta);
574 if (sta_id == IWL_INVALID_STATION) {
575 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
576 hdr->addr1);
577 goto drop_unlock;
578 }
579
580 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
581
582 if (sta)
583 sta_priv = (void *)sta->drv_priv;
584
585 if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
586 sta_priv->asleep) {
587 WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
588 /*
589 * This sends an asynchronous command to the device,
590 * but we can rely on it being processed before the
591 * next frame is processed -- and the next frame to
592 * this station is the one that will consume this
593 * counter.
594 * For now set the counter to just 1 since we do not
595 * support uAPSD yet.
596 */
597 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
598 }
599
600 txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
601 if (ieee80211_is_data_qos(fc)) {
602 qc = ieee80211_get_qos_ctl(hdr);
603 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
604 if (unlikely(tid >= MAX_TID_COUNT))
605 goto drop_unlock;
606 seq_number = priv->stations[sta_id].tid[tid].seq_number;
607 seq_number &= IEEE80211_SCTL_SEQ;
608 hdr->seq_ctrl = hdr->seq_ctrl &
609 cpu_to_le16(IEEE80211_SCTL_FRAG);
610 hdr->seq_ctrl |= cpu_to_le16(seq_number);
611 seq_number += 0x10;
612 /* aggregation is on for this <sta,tid> */
613 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
614 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
615 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
616 }
617 }
618
619 txq = &priv->txq[txq_id];
620 swq_id = txq->swq_id;
621 q = &txq->q;
622
623 if (unlikely(iwl_queue_space(q) < q->high_mark))
624 goto drop_unlock;
625
626 if (ieee80211_is_data_qos(fc))
627 priv->stations[sta_id].tid[tid].tfds_in_queue++;
628
629 /* Set up driver data for this TFD */
630 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
631 txq->txb[q->write_ptr].skb[0] = skb;
632
633 /* Set up first empty entry in queue's array of Tx/cmd buffers */
634 out_cmd = txq->cmd[q->write_ptr];
635 out_meta = &txq->meta[q->write_ptr];
636 tx_cmd = &out_cmd->cmd.tx;
637 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
638 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
639
640 /*
641 * Set up the Tx-command (not MAC!) header.
642 * Store the chosen Tx queue and TFD index within the sequence field;
643 * after Tx, uCode's Tx response will return this value so driver can
644 * locate the frame within the tx queue and do post-tx processing.
645 */
646 out_cmd->hdr.cmd = REPLY_TX;
647 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
648 INDEX_TO_SEQ(q->write_ptr)));
649
650 /* Copy MAC header from skb into command buffer */
651 memcpy(tx_cmd->hdr, hdr, hdr_len);
652
653
654 /* Total # bytes to be transmitted */
655 len = (u16)skb->len;
656 tx_cmd->len = cpu_to_le16(len);
657
658 if (info->control.hw_key)
659 iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
660
661 /* TODO need this for burst mode later on */
662 iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
663 iwl_dbg_log_tx_data_frame(priv, len, hdr);
664
665 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
666
667 iwl_update_stats(priv, true, fc, len);
668 /*
669 * Use the first empty entry in this queue's command buffer array
670 * to contain the Tx command and MAC header concatenated together
671 * (payload data will be in another buffer).
672 * Size of this varies, due to varying MAC header length.
673 * If end is not dword aligned, we'll have 2 extra bytes at the end
674 * of the MAC header (device reads on dword boundaries).
675 * We'll tell device about this padding later.
676 */
677 len = sizeof(struct iwl_tx_cmd) +
678 sizeof(struct iwl_cmd_header) + hdr_len;
679
680 len_org = len;
681 firstlen = len = (len + 3) & ~3;
682
683 if (len_org != len)
684 len_org = 1;
685 else
686 len_org = 0;
687
688 /* Tell NIC about any 2-byte padding after MAC header */
689 if (len_org)
690 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
691
692 /* Physical address of this Tx command's header (not MAC header!),
693 * within command buffer array. */
694 txcmd_phys = pci_map_single(priv->pci_dev,
695 &out_cmd->hdr, len,
696 PCI_DMA_BIDIRECTIONAL);
697 pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
698 pci_unmap_len_set(out_meta, len, len);
699 /* Add buffer containing Tx command and MAC(!) header to TFD's
700 * first entry */
701 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
702 txcmd_phys, len, 1, 0);
703
704 if (!ieee80211_has_morefrags(hdr->frame_control)) {
705 txq->need_update = 1;
706 if (qc)
707 priv->stations[sta_id].tid[tid].seq_number = seq_number;
708 } else {
709 wait_write_ptr = 1;
710 txq->need_update = 0;
711 }
712
713 /* Set up TFD's 2nd entry to point directly to remainder of skb,
714 * if any (802.11 null frames have no payload). */
715 secondlen = len = skb->len - hdr_len;
716 if (len) {
717 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
718 len, PCI_DMA_TODEVICE);
719 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
720 phys_addr, len,
721 0, 0);
722 }
723
724 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
725 offsetof(struct iwl_tx_cmd, scratch);
726
727 len = sizeof(struct iwl_tx_cmd) +
728 sizeof(struct iwl_cmd_header) + hdr_len;
729 /* take back ownership of DMA buffer to enable update */
730 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
731 len, PCI_DMA_BIDIRECTIONAL);
732 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
733 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
734
735 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
736 le16_to_cpu(out_cmd->hdr.sequence));
737 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
738 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
739 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
740
741 /* Set up entry for this TFD in Tx byte-count array */
742 if (info->flags & IEEE80211_TX_CTL_AMPDU)
743 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
744 le16_to_cpu(tx_cmd->len));
745
746 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
747 len, PCI_DMA_BIDIRECTIONAL);
748
749 trace_iwlwifi_dev_tx(priv,
750 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
751 sizeof(struct iwl_tfd),
752 &out_cmd->hdr, firstlen,
753 skb->data + hdr_len, secondlen);
754
755 /* Tell device the write index *just past* this latest filled TFD */
756 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
757 iwl_txq_update_write_ptr(priv, txq);
758 spin_unlock_irqrestore(&priv->lock, flags);
759
760 /*
761 * At this point the frame is "transmitted" successfully
762 * and we will get a TX status notification eventually,
763 * regardless of the value of ret. "ret" only indicates
764 * whether or not we should update the write pointer.
765 */
766
767 /* avoid atomic ops if it isn't an associated client */
768 if (sta_priv && sta_priv->client)
769 atomic_inc(&sta_priv->pending_frames);
770
771 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
772 if (wait_write_ptr) {
773 spin_lock_irqsave(&priv->lock, flags);
774 txq->need_update = 1;
775 iwl_txq_update_write_ptr(priv, txq);
776 spin_unlock_irqrestore(&priv->lock, flags);
777 } else {
778 iwl_stop_queue(priv, txq->swq_id);
779 }
780 }
781
782 return 0;
783
784drop_unlock:
785 spin_unlock_irqrestore(&priv->lock, flags);
786 return -1;
787}
788
789static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
790 struct iwl_dma_ptr *ptr, size_t size)
791{
792 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
793 GFP_KERNEL);
794 if (!ptr->addr)
795 return -ENOMEM;
796 ptr->size = size;
797 return 0;
798}
799
800static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
801 struct iwl_dma_ptr *ptr)
802{
803 if (unlikely(!ptr->addr))
804 return;
805
806 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
807 memset(ptr, 0, sizeof(*ptr));
808}
809
810/**
811 * iwlagn_hw_txq_ctx_free - Free TXQ Context
812 *
813 * Destroy all TX DMA queues and structures
814 */
815void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
816{
817 int txq_id;
818
819 /* Tx queues */
820 if (priv->txq) {
821 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
822 if (txq_id == IWL_CMD_QUEUE_NUM)
823 iwl_cmd_queue_free(priv);
824 else
825 iwl_tx_queue_free(priv, txq_id);
826 }
827 iwlagn_free_dma_ptr(priv, &priv->kw);
828
829 iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
830
831 /* free tx queue structure */
832 iwl_free_txq_mem(priv);
833}
834
835/**
836 * iwlagn_txq_ctx_alloc - allocate TX queue context
837 * Allocate all Tx DMA structures and initialize them
838 *
839 * @param priv
840 * @return error code
841 */
842int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
843{
844 int ret;
845 int txq_id, slots_num;
846 unsigned long flags;
847
848 /* Free all tx/cmd queues and keep-warm buffer */
849 iwlagn_hw_txq_ctx_free(priv);
850
851 ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
852 priv->hw_params.scd_bc_tbls_size);
853 if (ret) {
854 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
855 goto error_bc_tbls;
856 }
857 /* Alloc keep-warm buffer */
858 ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
859 if (ret) {
860 IWL_ERR(priv, "Keep Warm allocation failed\n");
861 goto error_kw;
862 }
863
864 /* allocate tx queue structure */
865 ret = iwl_alloc_txq_mem(priv);
866 if (ret)
867 goto error;
868
869 spin_lock_irqsave(&priv->lock, flags);
870
871 /* Turn off all Tx DMA fifos */
872 priv->cfg->ops->lib->txq_set_sched(priv, 0);
873
874 /* Tell NIC where to find the "keep warm" buffer */
875 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
876
877 spin_unlock_irqrestore(&priv->lock, flags);
878
879 /* Alloc and init all Tx queues, including the command queue (#4) */
880 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
881 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
882 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
883 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
884 txq_id);
885 if (ret) {
886 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
887 goto error;
888 }
889 }
890
891 return ret;
892
893 error:
894 iwlagn_hw_txq_ctx_free(priv);
895 iwlagn_free_dma_ptr(priv, &priv->kw);
896 error_kw:
897 iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
898 error_bc_tbls:
899 return ret;
900}
901
902void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
903{
904 int txq_id, slots_num;
905 unsigned long flags;
906
907 spin_lock_irqsave(&priv->lock, flags);
908
909 /* Turn off all Tx DMA fifos */
910 priv->cfg->ops->lib->txq_set_sched(priv, 0);
911
912 /* Tell NIC where to find the "keep warm" buffer */
913 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
914
915 spin_unlock_irqrestore(&priv->lock, flags);
916
917 /* Alloc and init all Tx queues, including the command queue (#4) */
918 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
919 slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
920 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
921 iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
922 }
923}
924
925/**
926 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
927 */
928void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
929{
930 int ch;
931 unsigned long flags;
932
933 /* Turn off all Tx DMA fifos */
934 spin_lock_irqsave(&priv->lock, flags);
935
936 priv->cfg->ops->lib->txq_set_sched(priv, 0);
937
938 /* Stop each Tx DMA channel, and wait for it to be idle */
939 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
940 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
941 iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
942 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
943 1000);
944 }
945 spin_unlock_irqrestore(&priv->lock, flags);
946}
947
948/*
949 * Find first available (lowest unused) Tx Queue, mark it "active".
950 * Called only when finding queue for aggregation.
951 * Should never return anything < 7, because they should already
952 * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
953 */
954static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
955{
956 int txq_id;
957
958 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
959 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
960 return txq_id;
961 return -1;
962}
963
964int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
965 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
966{
967 int sta_id;
968 int tx_fifo;
969 int txq_id;
970 int ret;
971 unsigned long flags;
972 struct iwl_tid_data *tid_data;
973
974 tx_fifo = get_fifo_from_tid(tid);
975 if (unlikely(tx_fifo < 0))
976 return tx_fifo;
977
978 IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
979 __func__, sta->addr, tid);
980
981 sta_id = iwl_sta_id(sta);
982 if (sta_id == IWL_INVALID_STATION) {
983 IWL_ERR(priv, "Start AGG on invalid station\n");
984 return -ENXIO;
985 }
986 if (unlikely(tid >= MAX_TID_COUNT))
987 return -EINVAL;
988
989 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
990 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
991 return -ENXIO;
992 }
993
994 txq_id = iwlagn_txq_ctx_activate_free(priv);
995 if (txq_id == -1) {
996 IWL_ERR(priv, "No free aggregation queue available\n");
997 return -ENXIO;
998 }
999
1000 spin_lock_irqsave(&priv->sta_lock, flags);
1001 tid_data = &priv->stations[sta_id].tid[tid];
1002 *ssn = SEQ_TO_SN(tid_data->seq_number);
1003 tid_data->agg.txq_id = txq_id;
1004 priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id);
1005 spin_unlock_irqrestore(&priv->sta_lock, flags);
1006
1007 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
1008 sta_id, tid, *ssn);
1009 if (ret)
1010 return ret;
1011
1012 if (tid_data->tfds_in_queue == 0) {
1013 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1014 tid_data->agg.state = IWL_AGG_ON;
1015 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1016 } else {
1017 IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
1018 tid_data->tfds_in_queue);
1019 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
1020 }
1021 return ret;
1022}
1023
1024int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
1025 struct ieee80211_sta *sta, u16 tid)
1026{
1027 int tx_fifo_id, txq_id, sta_id, ssn = -1;
1028 struct iwl_tid_data *tid_data;
1029 int write_ptr, read_ptr;
1030 unsigned long flags;
1031
1032 tx_fifo_id = get_fifo_from_tid(tid);
1033 if (unlikely(tx_fifo_id < 0))
1034 return tx_fifo_id;
1035
1036 sta_id = iwl_sta_id(sta);
1037
1038 if (sta_id == IWL_INVALID_STATION) {
1039 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1040 return -ENXIO;
1041 }
1042
1043 if (priv->stations[sta_id].tid[tid].agg.state ==
1044 IWL_EMPTYING_HW_QUEUE_ADDBA) {
1045 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
1046 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1047 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1048 return 0;
1049 }
1050
1051 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
1052 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
1053
1054 tid_data = &priv->stations[sta_id].tid[tid];
1055 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
1056 txq_id = tid_data->agg.txq_id;
1057 write_ptr = priv->txq[txq_id].q.write_ptr;
1058 read_ptr = priv->txq[txq_id].q.read_ptr;
1059
1060 /* The queue is not empty */
1061 if (write_ptr != read_ptr) {
1062 IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
1063 priv->stations[sta_id].tid[tid].agg.state =
1064 IWL_EMPTYING_HW_QUEUE_DELBA;
1065 return 0;
1066 }
1067
1068 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1069 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1070
1071 spin_lock_irqsave(&priv->lock, flags);
1072 /*
1073 * the only reason this call can fail is queue number out of range,
1074 * which can happen if uCode is reloaded and all the station
1075 * information are lost. if it is outside the range, there is no need
1076 * to deactivate the uCode queue, just return "success" to allow
1077 * mac80211 to clean up it own data.
1078 */
1079 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
1080 tx_fifo_id);
1081 spin_unlock_irqrestore(&priv->lock, flags);
1082
1083 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1084
1085 return 0;
1086}
1087
1088int iwlagn_txq_check_empty(struct iwl_priv *priv,
1089 int sta_id, u8 tid, int txq_id)
1090{
1091 struct iwl_queue *q = &priv->txq[txq_id].q;
1092 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1093 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1094
1095 switch (priv->stations[sta_id].tid[tid].agg.state) {
1096 case IWL_EMPTYING_HW_QUEUE_DELBA:
1097 /* We are reclaiming the last packet of the */
1098 /* aggregated HW queue */
1099 if ((txq_id == tid_data->agg.txq_id) &&
1100 (q->read_ptr == q->write_ptr)) {
1101 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1102 int tx_fifo = get_fifo_from_tid(tid);
1103 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
1104 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
1105 ssn, tx_fifo);
1106 tid_data->agg.state = IWL_AGG_OFF;
1107 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
1108 }
1109 break;
1110 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1111 /* We are reclaiming the last packet of the queue */
1112 if (tid_data->tfds_in_queue == 0) {
1113 IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
1114 tid_data->agg.state = IWL_AGG_ON;
1115 ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
1116 }
1117 break;
1118 }
1119 return 0;
1120}
1121
1122static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
1123{
1124 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1125 struct ieee80211_sta *sta;
1126 struct iwl_station_priv *sta_priv;
1127
1128 sta = ieee80211_find_sta(priv->vif, hdr->addr1);
1129 if (sta) {
1130 sta_priv = (void *)sta->drv_priv;
1131 /* avoid atomic ops if this isn't a client */
1132 if (sta_priv->client &&
1133 atomic_dec_return(&sta_priv->pending_frames) == 0)
1134 ieee80211_sta_block_awake(priv->hw, sta, false);
1135 }
1136
1137 ieee80211_tx_status_irqsafe(priv->hw, skb);
1138}
1139
1140int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1141{
1142 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1143 struct iwl_queue *q = &txq->q;
1144 struct iwl_tx_info *tx_info;
1145 int nfreed = 0;
1146 struct ieee80211_hdr *hdr;
1147
1148 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1149 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
1150 "is out of range [0-%d] %d %d.\n", txq_id,
1151 index, q->n_bd, q->write_ptr, q->read_ptr);
1152 return 0;
1153 }
1154
1155 for (index = iwl_queue_inc_wrap(index, q->n_bd);
1156 q->read_ptr != index;
1157 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1158
1159 tx_info = &txq->txb[txq->q.read_ptr];
1160 iwlagn_tx_status(priv, tx_info->skb[0]);
1161
1162 hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
1163 if (hdr && ieee80211_is_data_qos(hdr->frame_control))
1164 nfreed++;
1165 tx_info->skb[0] = NULL;
1166
1167 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
1168 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
1169
1170 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1171 }
1172 return nfreed;
1173}
1174
1175/**
1176 * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
1177 *
1178 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
1179 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
1180 */
1181static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1182 struct iwl_ht_agg *agg,
1183 struct iwl_compressed_ba_resp *ba_resp)
1184
1185{
1186 int i, sh, ack;
1187 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1188 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1189 u64 bitmap;
1190 int successes = 0;
1191 struct ieee80211_tx_info *info;
1192
1193 if (unlikely(!agg->wait_for_ba)) {
1194 IWL_ERR(priv, "Received BA when not expected\n");
1195 return -EINVAL;
1196 }
1197
1198 /* Mark that the expected block-ack response arrived */
1199 agg->wait_for_ba = 0;
1200 IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
1201
1202 /* Calculate shift to align block-ack bits with our Tx window bits */
1203 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
1204 if (sh < 0) /* tbw something is wrong with indices */
1205 sh += 0x100;
1206
1207 /* don't use 64-bit values for now */
1208 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1209
1210 if (agg->frame_count > (64 - sh)) {
1211 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
1212 return -1;
1213 }
1214
1215 /* check for success or failure according to the
1216 * transmitted bitmap and block-ack bitmap */
1217 bitmap &= agg->bitmap;
1218
1219 /* For each frame attempted in aggregation,
1220 * update driver's record of tx frame's status. */
1221 for (i = 0; i < agg->frame_count ; i++) {
1222 ack = bitmap & (1ULL << i);
1223 successes += !!ack;
1224 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1225 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
1226 agg->start_idx + i);
1227 }
1228
1229 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
1230 memset(&info->status, 0, sizeof(info->status));
1231 info->flags |= IEEE80211_TX_STAT_ACK;
1232 info->flags |= IEEE80211_TX_STAT_AMPDU;
1233 info->status.ampdu_ack_len = successes;
1234 info->status.ampdu_ack_map = bitmap;
1235 info->status.ampdu_len = agg->frame_count;
1236 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1237
1238 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
1239
1240 return 0;
1241}
1242
1243/**
1244 * translate ucode response to mac80211 tx status control values
1245 */
1246void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
1247 struct ieee80211_tx_info *info)
1248{
1249 struct ieee80211_tx_rate *r = &info->control.rates[0];
1250
1251 info->antenna_sel_tx =
1252 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1253 if (rate_n_flags & RATE_MCS_HT_MSK)
1254 r->flags |= IEEE80211_TX_RC_MCS;
1255 if (rate_n_flags & RATE_MCS_GF_MSK)
1256 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1257 if (rate_n_flags & RATE_MCS_HT40_MSK)
1258 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1259 if (rate_n_flags & RATE_MCS_DUP_MSK)
1260 r->flags |= IEEE80211_TX_RC_DUP_DATA;
1261 if (rate_n_flags & RATE_MCS_SGI_MSK)
1262 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1263 r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
1264}
1265
1266/**
1267 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
1268 *
1269 * Handles block-acknowledge notification from device, which reports success
1270 * of frames sent via aggregation.
1271 */
1272void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1273 struct iwl_rx_mem_buffer *rxb)
1274{
1275 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1276 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1277 struct iwl_tx_queue *txq = NULL;
1278 struct iwl_ht_agg *agg;
1279 int index;
1280 int sta_id;
1281 int tid;
1282
1283 /* "flow" corresponds to Tx queue */
1284 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1285
1286 /* "ssn" is start of block-ack Tx window, corresponds to index
1287 * (in Tx queue's circular buffer) of first TFD/frame in window */
1288 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1289
1290 if (scd_flow >= priv->hw_params.max_txq_num) {
1291 IWL_ERR(priv,
1292 "BUG_ON scd_flow is bigger than number of queues\n");
1293 return;
1294 }
1295
1296 txq = &priv->txq[scd_flow];
1297 sta_id = ba_resp->sta_id;
1298 tid = ba_resp->tid;
1299 agg = &priv->stations[sta_id].tid[tid].agg;
1300
1301 /* Find index just before block-ack window */
1302 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1303
1304 /* TODO: Need to get this copy more safely - now good for debug */
1305
1306 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1307 "sta_id = %d\n",
1308 agg->wait_for_ba,
1309 (u8 *) &ba_resp->sta_addr_lo32,
1310 ba_resp->sta_id);
1311 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
1312 "%d, scd_ssn = %d\n",
1313 ba_resp->tid,
1314 ba_resp->seq_ctl,
1315 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1316 ba_resp->scd_flow,
1317 ba_resp->scd_ssn);
1318 IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
1319 agg->start_idx,
1320 (unsigned long long)agg->bitmap);
1321
1322 /* Update driver's record of ACK vs. not for each frame in window */
1323 iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);
1324
1325 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1326 * block-ack window (we assume that they've been successfully
1327 * transmitted ... if not, it's too late anyway). */
1328 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1329 /* calculate mac80211 ampdu sw queue to wake */
1330 int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
1331 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1332
1333 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1334 priv->mac80211_registered &&
1335 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1336 iwl_wake_queue(priv, txq->swq_id);
1337
1338 iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
1339 }
1340}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
new file mode 100644
index 000000000000..637286c396fe
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -0,0 +1,425 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-agn-hw.h"
40#include "iwl-agn.h"
41
42static const s8 iwlagn_default_queue_to_tx_fifo[] = {
43 IWL_TX_FIFO_VO,
44 IWL_TX_FIFO_VI,
45 IWL_TX_FIFO_BE,
46 IWL_TX_FIFO_BK,
47 IWLAGN_CMD_FIFO_NUM,
48 IWL_TX_FIFO_UNUSED,
49 IWL_TX_FIFO_UNUSED,
50 IWL_TX_FIFO_UNUSED,
51 IWL_TX_FIFO_UNUSED,
52 IWL_TX_FIFO_UNUSED,
53};
54
55static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
56 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
57 0, COEX_UNASSOC_IDLE_FLAGS},
58 {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
59 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
60 {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
61 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
62 {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
63 0, COEX_CALIBRATION_FLAGS},
64 {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
65 0, COEX_PERIODIC_CALIBRATION_FLAGS},
66 {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
67 0, COEX_CONNECTION_ESTAB_FLAGS},
68 {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
69 0, COEX_ASSOCIATED_IDLE_FLAGS},
70 {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
71 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
72 {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
73 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
74 {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
75 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
76 {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
77 {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
78 {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
79 0, COEX_STAND_ALONE_DEBUG_FLAGS},
80 {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
81 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
82 {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
83 {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
84};
85
86/*
87 * ucode
88 */
89static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
90 struct fw_desc *image, u32 dst_addr)
91{
92 dma_addr_t phy_addr = image->p_addr;
93 u32 byte_cnt = image->len;
94 int ret;
95
96 priv->ucode_write_complete = 0;
97
98 iwl_write_direct32(priv,
99 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
100 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
101
102 iwl_write_direct32(priv,
103 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
104
105 iwl_write_direct32(priv,
106 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
107 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
108
109 iwl_write_direct32(priv,
110 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
111 (iwl_get_dma_hi_addr(phy_addr)
112 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
113
114 iwl_write_direct32(priv,
115 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
116 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
117 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
118 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
119
120 iwl_write_direct32(priv,
121 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
122 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
123 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
124 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
125
126 IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
127 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
128 priv->ucode_write_complete, 5 * HZ);
129 if (ret == -ERESTARTSYS) {
130 IWL_ERR(priv, "Could not load the %s uCode section due "
131 "to interrupt\n", name);
132 return ret;
133 }
134 if (!ret) {
135 IWL_ERR(priv, "Could not load the %s uCode section\n",
136 name);
137 return -ETIMEDOUT;
138 }
139
140 return 0;
141}
142
143static int iwlagn_load_given_ucode(struct iwl_priv *priv,
144 struct fw_desc *inst_image,
145 struct fw_desc *data_image)
146{
147 int ret = 0;
148
149 ret = iwlagn_load_section(priv, "INST", inst_image,
150 IWLAGN_RTC_INST_LOWER_BOUND);
151 if (ret)
152 return ret;
153
154 return iwlagn_load_section(priv, "DATA", data_image,
155 IWLAGN_RTC_DATA_LOWER_BOUND);
156}
157
158int iwlagn_load_ucode(struct iwl_priv *priv)
159{
160 int ret = 0;
161
162 /* check whether init ucode should be loaded, or rather runtime ucode */
163 if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
164 IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
165 ret = iwlagn_load_given_ucode(priv,
166 &priv->ucode_init, &priv->ucode_init_data);
167 if (!ret) {
168 IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
169 priv->ucode_type = UCODE_INIT;
170 }
171 } else {
172 IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
173 "Loading runtime ucode...\n");
174 ret = iwlagn_load_given_ucode(priv,
175 &priv->ucode_code, &priv->ucode_data);
176 if (!ret) {
177 IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
178 priv->ucode_type = UCODE_RT;
179 }
180 }
181
182 return ret;
183}
184
185/*
186 * Calibration
187 */
188static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
189{
190 struct iwl_calib_xtal_freq_cmd cmd;
191 __le16 *xtal_calib =
192 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
193
194 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
195 cmd.hdr.first_group = 0;
196 cmd.hdr.groups_num = 1;
197 cmd.hdr.data_valid = 1;
198 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
199 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
200 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
201 (u8 *)&cmd, sizeof(cmd));
202}
203
204static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
205{
206 struct iwl_calib_cfg_cmd calib_cfg_cmd;
207 struct iwl_host_cmd cmd = {
208 .id = CALIBRATION_CFG_CMD,
209 .len = sizeof(struct iwl_calib_cfg_cmd),
210 .data = &calib_cfg_cmd,
211 };
212
213 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
214 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
215 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
216 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
217 calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
218
219 return iwl_send_cmd(priv, &cmd);
220}
221
222void iwlagn_rx_calib_result(struct iwl_priv *priv,
223 struct iwl_rx_mem_buffer *rxb)
224{
225 struct iwl_rx_packet *pkt = rxb_addr(rxb);
226 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
227 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
228 int index;
229
230 /* reduce the size of the length field itself */
231 len -= 4;
232
233 /* Define the order in which the results will be sent to the runtime
234 * uCode. iwl_send_calib_results sends them in a row according to
235 * their index. We sort them here
236 */
237 switch (hdr->op_code) {
238 case IWL_PHY_CALIBRATE_DC_CMD:
239 index = IWL_CALIB_DC;
240 break;
241 case IWL_PHY_CALIBRATE_LO_CMD:
242 index = IWL_CALIB_LO;
243 break;
244 case IWL_PHY_CALIBRATE_TX_IQ_CMD:
245 index = IWL_CALIB_TX_IQ;
246 break;
247 case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
248 index = IWL_CALIB_TX_IQ_PERD;
249 break;
250 case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
251 index = IWL_CALIB_BASE_BAND;
252 break;
253 default:
254 IWL_ERR(priv, "Unknown calibration notification %d\n",
255 hdr->op_code);
256 return;
257 }
258 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
259}
260
261void iwlagn_rx_calib_complete(struct iwl_priv *priv,
262 struct iwl_rx_mem_buffer *rxb)
263{
264 IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
265 queue_work(priv->workqueue, &priv->restart);
266}
267
268void iwlagn_init_alive_start(struct iwl_priv *priv)
269{
270 int ret = 0;
271
272 /* Check alive response for "valid" sign from uCode */
273 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
274 /* We had an error bringing up the hardware, so take it
275 * all the way back down so we can try again */
276 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
277 goto restart;
278 }
279
280 /* initialize uCode was loaded... verify inst image.
281 * This is a paranoid check, because we would not have gotten the
282 * "initialize" alive if code weren't properly loaded. */
283 if (iwl_verify_ucode(priv)) {
284 /* Runtime instruction load was bad;
285 * take it all the way back down so we can try again */
286 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
287 goto restart;
288 }
289
290 ret = priv->cfg->ops->lib->alive_notify(priv);
291 if (ret) {
292 IWL_WARN(priv,
293 "Could not complete ALIVE transition: %d\n", ret);
294 goto restart;
295 }
296
297 iwlagn_send_calib_cfg(priv);
298 return;
299
300restart:
301 /* real restart (first load init_ucode) */
302 queue_work(priv->workqueue, &priv->restart);
303}
304
305static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
306{
307 struct iwl_wimax_coex_cmd coex_cmd;
308
309 if (priv->cfg->support_wimax_coexist) {
310 /* UnMask wake up src at associated sleep */
311 coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
312
313 /* UnMask wake up src at unassociated sleep */
314 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
315 memcpy(coex_cmd.sta_prio, cu_priorities,
316 sizeof(struct iwl_wimax_coex_event_entry) *
317 COEX_NUM_OF_EVENTS);
318
319 /* enabling the coexistence feature */
320 coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
321
322 /* enabling the priorities tables */
323 coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
324 } else {
325 /* coexistence is disabled */
326 memset(&coex_cmd, 0, sizeof(coex_cmd));
327 }
328 return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
329 sizeof(coex_cmd), &coex_cmd);
330}
331
332int iwlagn_alive_notify(struct iwl_priv *priv)
333{
334 u32 a;
335 unsigned long flags;
336 int i, chan;
337 u32 reg_val;
338
339 spin_lock_irqsave(&priv->lock, flags);
340
341 priv->scd_base_addr = iwl_read_prph(priv, IWLAGN_SCD_SRAM_BASE_ADDR);
342 a = priv->scd_base_addr + IWLAGN_SCD_CONTEXT_DATA_OFFSET;
343 for (; a < priv->scd_base_addr + IWLAGN_SCD_TX_STTS_BITMAP_OFFSET;
344 a += 4)
345 iwl_write_targ_mem(priv, a, 0);
346 for (; a < priv->scd_base_addr + IWLAGN_SCD_TRANSLATE_TBL_OFFSET;
347 a += 4)
348 iwl_write_targ_mem(priv, a, 0);
349 for (; a < priv->scd_base_addr +
350 IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
351 iwl_write_targ_mem(priv, a, 0);
352
353 iwl_write_prph(priv, IWLAGN_SCD_DRAM_BASE_ADDR,
354 priv->scd_bc_tbls.dma >> 10);
355
356 /* Enable DMA channel */
357 for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
358 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
359 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
360 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
361
362 /* Update FH chicken bits */
363 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
364 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
365 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
366
367 iwl_write_prph(priv, IWLAGN_SCD_QUEUECHAIN_SEL,
368 IWLAGN_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
369 iwl_write_prph(priv, IWLAGN_SCD_AGGR_SEL, 0);
370
371 /* initiate the queues */
372 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
373 iwl_write_prph(priv, IWLAGN_SCD_QUEUE_RDPTR(i), 0);
374 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
375 iwl_write_targ_mem(priv, priv->scd_base_addr +
376 IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
377 iwl_write_targ_mem(priv, priv->scd_base_addr +
378 IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(i) +
379 sizeof(u32),
380 ((SCD_WIN_SIZE <<
381 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
382 IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
383 ((SCD_FRAME_LIMIT <<
384 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
385 IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
386 }
387
388 iwl_write_prph(priv, IWLAGN_SCD_INTERRUPT_MASK,
389 IWL_MASK(0, priv->hw_params.max_txq_num));
390
391 /* Activate all Tx DMA/FIFO channels */
392 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
393
394 iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
395
396 /* make sure all queue are not stopped */
397 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
398 for (i = 0; i < 4; i++)
399 atomic_set(&priv->queue_stop_count[i], 0);
400
401 /* reset to 0 to enable all the queue first */
402 priv->txq_ctx_active_msk = 0;
403 /* map qos queues to fifos one-to-one */
404 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
405
406 for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
407 int ac = iwlagn_default_queue_to_tx_fifo[i];
408
409 iwl_txq_ctx_activate(priv, i);
410
411 if (ac == IWL_TX_FIFO_UNUSED)
412 continue;
413
414 iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
415 }
416
417 spin_unlock_irqrestore(&priv->lock, flags);
418
419 iwlagn_send_wimax_coex(priv);
420
421 iwlagn_set_Xtal_calib(priv);
422 iwl_send_calib_results(priv);
423
424 return 0;
425}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 8b8e3e1cbb44..aef4f71f1981 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -55,6 +55,7 @@
55#include "iwl-helpers.h" 55#include "iwl-helpers.h"
56#include "iwl-sta.h" 56#include "iwl-sta.h"
57#include "iwl-calib.h" 57#include "iwl-calib.h"
58#include "iwl-agn.h"
58 59
59 60
60/****************************************************************************** 61/******************************************************************************
@@ -83,13 +84,6 @@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
83MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
84MODULE_ALIAS("iwl4965"); 85MODULE_ALIAS("iwl4965");
85 86
86/*************** STATION TABLE MANAGEMENT ****
87 * mac80211 should be examined to determine if sta_info is duplicating
88 * the functionality provided here
89 */
90
91/**************************************************************/
92
93/** 87/**
94 * iwl_commit_rxon - commit staging_rxon to hardware 88 * iwl_commit_rxon - commit staging_rxon to hardware
95 * 89 *
@@ -144,9 +138,6 @@ int iwl_commit_rxon(struct iwl_priv *priv)
144 return 0; 138 return 0;
145 } 139 }
146 140
147 /* station table will be cleared */
148 priv->assoc_station_added = 0;
149
150 /* If we are currently associated and the new config requires 141 /* If we are currently associated and the new config requires
151 * an RXON_ASSOC and the new config wants the associated mask enabled, 142 * an RXON_ASSOC and the new config wants the associated mask enabled,
152 * we must clear the associated from the active configuration 143 * we must clear the associated from the active configuration
@@ -166,6 +157,13 @@ int iwl_commit_rxon(struct iwl_priv *priv)
166 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); 157 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
167 return ret; 158 return ret;
168 } 159 }
160 iwl_clear_ucode_stations(priv);
161 iwl_restore_stations(priv);
162 ret = iwl_restore_default_wep_keys(priv);
163 if (ret) {
164 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
165 return ret;
166 }
169 } 167 }
170 168
171 IWL_DEBUG_INFO(priv, "Sending RXON\n" 169 IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -179,9 +177,8 @@ int iwl_commit_rxon(struct iwl_priv *priv)
179 iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto); 177 iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto);
180 178
181 /* Apply the new configuration 179 /* Apply the new configuration
182 * RXON unassoc clears the station table in uCode, send it before 180 * RXON unassoc clears the station table in uCode so restoration of
183 * we add the bcast station. If assoc bit is set, we will send RXON 181 * stations is needed after it (the RXON command) completes
184 * after having added the bcast and bssid station.
185 */ 182 */
186 if (!new_assoc) { 183 if (!new_assoc) {
187 ret = iwl_send_cmd_pdu(priv, REPLY_RXON, 184 ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
@@ -190,35 +187,19 @@ int iwl_commit_rxon(struct iwl_priv *priv)
190 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 187 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
191 return ret; 188 return ret;
192 } 189 }
190 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
193 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 191 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
192 iwl_clear_ucode_stations(priv);
193 iwl_restore_stations(priv);
194 ret = iwl_restore_default_wep_keys(priv);
195 if (ret) {
196 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
197 return ret;
198 }
194 } 199 }
195 200
196 iwl_clear_stations_table(priv);
197
198 priv->start_calib = 0; 201 priv->start_calib = 0;
199
200 /* Add the broadcast address so we can send broadcast frames */
201 priv->cfg->ops->lib->add_bcast_station(priv);
202
203
204 /* If we have set the ASSOC_MSK and we are in BSS mode then
205 * add the IWL_AP_ID to the station rate table */
206 if (new_assoc) { 202 if (new_assoc) {
207 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
208 ret = iwl_rxon_add_station(priv,
209 priv->active_rxon.bssid_addr, 1);
210 if (ret == IWL_INVALID_STATION) {
211 IWL_ERR(priv,
212 "Error adding AP address for TX.\n");
213 return -EIO;
214 }
215 priv->assoc_station_added = 1;
216 if (priv->default_wep_key &&
217 iwl_send_static_wepkey_cmd(priv, 0))
218 IWL_ERR(priv,
219 "Could not send WEP static key.\n");
220 }
221
222 /* 203 /*
223 * allow CTS-to-self if possible for new association. 204 * allow CTS-to-self if possible for new association.
224 * this is relevant only for 5000 series and up, 205 * this is relevant only for 5000 series and up,
@@ -907,10 +888,10 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
907 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = 888 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
908 iwl_rx_missed_beacon_notif; 889 iwl_rx_missed_beacon_notif;
909 /* Rx handlers */ 890 /* Rx handlers */
910 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy; 891 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
911 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx; 892 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
912 /* block ack */ 893 /* block ack */
913 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba; 894 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
914 /* Set up hardware specific Rx handlers */ 895 /* Set up hardware specific Rx handlers */
915 priv->cfg->ops->lib->rx_handler_setup(priv); 896 priv->cfg->ops->lib->rx_handler_setup(priv);
916} 897}
@@ -1038,7 +1019,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
1038 count++; 1019 count++;
1039 if (count >= 8) { 1020 if (count >= 8) {
1040 rxq->read = i; 1021 rxq->read = i;
1041 iwl_rx_replenish_now(priv); 1022 iwlagn_rx_replenish_now(priv);
1042 count = 0; 1023 count = 0;
1043 } 1024 }
1044 } 1025 }
@@ -1047,9 +1028,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
1047 /* Backtrack one entry */ 1028 /* Backtrack one entry */
1048 rxq->read = i; 1029 rxq->read = i;
1049 if (fill_rx) 1030 if (fill_rx)
1050 iwl_rx_replenish_now(priv); 1031 iwlagn_rx_replenish_now(priv);
1051 else 1032 else
1052 iwl_rx_queue_restock(priv); 1033 iwlagn_rx_queue_restock(priv);
1053} 1034}
1054 1035
1055/* call this function to flush any scheduled tasklet */ 1036/* call this function to flush any scheduled tasklet */
@@ -1267,9 +1248,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1267 * hardware bugs here by ACKing all the possible interrupts so that 1248 * hardware bugs here by ACKing all the possible interrupts so that
1268 * interrupt coalescing can still be achieved. 1249 * interrupt coalescing can still be achieved.
1269 */ 1250 */
1270 iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask); 1251 iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);
1271 1252
1272 inta = priv->inta; 1253 inta = priv->_agn.inta;
1273 1254
1274#ifdef CONFIG_IWLWIFI_DEBUG 1255#ifdef CONFIG_IWLWIFI_DEBUG
1275 if (iwl_get_debug_level(priv) & IWL_DL_ISR) { 1256 if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
@@ -1282,8 +1263,8 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1282 1263
1283 spin_unlock_irqrestore(&priv->lock, flags); 1264 spin_unlock_irqrestore(&priv->lock, flags);
1284 1265
1285 /* saved interrupt in inta variable now we can reset priv->inta */ 1266 /* saved interrupt in inta variable now we can reset priv->_agn.inta */
1286 priv->inta = 0; 1267 priv->_agn.inta = 0;
1287 1268
1288 /* Now service all interrupt bits discovered above. */ 1269 /* Now service all interrupt bits discovered above. */
1289 if (inta & CSR_INT_BIT_HW_ERR) { 1270 if (inta & CSR_INT_BIT_HW_ERR) {
@@ -1448,6 +1429,60 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1448 iwl_enable_interrupts(priv); 1429 iwl_enable_interrupts(priv);
1449} 1430}
1450 1431
1432/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
1433#define ACK_CNT_RATIO (50)
1434#define BA_TIMEOUT_CNT (5)
1435#define BA_TIMEOUT_MAX (16)
1436
1437/**
1438 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
1439 *
1440 * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding
1441 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
1442 * operation state.
1443 */
1444bool iwl_good_ack_health(struct iwl_priv *priv,
1445 struct iwl_rx_packet *pkt)
1446{
1447 bool rc = true;
1448 int actual_ack_cnt_delta, expected_ack_cnt_delta;
1449 int ba_timeout_delta;
1450
1451 actual_ack_cnt_delta =
1452 le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
1453 le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
1454 expected_ack_cnt_delta =
1455 le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
1456 le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
1457 ba_timeout_delta =
1458 le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
1459 le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
1460 if ((priv->_agn.agg_tids_count > 0) &&
1461 (expected_ack_cnt_delta > 0) &&
1462 (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
1463 < ACK_CNT_RATIO) &&
1464 (ba_timeout_delta > BA_TIMEOUT_CNT)) {
1465 IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
1466 " expected_ack_cnt = %d\n",
1467 actual_ack_cnt_delta, expected_ack_cnt_delta);
1468
1469#ifdef CONFIG_IWLWIFI_DEBUG
1470 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
1471 priv->delta_statistics.tx.rx_detected_cnt);
1472 IWL_DEBUG_RADIO(priv,
1473 "ack_or_ba_timeout_collision delta = %d\n",
1474 priv->delta_statistics.tx.
1475 ack_or_ba_timeout_collision);
1476#endif
1477 IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
1478 ba_timeout_delta);
1479 if (!actual_ack_cnt_delta &&
1480 (ba_timeout_delta >= BA_TIMEOUT_MAX))
1481 rc = false;
1482 }
1483 return rc;
1484}
1485
1451 1486
1452/****************************************************************************** 1487/******************************************************************************
1453 * 1488 *
@@ -1471,9 +1506,13 @@ static void iwl_nic_start(struct iwl_priv *priv)
1471 iwl_write32(priv, CSR_RESET, 0); 1506 iwl_write32(priv, CSR_RESET, 0);
1472} 1507}
1473 1508
1509struct iwlagn_ucode_capabilities {
1510 u32 max_probe_length;
1511};
1474 1512
1475static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context); 1513static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
1476static int iwl_mac_setup_register(struct iwl_priv *priv); 1514static int iwl_mac_setup_register(struct iwl_priv *priv,
1515 struct iwlagn_ucode_capabilities *capa);
1477 1516
1478static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first) 1517static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
1479{ 1518{
@@ -1500,6 +1539,199 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
1500 iwl_ucode_callback); 1539 iwl_ucode_callback);
1501} 1540}
1502 1541
1542struct iwlagn_firmware_pieces {
1543 const void *inst, *data, *init, *init_data, *boot;
1544 size_t inst_size, data_size, init_size, init_data_size, boot_size;
1545
1546 u32 build;
1547};
1548
1549static int iwlagn_load_legacy_firmware(struct iwl_priv *priv,
1550 const struct firmware *ucode_raw,
1551 struct iwlagn_firmware_pieces *pieces)
1552{
1553 struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
1554 u32 api_ver, hdr_size;
1555 const u8 *src;
1556
1557 priv->ucode_ver = le32_to_cpu(ucode->ver);
1558 api_ver = IWL_UCODE_API(priv->ucode_ver);
1559
1560 switch (api_ver) {
1561 default:
1562 /*
1563 * 4965 doesn't revision the firmware file format
1564 * along with the API version, it always uses v1
1565 * file format.
1566 */
1567 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) !=
1568 CSR_HW_REV_TYPE_4965) {
1569 hdr_size = 28;
1570 if (ucode_raw->size < hdr_size) {
1571 IWL_ERR(priv, "File size too small!\n");
1572 return -EINVAL;
1573 }
1574 pieces->build = le32_to_cpu(ucode->u.v2.build);
1575 pieces->inst_size = le32_to_cpu(ucode->u.v2.inst_size);
1576 pieces->data_size = le32_to_cpu(ucode->u.v2.data_size);
1577 pieces->init_size = le32_to_cpu(ucode->u.v2.init_size);
1578 pieces->init_data_size = le32_to_cpu(ucode->u.v2.init_data_size);
1579 pieces->boot_size = le32_to_cpu(ucode->u.v2.boot_size);
1580 src = ucode->u.v2.data;
1581 break;
1582 }
1583 /* fall through for 4965 */
1584 case 0:
1585 case 1:
1586 case 2:
1587 hdr_size = 24;
1588 if (ucode_raw->size < hdr_size) {
1589 IWL_ERR(priv, "File size too small!\n");
1590 return -EINVAL;
1591 }
1592 pieces->build = 0;
1593 pieces->inst_size = le32_to_cpu(ucode->u.v1.inst_size);
1594 pieces->data_size = le32_to_cpu(ucode->u.v1.data_size);
1595 pieces->init_size = le32_to_cpu(ucode->u.v1.init_size);
1596 pieces->init_data_size = le32_to_cpu(ucode->u.v1.init_data_size);
1597 pieces->boot_size = le32_to_cpu(ucode->u.v1.boot_size);
1598 src = ucode->u.v1.data;
1599 break;
1600 }
1601
1602 /* Verify size of file vs. image size info in file's header */
1603 if (ucode_raw->size != hdr_size + pieces->inst_size +
1604 pieces->data_size + pieces->init_size +
1605 pieces->init_data_size + pieces->boot_size) {
1606
1607 IWL_ERR(priv,
1608 "uCode file size %d does not match expected size\n",
1609 (int)ucode_raw->size);
1610 return -EINVAL;
1611 }
1612
1613 pieces->inst = src;
1614 src += pieces->inst_size;
1615 pieces->data = src;
1616 src += pieces->data_size;
1617 pieces->init = src;
1618 src += pieces->init_size;
1619 pieces->init_data = src;
1620 src += pieces->init_data_size;
1621 pieces->boot = src;
1622 src += pieces->boot_size;
1623
1624 return 0;
1625}
1626
1627static int iwlagn_wanted_ucode_alternative = 1;
1628
1629static int iwlagn_load_firmware(struct iwl_priv *priv,
1630 const struct firmware *ucode_raw,
1631 struct iwlagn_firmware_pieces *pieces,
1632 struct iwlagn_ucode_capabilities *capa)
1633{
1634 struct iwl_tlv_ucode_header *ucode = (void *)ucode_raw->data;
1635 struct iwl_ucode_tlv *tlv;
1636 size_t len = ucode_raw->size;
1637 const u8 *data;
1638 int wanted_alternative = iwlagn_wanted_ucode_alternative, tmp;
1639 u64 alternatives;
1640
1641 if (len < sizeof(*ucode))
1642 return -EINVAL;
1643
1644 if (ucode->magic != cpu_to_le32(IWL_TLV_UCODE_MAGIC))
1645 return -EINVAL;
1646
1647 /*
1648 * Check which alternatives are present, and "downgrade"
1649 * when the chosen alternative is not present, warning
1650 * the user when that happens. Some files may not have
1651 * any alternatives, so don't warn in that case.
1652 */
1653 alternatives = le64_to_cpu(ucode->alternatives);
1654 tmp = wanted_alternative;
1655 if (wanted_alternative > 63)
1656 wanted_alternative = 63;
1657 while (wanted_alternative && !(alternatives & BIT(wanted_alternative)))
1658 wanted_alternative--;
1659 if (wanted_alternative && wanted_alternative != tmp)
1660 IWL_WARN(priv,
1661 "uCode alternative %d not available, choosing %d\n",
1662 tmp, wanted_alternative);
1663
1664 priv->ucode_ver = le32_to_cpu(ucode->ver);
1665 pieces->build = le32_to_cpu(ucode->build);
1666 data = ucode->data;
1667
1668 len -= sizeof(*ucode);
1669
1670 while (len >= sizeof(*tlv)) {
1671 u32 tlv_len;
1672 enum iwl_ucode_tlv_type tlv_type;
1673 u16 tlv_alt;
1674 const u8 *tlv_data;
1675
1676 len -= sizeof(*tlv);
1677 tlv = (void *)data;
1678
1679 tlv_len = le32_to_cpu(tlv->length);
1680 tlv_type = le16_to_cpu(tlv->type);
1681 tlv_alt = le16_to_cpu(tlv->alternative);
1682 tlv_data = tlv->data;
1683
1684 if (len < tlv_len)
1685 return -EINVAL;
1686 len -= ALIGN(tlv_len, 4);
1687 data += sizeof(*tlv) + ALIGN(tlv_len, 4);
1688
1689 /*
1690 * Alternative 0 is always valid.
1691 *
1692 * Skip alternative TLVs that are not selected.
1693 */
1694 if (tlv_alt != 0 && tlv_alt != wanted_alternative)
1695 continue;
1696
1697 switch (tlv_type) {
1698 case IWL_UCODE_TLV_INST:
1699 pieces->inst = tlv_data;
1700 pieces->inst_size = tlv_len;
1701 break;
1702 case IWL_UCODE_TLV_DATA:
1703 pieces->data = tlv_data;
1704 pieces->data_size = tlv_len;
1705 break;
1706 case IWL_UCODE_TLV_INIT:
1707 pieces->init = tlv_data;
1708 pieces->init_size = tlv_len;
1709 break;
1710 case IWL_UCODE_TLV_INIT_DATA:
1711 pieces->init_data = tlv_data;
1712 pieces->init_data_size = tlv_len;
1713 break;
1714 case IWL_UCODE_TLV_BOOT:
1715 pieces->boot = tlv_data;
1716 pieces->boot_size = tlv_len;
1717 break;
1718 case IWL_UCODE_TLV_PROBE_MAX_LEN:
1719 if (tlv_len != 4)
1720 return -EINVAL;
1721 capa->max_probe_length =
1722 le32_to_cpup((__le32 *)tlv_data);
1723 break;
1724 default:
1725 break;
1726 }
1727 }
1728
1729 if (len)
1730 return -EINVAL;
1731
1732 return 0;
1733}
1734
1503/** 1735/**
1504 * iwl_ucode_callback - callback when firmware was loaded 1736 * iwl_ucode_callback - callback when firmware was loaded
1505 * 1737 *
@@ -1510,14 +1742,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1510{ 1742{
1511 struct iwl_priv *priv = context; 1743 struct iwl_priv *priv = context;
1512 struct iwl_ucode_header *ucode; 1744 struct iwl_ucode_header *ucode;
1745 int err;
1746 struct iwlagn_firmware_pieces pieces;
1513 const unsigned int api_max = priv->cfg->ucode_api_max; 1747 const unsigned int api_max = priv->cfg->ucode_api_max;
1514 const unsigned int api_min = priv->cfg->ucode_api_min; 1748 const unsigned int api_min = priv->cfg->ucode_api_min;
1515 u8 *src; 1749 u32 api_ver;
1516 size_t len; 1750 char buildstr[25];
1517 u32 api_ver, build; 1751 u32 build;
1518 u32 inst_size, data_size, init_size, init_data_size, boot_size; 1752 struct iwlagn_ucode_capabilities ucode_capa = {
1519 int err; 1753 .max_probe_length = 200,
1520 u16 eeprom_ver; 1754 };
1755
1756 memset(&pieces, 0, sizeof(pieces));
1521 1757
1522 if (!ucode_raw) { 1758 if (!ucode_raw) {
1523 IWL_ERR(priv, "request for firmware file '%s' failed.\n", 1759 IWL_ERR(priv, "request for firmware file '%s' failed.\n",
@@ -1528,8 +1764,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1528 IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n", 1764 IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
1529 priv->firmware_name, ucode_raw->size); 1765 priv->firmware_name, ucode_raw->size);
1530 1766
1531 /* Make sure that we got at least the v1 header! */ 1767 /* Make sure that we got at least the API version number */
1532 if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) { 1768 if (ucode_raw->size < 4) {
1533 IWL_ERR(priv, "File size way too small!\n"); 1769 IWL_ERR(priv, "File size way too small!\n");
1534 goto try_again; 1770 goto try_again;
1535 } 1771 }
@@ -1537,21 +1773,23 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1537 /* Data from ucode file: header followed by uCode images */ 1773 /* Data from ucode file: header followed by uCode images */
1538 ucode = (struct iwl_ucode_header *)ucode_raw->data; 1774 ucode = (struct iwl_ucode_header *)ucode_raw->data;
1539 1775
1540 priv->ucode_ver = le32_to_cpu(ucode->ver); 1776 if (ucode->ver)
1777 err = iwlagn_load_legacy_firmware(priv, ucode_raw, &pieces);
1778 else
1779 err = iwlagn_load_firmware(priv, ucode_raw, &pieces,
1780 &ucode_capa);
1781
1782 if (err)
1783 goto try_again;
1784
1541 api_ver = IWL_UCODE_API(priv->ucode_ver); 1785 api_ver = IWL_UCODE_API(priv->ucode_ver);
1542 build = priv->cfg->ops->ucode->get_build(ucode, api_ver); 1786 build = pieces.build;
1543 inst_size = priv->cfg->ops->ucode->get_inst_size(ucode, api_ver);
1544 data_size = priv->cfg->ops->ucode->get_data_size(ucode, api_ver);
1545 init_size = priv->cfg->ops->ucode->get_init_size(ucode, api_ver);
1546 init_data_size =
1547 priv->cfg->ops->ucode->get_init_data_size(ucode, api_ver);
1548 boot_size = priv->cfg->ops->ucode->get_boot_size(ucode, api_ver);
1549 src = priv->cfg->ops->ucode->get_data(ucode, api_ver);
1550
1551 /* api_ver should match the api version forming part of the
1552 * firmware filename ... but we don't check for that and only rely
1553 * on the API version read from firmware header from here on forward */
1554 1787
1788 /*
1789 * api_ver should match the api version forming part of the
1790 * firmware filename ... but we don't check for that and only rely
1791 * on the API version read from firmware header from here on forward
1792 */
1555 if (api_ver < api_min || api_ver > api_max) { 1793 if (api_ver < api_min || api_ver > api_max) {
1556 IWL_ERR(priv, "Driver unable to support your firmware API. " 1794 IWL_ERR(priv, "Driver unable to support your firmware API. "
1557 "Driver supports v%u, firmware is v%u.\n", 1795 "Driver supports v%u, firmware is v%u.\n",
@@ -1565,40 +1803,26 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1565 "from http://www.intellinuxwireless.org.\n", 1803 "from http://www.intellinuxwireless.org.\n",
1566 api_max, api_ver); 1804 api_max, api_ver);
1567 1805
1568 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n", 1806 if (build)
1569 IWL_UCODE_MAJOR(priv->ucode_ver), 1807 sprintf(buildstr, " build %u", build);
1570 IWL_UCODE_MINOR(priv->ucode_ver), 1808 else
1571 IWL_UCODE_API(priv->ucode_ver), 1809 buildstr[0] = '\0';
1572 IWL_UCODE_SERIAL(priv->ucode_ver)); 1810
1811 IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u%s\n",
1812 IWL_UCODE_MAJOR(priv->ucode_ver),
1813 IWL_UCODE_MINOR(priv->ucode_ver),
1814 IWL_UCODE_API(priv->ucode_ver),
1815 IWL_UCODE_SERIAL(priv->ucode_ver),
1816 buildstr);
1573 1817
1574 snprintf(priv->hw->wiphy->fw_version, 1818 snprintf(priv->hw->wiphy->fw_version,
1575 sizeof(priv->hw->wiphy->fw_version), 1819 sizeof(priv->hw->wiphy->fw_version),
1576 "%u.%u.%u.%u", 1820 "%u.%u.%u.%u%s",
1577 IWL_UCODE_MAJOR(priv->ucode_ver), 1821 IWL_UCODE_MAJOR(priv->ucode_ver),
1578 IWL_UCODE_MINOR(priv->ucode_ver), 1822 IWL_UCODE_MINOR(priv->ucode_ver),
1579 IWL_UCODE_API(priv->ucode_ver), 1823 IWL_UCODE_API(priv->ucode_ver),
1580 IWL_UCODE_SERIAL(priv->ucode_ver)); 1824 IWL_UCODE_SERIAL(priv->ucode_ver),
1581 1825 buildstr);
1582 if (build)
1583 IWL_DEBUG_INFO(priv, "Build %u\n", build);
1584
1585 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
1586 IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n",
1587 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
1588 ? "OTP" : "EEPROM", eeprom_ver);
1589
1590 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1591 priv->ucode_ver);
1592 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
1593 inst_size);
1594 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
1595 data_size);
1596 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
1597 init_size);
1598 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
1599 init_data_size);
1600 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
1601 boot_size);
1602 1826
1603 /* 1827 /*
1604 * For any of the failures below (before allocating pci memory) 1828 * For any of the failures below (before allocating pci memory)
@@ -1606,43 +1830,47 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1606 * user just got a corrupted version of the latest API. 1830 * user just got a corrupted version of the latest API.
1607 */ 1831 */
1608 1832
1609 /* Verify size of file vs. image size info in file's header */ 1833 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
1610 if (ucode_raw->size != 1834 priv->ucode_ver);
1611 priv->cfg->ops->ucode->get_header_size(api_ver) + 1835 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
1612 inst_size + data_size + init_size + 1836 pieces.inst_size);
1613 init_data_size + boot_size) { 1837 IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
1614 1838 pieces.data_size);
1615 IWL_DEBUG_INFO(priv, 1839 IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
1616 "uCode file size %d does not match expected size\n", 1840 pieces.init_size);
1617 (int)ucode_raw->size); 1841 IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
1618 goto try_again; 1842 pieces.init_data_size);
1619 } 1843 IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
1844 pieces.boot_size);
1620 1845
1621 /* Verify that uCode images will fit in card's SRAM */ 1846 /* Verify that uCode images will fit in card's SRAM */
1622 if (inst_size > priv->hw_params.max_inst_size) { 1847 if (pieces.inst_size > priv->hw_params.max_inst_size) {
1623 IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n", 1848 IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
1624 inst_size); 1849 pieces.inst_size);
1625 goto try_again; 1850 goto try_again;
1626 } 1851 }
1627 1852
1628 if (data_size > priv->hw_params.max_data_size) { 1853 if (pieces.data_size > priv->hw_params.max_data_size) {
1629 IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n", 1854 IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
1630 data_size); 1855 pieces.data_size);
1631 goto try_again; 1856 goto try_again;
1632 } 1857 }
1633 if (init_size > priv->hw_params.max_inst_size) { 1858
1634 IWL_INFO(priv, "uCode init instr len %d too large to fit in\n", 1859 if (pieces.init_size > priv->hw_params.max_inst_size) {
1635 init_size); 1860 IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
1861 pieces.init_size);
1636 goto try_again; 1862 goto try_again;
1637 } 1863 }
1638 if (init_data_size > priv->hw_params.max_data_size) { 1864
1639 IWL_INFO(priv, "uCode init data len %d too large to fit in\n", 1865 if (pieces.init_data_size > priv->hw_params.max_data_size) {
1640 init_data_size); 1866 IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
1867 pieces.init_data_size);
1641 goto try_again; 1868 goto try_again;
1642 } 1869 }
1643 if (boot_size > priv->hw_params.max_bsm_size) { 1870
1644 IWL_INFO(priv, "uCode boot instr len %d too large to fit in\n", 1871 if (pieces.boot_size > priv->hw_params.max_bsm_size) {
1645 boot_size); 1872 IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
1873 pieces.boot_size);
1646 goto try_again; 1874 goto try_again;
1647 } 1875 }
1648 1876
@@ -1651,13 +1879,13 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1651 /* Runtime instructions and 2 copies of data: 1879 /* Runtime instructions and 2 copies of data:
1652 * 1) unmodified from disk 1880 * 1) unmodified from disk
1653 * 2) backup cache for save/restore during power-downs */ 1881 * 2) backup cache for save/restore during power-downs */
1654 priv->ucode_code.len = inst_size; 1882 priv->ucode_code.len = pieces.inst_size;
1655 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code); 1883 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
1656 1884
1657 priv->ucode_data.len = data_size; 1885 priv->ucode_data.len = pieces.data_size;
1658 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data); 1886 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
1659 1887
1660 priv->ucode_data_backup.len = data_size; 1888 priv->ucode_data_backup.len = pieces.data_size;
1661 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup); 1889 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
1662 1890
1663 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr || 1891 if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
@@ -1665,11 +1893,11 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1665 goto err_pci_alloc; 1893 goto err_pci_alloc;
1666 1894
1667 /* Initialization instructions and data */ 1895 /* Initialization instructions and data */
1668 if (init_size && init_data_size) { 1896 if (pieces.init_size && pieces.init_data_size) {
1669 priv->ucode_init.len = init_size; 1897 priv->ucode_init.len = pieces.init_size;
1670 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init); 1898 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
1671 1899
1672 priv->ucode_init_data.len = init_data_size; 1900 priv->ucode_init_data.len = pieces.init_data_size;
1673 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data); 1901 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
1674 1902
1675 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr) 1903 if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
@@ -1677,8 +1905,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1677 } 1905 }
1678 1906
1679 /* Bootstrap (instructions only, no data) */ 1907 /* Bootstrap (instructions only, no data) */
1680 if (boot_size) { 1908 if (pieces.boot_size) {
1681 priv->ucode_boot.len = boot_size; 1909 priv->ucode_boot.len = pieces.boot_size;
1682 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot); 1910 iwl_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
1683 1911
1684 if (!priv->ucode_boot.v_addr) 1912 if (!priv->ucode_boot.v_addr)
@@ -1688,51 +1916,48 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1688 /* Copy images into buffers for card's bus-master reads ... */ 1916 /* Copy images into buffers for card's bus-master reads ... */
1689 1917
1690 /* Runtime instructions (first block of data in file) */ 1918 /* Runtime instructions (first block of data in file) */
1691 len = inst_size; 1919 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
1692 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n", len); 1920 pieces.inst_size);
1693 memcpy(priv->ucode_code.v_addr, src, len); 1921 memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
1694 src += len;
1695 1922
1696 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n", 1923 IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
1697 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr); 1924 priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
1698 1925
1699 /* Runtime data (2nd block) 1926 /*
1700 * NOTE: Copy into backup buffer will be done in iwl_up() */ 1927 * Runtime data
1701 len = data_size; 1928 * NOTE: Copy into backup buffer will be done in iwl_up()
1702 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n", len); 1929 */
1703 memcpy(priv->ucode_data.v_addr, src, len); 1930 IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
1704 memcpy(priv->ucode_data_backup.v_addr, src, len); 1931 pieces.data_size);
1705 src += len; 1932 memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
1706 1933 memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
1707 /* Initialization instructions (3rd block) */ 1934
1708 if (init_size) { 1935 /* Initialization instructions */
1709 len = init_size; 1936 if (pieces.init_size) {
1710 IWL_DEBUG_INFO(priv, "Copying (but not loading) init instr len %Zd\n", 1937 IWL_DEBUG_INFO(priv, "Copying (but not loading) init instr len %Zd\n",
1711 len); 1938 pieces.init_size);
1712 memcpy(priv->ucode_init.v_addr, src, len); 1939 memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
1713 src += len;
1714 } 1940 }
1715 1941
1716 /* Initialization data (4th block) */ 1942 /* Initialization data */
1717 if (init_data_size) { 1943 if (pieces.init_data_size) {
1718 len = init_data_size;
1719 IWL_DEBUG_INFO(priv, "Copying (but not loading) init data len %Zd\n", 1944 IWL_DEBUG_INFO(priv, "Copying (but not loading) init data len %Zd\n",
1720 len); 1945 pieces.init_data_size);
1721 memcpy(priv->ucode_init_data.v_addr, src, len); 1946 memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
1722 src += len; 1947 pieces.init_data_size);
1723 } 1948 }
1724 1949
1725 /* Bootstrap instructions (5th block) */ 1950 /* Bootstrap instructions */
1726 len = boot_size; 1951 IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
1727 IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n", len); 1952 pieces.boot_size);
1728 memcpy(priv->ucode_boot.v_addr, src, len); 1953 memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
1729 1954
1730 /************************************************** 1955 /**************************************************
1731 * This is still part of probe() in a sense... 1956 * This is still part of probe() in a sense...
1732 * 1957 *
1733 * 9. Setup and register with mac80211 and debugfs 1958 * 9. Setup and register with mac80211 and debugfs
1734 **************************************************/ 1959 **************************************************/
1735 err = iwl_mac_setup_register(priv); 1960 err = iwl_mac_setup_register(priv, &ucode_capa);
1736 if (err) 1961 if (err)
1737 goto out_unbind; 1962 goto out_unbind;
1738 1963
@@ -1742,6 +1967,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1742 1967
1743 /* We have our copies now, allow OS release its copies */ 1968 /* We have our copies now, allow OS release its copies */
1744 release_firmware(ucode_raw); 1969 release_firmware(ucode_raw);
1970 complete(&priv->_agn.firmware_loading_complete);
1745 return; 1971 return;
1746 1972
1747 try_again: 1973 try_again:
@@ -1755,6 +1981,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
1755 IWL_ERR(priv, "failed to allocate pci memory\n"); 1981 IWL_ERR(priv, "failed to allocate pci memory\n");
1756 iwl_dealloc_ucode_pci(priv); 1982 iwl_dealloc_ucode_pci(priv);
1757 out_unbind: 1983 out_unbind:
1984 complete(&priv->_agn.firmware_loading_complete);
1758 device_release_driver(&priv->pci_dev->dev); 1985 device_release_driver(&priv->pci_dev->dev);
1759 release_firmware(ucode_raw); 1986 release_firmware(ucode_raw);
1760} 1987}
@@ -1809,6 +2036,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1809 u32 data2, line; 2036 u32 data2, line;
1810 u32 desc, time, count, base, data1; 2037 u32 desc, time, count, base, data1;
1811 u32 blink1, blink2, ilink1, ilink2; 2038 u32 blink1, blink2, ilink1, ilink2;
2039 u32 pc, hcmd;
1812 2040
1813 if (priv->ucode_type == UCODE_INIT) 2041 if (priv->ucode_type == UCODE_INIT)
1814 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); 2042 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
@@ -1831,6 +2059,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1831 } 2059 }
1832 2060
1833 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32)); 2061 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
2062 pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32));
1834 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32)); 2063 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
1835 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32)); 2064 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
1836 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32)); 2065 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
@@ -1839,6 +2068,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1839 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32)); 2068 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
1840 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); 2069 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1841 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); 2070 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
2071 hcmd = iwl_read_targ_mem(priv, base + 22 * sizeof(u32));
1842 2072
1843 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line, 2073 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
1844 blink1, blink2, ilink1, ilink2); 2074 blink1, blink2, ilink1, ilink2);
@@ -1847,10 +2077,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1847 "data1 data2 line\n"); 2077 "data1 data2 line\n");
1848 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n", 2078 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
1849 desc_lookup(desc), desc, time, data1, data2, line); 2079 desc_lookup(desc), desc, time, data1, data2, line);
1850 IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n"); 2080 IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
1851 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2, 2081 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
1852 ilink1, ilink2); 2082 pc, blink1, blink2, ilink1, ilink2, hcmd);
1853
1854} 2083}
1855 2084
1856#define EVENT_START_OFFSET (4 * sizeof(u32)) 2085#define EVENT_START_OFFSET (4 * sizeof(u32))
@@ -1966,9 +2195,6 @@ static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1966 return pos; 2195 return pos;
1967} 2196}
1968 2197
1969/* For sanity check only. Actual size is determined by uCode, typ. 512 */
1970#define MAX_EVENT_LOG_SIZE (512)
1971
1972#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) 2198#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1973 2199
1974int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, 2200int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
@@ -2001,16 +2227,16 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2001 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 2227 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
2002 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 2228 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
2003 2229
2004 if (capacity > MAX_EVENT_LOG_SIZE) { 2230 if (capacity > priv->cfg->max_event_log_size) {
2005 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", 2231 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
2006 capacity, MAX_EVENT_LOG_SIZE); 2232 capacity, priv->cfg->max_event_log_size);
2007 capacity = MAX_EVENT_LOG_SIZE; 2233 capacity = priv->cfg->max_event_log_size;
2008 } 2234 }
2009 2235
2010 if (next_entry > MAX_EVENT_LOG_SIZE) { 2236 if (next_entry > priv->cfg->max_event_log_size) {
2011 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", 2237 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
2012 next_entry, MAX_EVENT_LOG_SIZE); 2238 next_entry, priv->cfg->max_event_log_size);
2013 next_entry = MAX_EVENT_LOG_SIZE; 2239 next_entry = priv->cfg->max_event_log_size;
2014 } 2240 }
2015 2241
2016 size = num_wraps ? capacity : next_entry; 2242 size = num_wraps ? capacity : next_entry;
@@ -2095,7 +2321,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
2095 goto restart; 2321 goto restart;
2096 } 2322 }
2097 2323
2098 iwl_clear_stations_table(priv);
2099 ret = priv->cfg->ops->lib->alive_notify(priv); 2324 ret = priv->cfg->ops->lib->alive_notify(priv);
2100 if (ret) { 2325 if (ret) {
2101 IWL_WARN(priv, 2326 IWL_WARN(priv,
@@ -2106,13 +2331,19 @@ static void iwl_alive_start(struct iwl_priv *priv)
2106 /* After the ALIVE response, we can send host commands to the uCode */ 2331 /* After the ALIVE response, we can send host commands to the uCode */
2107 set_bit(STATUS_ALIVE, &priv->status); 2332 set_bit(STATUS_ALIVE, &priv->status);
2108 2333
2334 if (priv->cfg->ops->lib->recover_from_tx_stall) {
2335 /* Enable timer to monitor the driver queues */
2336 mod_timer(&priv->monitor_recover,
2337 jiffies +
2338 msecs_to_jiffies(priv->cfg->monitor_recover_period));
2339 }
2340
2109 if (iwl_is_rfkill(priv)) 2341 if (iwl_is_rfkill(priv))
2110 return; 2342 return;
2111 2343
2112 ieee80211_wake_queues(priv->hw); 2344 ieee80211_wake_queues(priv->hw);
2113 2345
2114 priv->active_rate = priv->rates_mask; 2346 priv->active_rate = IWL_RATES_MASK;
2115 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
2116 2347
2117 /* Configure Tx antenna selection based on H/W config */ 2348 /* Configure Tx antenna selection based on H/W config */
2118 if (priv->cfg->ops->hcmd->set_tx_ant) 2349 if (priv->cfg->ops->hcmd->set_tx_ant)
@@ -2126,7 +2357,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
2126 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2357 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2127 } else { 2358 } else {
2128 /* Initialize our rx_config data */ 2359 /* Initialize our rx_config data */
2129 iwl_connection_init_rx_config(priv, priv->iw_mode); 2360 iwl_connection_init_rx_config(priv, NULL);
2130 2361
2131 if (priv->cfg->ops->hcmd->set_rxon_chain) 2362 if (priv->cfg->ops->hcmd->set_rxon_chain)
2132 priv->cfg->ops->hcmd->set_rxon_chain(priv); 2363 priv->cfg->ops->hcmd->set_rxon_chain(priv);
@@ -2135,7 +2366,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
2135 } 2366 }
2136 2367
2137 /* Configure Bluetooth device coexistence support */ 2368 /* Configure Bluetooth device coexistence support */
2138 iwl_send_bt_config(priv); 2369 priv->cfg->ops->hcmd->send_bt_config(priv);
2139 2370
2140 iwl_reset_run_time_calib(priv); 2371 iwl_reset_run_time_calib(priv);
2141 2372
@@ -2152,18 +2383,8 @@ static void iwl_alive_start(struct iwl_priv *priv)
2152 wake_up_interruptible(&priv->wait_command_queue); 2383 wake_up_interruptible(&priv->wait_command_queue);
2153 2384
2154 iwl_power_update_mode(priv, true); 2385 iwl_power_update_mode(priv, true);
2386 IWL_DEBUG_INFO(priv, "Updated power mode\n");
2155 2387
2156 /* reassociate for ADHOC mode */
2157 if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
2158 struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
2159 priv->vif);
2160 if (beacon)
2161 iwl_mac_beacon_update(priv->hw, beacon);
2162 }
2163
2164
2165 if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
2166 iwl_set_mode(priv, priv->iw_mode);
2167 2388
2168 return; 2389 return;
2169 2390
@@ -2183,7 +2404,9 @@ static void __iwl_down(struct iwl_priv *priv)
2183 if (!exit_pending) 2404 if (!exit_pending)
2184 set_bit(STATUS_EXIT_PENDING, &priv->status); 2405 set_bit(STATUS_EXIT_PENDING, &priv->status);
2185 2406
2186 iwl_clear_stations_table(priv); 2407 iwl_clear_ucode_stations(priv);
2408 iwl_dealloc_bcast_station(priv);
2409 iwl_clear_driver_stations(priv);
2187 2410
2188 /* Unblock any waiting calls */ 2411 /* Unblock any waiting calls */
2189 wake_up_interruptible_all(&priv->wait_command_queue); 2412 wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2231,8 +2454,8 @@ static void __iwl_down(struct iwl_priv *priv)
2231 /* device going down, Stop using ICT table */ 2454 /* device going down, Stop using ICT table */
2232 iwl_disable_ict(priv); 2455 iwl_disable_ict(priv);
2233 2456
2234 iwl_txq_ctx_stop(priv); 2457 iwlagn_txq_ctx_stop(priv);
2235 iwl_rxq_stop(priv); 2458 iwlagn_rxq_stop(priv);
2236 2459
2237 /* Power-down device's busmaster DMA clocks */ 2460 /* Power-down device's busmaster DMA clocks */
2238 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); 2461 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
@@ -2292,7 +2515,7 @@ static int iwl_prepare_card_hw(struct iwl_priv *priv)
2292{ 2515{
2293 int ret = 0; 2516 int ret = 0;
2294 2517
2295 IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter \n"); 2518 IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n");
2296 2519
2297 ret = iwl_set_hw_ready(priv); 2520 ret = iwl_set_hw_ready(priv);
2298 if (priv->hw_ready) 2521 if (priv->hw_ready)
@@ -2330,6 +2553,10 @@ static int __iwl_up(struct iwl_priv *priv)
2330 return -EIO; 2553 return -EIO;
2331 } 2554 }
2332 2555
2556 ret = iwl_alloc_bcast_station(priv, true);
2557 if (ret)
2558 return ret;
2559
2333 iwl_prepare_card_hw(priv); 2560 iwl_prepare_card_hw(priv);
2334 2561
2335 if (!priv->hw_ready) { 2562 if (!priv->hw_ready) {
@@ -2353,7 +2580,7 @@ static int __iwl_up(struct iwl_priv *priv)
2353 2580
2354 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2581 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2355 2582
2356 ret = iwl_hw_nic_init(priv); 2583 ret = iwlagn_hw_nic_init(priv);
2357 if (ret) { 2584 if (ret) {
2358 IWL_ERR(priv, "Unable to init nic\n"); 2585 IWL_ERR(priv, "Unable to init nic\n");
2359 return ret; 2586 return ret;
@@ -2380,8 +2607,6 @@ static int __iwl_up(struct iwl_priv *priv)
2380 2607
2381 for (i = 0; i < MAX_HW_RESTARTS; i++) { 2608 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2382 2609
2383 iwl_clear_stations_table(priv);
2384
2385 /* load bootstrap state machine, 2610 /* load bootstrap state machine,
2386 * load bootstrap program into processor's memory, 2611 * load bootstrap program into processor's memory,
2387 * prepare to load the "initialize" uCode */ 2612 * prepare to load the "initialize" uCode */
@@ -2467,7 +2692,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
2467 } 2692 }
2468 2693
2469 mutex_unlock(&priv->mutex); 2694 mutex_unlock(&priv->mutex);
2470 return;
2471} 2695}
2472 2696
2473static void iwl_bg_restart(struct work_struct *data) 2697static void iwl_bg_restart(struct work_struct *data)
@@ -2505,34 +2729,28 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
2505 return; 2729 return;
2506 2730
2507 mutex_lock(&priv->mutex); 2731 mutex_lock(&priv->mutex);
2508 iwl_rx_replenish(priv); 2732 iwlagn_rx_replenish(priv);
2509 mutex_unlock(&priv->mutex); 2733 mutex_unlock(&priv->mutex);
2510} 2734}
2511 2735
2512#define IWL_DELAY_NEXT_SCAN (HZ*2) 2736#define IWL_DELAY_NEXT_SCAN (HZ*2)
2513 2737
2514void iwl_post_associate(struct iwl_priv *priv) 2738void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
2515{ 2739{
2516 struct ieee80211_conf *conf = NULL; 2740 struct ieee80211_conf *conf = NULL;
2517 int ret = 0; 2741 int ret = 0;
2518 unsigned long flags;
2519 2742
2520 if (priv->iw_mode == NL80211_IFTYPE_AP) { 2743 if (!vif || !priv->is_open)
2744 return;
2745
2746 if (vif->type == NL80211_IFTYPE_AP) {
2521 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); 2747 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
2522 return; 2748 return;
2523 } 2749 }
2524 2750
2525 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2526 priv->assoc_id, priv->active_rxon.bssid_addr);
2527
2528
2529 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2751 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2530 return; 2752 return;
2531 2753
2532
2533 if (!priv->vif || !priv->is_open)
2534 return;
2535
2536 iwl_scan_cancel_timeout(priv, 200); 2754 iwl_scan_cancel_timeout(priv, 200);
2537 2755
2538 conf = ieee80211_get_hw_conf(priv->hw); 2756 conf = ieee80211_get_hw_conf(priv->hw);
@@ -2540,7 +2758,7 @@ void iwl_post_associate(struct iwl_priv *priv)
2540 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2758 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2541 iwlcore_commit_rxon(priv); 2759 iwlcore_commit_rxon(priv);
2542 2760
2543 iwl_setup_rxon_timing(priv); 2761 iwl_setup_rxon_timing(priv, vif);
2544 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, 2762 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
2545 sizeof(priv->rxon_timing), &priv->rxon_timing); 2763 sizeof(priv->rxon_timing), &priv->rxon_timing);
2546 if (ret) 2764 if (ret)
@@ -2554,56 +2772,44 @@ void iwl_post_associate(struct iwl_priv *priv)
2554 if (priv->cfg->ops->hcmd->set_rxon_chain) 2772 if (priv->cfg->ops->hcmd->set_rxon_chain)
2555 priv->cfg->ops->hcmd->set_rxon_chain(priv); 2773 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2556 2774
2557 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 2775 priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid);
2558 2776
2559 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", 2777 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
2560 priv->assoc_id, priv->beacon_int); 2778 vif->bss_conf.aid, vif->bss_conf.beacon_int);
2561 2779
2562 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) 2780 if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
2563 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 2781 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2564 else 2782 else
2565 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 2783 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2566 2784
2567 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 2785 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
2568 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) 2786 if (vif->bss_conf.assoc_capability &
2787 WLAN_CAPABILITY_SHORT_SLOT_TIME)
2569 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 2788 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
2570 else 2789 else
2571 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2790 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2572 2791
2573 if (priv->iw_mode == NL80211_IFTYPE_ADHOC) 2792 if (vif->type == NL80211_IFTYPE_ADHOC)
2574 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 2793 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2575
2576 } 2794 }
2577 2795
2578 iwlcore_commit_rxon(priv); 2796 iwlcore_commit_rxon(priv);
2579 2797
2580 switch (priv->iw_mode) { 2798 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2799 vif->bss_conf.aid, priv->active_rxon.bssid_addr);
2800
2801 switch (vif->type) {
2581 case NL80211_IFTYPE_STATION: 2802 case NL80211_IFTYPE_STATION:
2582 break; 2803 break;
2583
2584 case NL80211_IFTYPE_ADHOC: 2804 case NL80211_IFTYPE_ADHOC:
2585
2586 /* assume default assoc id */
2587 priv->assoc_id = 1;
2588
2589 iwl_rxon_add_station(priv, priv->bssid, 0);
2590 iwl_send_beacon_cmd(priv); 2805 iwl_send_beacon_cmd(priv);
2591
2592 break; 2806 break;
2593
2594 default: 2807 default:
2595 IWL_ERR(priv, "%s Should not be called in %d mode\n", 2808 IWL_ERR(priv, "%s Should not be called in %d mode\n",
2596 __func__, priv->iw_mode); 2809 __func__, vif->type);
2597 break; 2810 break;
2598 } 2811 }
2599 2812
2600 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
2601 priv->assoc_station_added = 1;
2602
2603 spin_lock_irqsave(&priv->lock, flags);
2604 iwl_activate_qos(priv, 0);
2605 spin_unlock_irqrestore(&priv->lock, flags);
2606
2607 /* the chain noise calibration will enabled PM upon completion 2813 /* the chain noise calibration will enabled PM upon completion
2608 * If chain noise has already been run, then we need to enable 2814 * If chain noise has already been run, then we need to enable
2609 * power management here */ 2815 * power management here */
@@ -2628,7 +2834,8 @@ void iwl_post_associate(struct iwl_priv *priv)
2628 * Not a mac80211 entry point function, but it fits in with all the 2834 * Not a mac80211 entry point function, but it fits in with all the
2629 * other mac80211 functions grouped here. 2835 * other mac80211 functions grouped here.
2630 */ 2836 */
2631static int iwl_mac_setup_register(struct iwl_priv *priv) 2837static int iwl_mac_setup_register(struct iwl_priv *priv,
2838 struct iwlagn_ucode_capabilities *capa)
2632{ 2839{
2633 int ret; 2840 int ret;
2634 struct ieee80211_hw *hw = priv->hw; 2841 struct ieee80211_hw *hw = priv->hw;
@@ -2636,7 +2843,6 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
2636 2843
2637 /* Tell mac80211 our characteristics */ 2844 /* Tell mac80211 our characteristics */
2638 hw->flags = IEEE80211_HW_SIGNAL_DBM | 2845 hw->flags = IEEE80211_HW_SIGNAL_DBM |
2639 IEEE80211_HW_NOISE_DBM |
2640 IEEE80211_HW_AMPDU_AGGREGATION | 2846 IEEE80211_HW_AMPDU_AGGREGATION |
2641 IEEE80211_HW_SPECTRUM_MGMT; 2847 IEEE80211_HW_SPECTRUM_MGMT;
2642 2848
@@ -2649,6 +2855,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
2649 IEEE80211_HW_SUPPORTS_STATIC_SMPS; 2855 IEEE80211_HW_SUPPORTS_STATIC_SMPS;
2650 2856
2651 hw->sta_data_size = sizeof(struct iwl_station_priv); 2857 hw->sta_data_size = sizeof(struct iwl_station_priv);
2858 hw->vif_data_size = sizeof(struct iwl_vif_priv);
2859
2652 hw->wiphy->interface_modes = 2860 hw->wiphy->interface_modes =
2653 BIT(NL80211_IFTYPE_STATION) | 2861 BIT(NL80211_IFTYPE_STATION) |
2654 BIT(NL80211_IFTYPE_ADHOC); 2862 BIT(NL80211_IFTYPE_ADHOC);
@@ -2664,7 +2872,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv)
2664 2872
2665 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; 2873 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
2666 /* we create the 802.11 header and a zero-length SSID element */ 2874 /* we create the 802.11 header and a zero-length SSID element */
2667 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; 2875 hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
2668 2876
2669 /* Default value; 4 EDCA QOS priorities */ 2877 /* Default value; 4 EDCA QOS priorities */
2670 hw->queues = 4; 2878 hw->queues = 4;
@@ -2770,17 +2978,16 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2770 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 2978 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2771 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 2979 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2772 2980
2773 if (iwl_tx_skb(priv, skb)) 2981 if (iwlagn_tx_skb(priv, skb))
2774 dev_kfree_skb_any(skb); 2982 dev_kfree_skb_any(skb);
2775 2983
2776 IWL_DEBUG_MACDUMP(priv, "leave\n"); 2984 IWL_DEBUG_MACDUMP(priv, "leave\n");
2777 return NETDEV_TX_OK; 2985 return NETDEV_TX_OK;
2778} 2986}
2779 2987
2780void iwl_config_ap(struct iwl_priv *priv) 2988void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
2781{ 2989{
2782 int ret = 0; 2990 int ret = 0;
2783 unsigned long flags;
2784 2991
2785 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2992 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2786 return; 2993 return;
@@ -2793,7 +3000,7 @@ void iwl_config_ap(struct iwl_priv *priv)
2793 iwlcore_commit_rxon(priv); 3000 iwlcore_commit_rxon(priv);
2794 3001
2795 /* RXON Timing */ 3002 /* RXON Timing */
2796 iwl_setup_rxon_timing(priv); 3003 iwl_setup_rxon_timing(priv, vif);
2797 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, 3004 ret = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
2798 sizeof(priv->rxon_timing), &priv->rxon_timing); 3005 sizeof(priv->rxon_timing), &priv->rxon_timing);
2799 if (ret) 3006 if (ret)
@@ -2807,9 +3014,10 @@ void iwl_config_ap(struct iwl_priv *priv)
2807 if (priv->cfg->ops->hcmd->set_rxon_chain) 3014 if (priv->cfg->ops->hcmd->set_rxon_chain)
2808 priv->cfg->ops->hcmd->set_rxon_chain(priv); 3015 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2809 3016
2810 /* FIXME: what should be the assoc_id for AP? */ 3017 priv->staging_rxon.assoc_id = 0;
2811 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 3018
2812 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) 3019 if (vif->bss_conf.assoc_capability &
3020 WLAN_CAPABILITY_SHORT_PREAMBLE)
2813 priv->staging_rxon.flags |= 3021 priv->staging_rxon.flags |=
2814 RXON_FLG_SHORT_PREAMBLE_MSK; 3022 RXON_FLG_SHORT_PREAMBLE_MSK;
2815 else 3023 else
@@ -2817,26 +3025,21 @@ void iwl_config_ap(struct iwl_priv *priv)
2817 ~RXON_FLG_SHORT_PREAMBLE_MSK; 3025 ~RXON_FLG_SHORT_PREAMBLE_MSK;
2818 3026
2819 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3027 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
2820 if (priv->assoc_capability & 3028 if (vif->bss_conf.assoc_capability &
2821 WLAN_CAPABILITY_SHORT_SLOT_TIME) 3029 WLAN_CAPABILITY_SHORT_SLOT_TIME)
2822 priv->staging_rxon.flags |= 3030 priv->staging_rxon.flags |=
2823 RXON_FLG_SHORT_SLOT_MSK; 3031 RXON_FLG_SHORT_SLOT_MSK;
2824 else 3032 else
2825 priv->staging_rxon.flags &= 3033 priv->staging_rxon.flags &=
2826 ~RXON_FLG_SHORT_SLOT_MSK; 3034 ~RXON_FLG_SHORT_SLOT_MSK;
2827 3035
2828 if (priv->iw_mode == NL80211_IFTYPE_ADHOC) 3036 if (vif->type == NL80211_IFTYPE_ADHOC)
2829 priv->staging_rxon.flags &= 3037 priv->staging_rxon.flags &=
2830 ~RXON_FLG_SHORT_SLOT_MSK; 3038 ~RXON_FLG_SHORT_SLOT_MSK;
2831 } 3039 }
2832 /* restore RXON assoc */ 3040 /* restore RXON assoc */
2833 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3041 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
2834 iwlcore_commit_rxon(priv); 3042 iwlcore_commit_rxon(priv);
2835 iwl_reset_qos(priv);
2836 spin_lock_irqsave(&priv->lock, flags);
2837 iwl_activate_qos(priv, 1);
2838 spin_unlock_irqrestore(&priv->lock, flags);
2839 iwl_add_bcast_station(priv);
2840 } 3043 }
2841 iwl_send_beacon_cmd(priv); 3044 iwl_send_beacon_cmd(priv);
2842 3045
@@ -2855,8 +3058,7 @@ static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
2855 struct iwl_priv *priv = hw->priv; 3058 struct iwl_priv *priv = hw->priv;
2856 IWL_DEBUG_MAC80211(priv, "enter\n"); 3059 IWL_DEBUG_MAC80211(priv, "enter\n");
2857 3060
2858 iwl_update_tkip_key(priv, keyconf, 3061 iwl_update_tkip_key(priv, keyconf, sta,
2859 sta ? sta->addr : iwl_bcast_addr,
2860 iv32, phase1key); 3062 iv32, phase1key);
2861 3063
2862 IWL_DEBUG_MAC80211(priv, "leave\n"); 3064 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2868,7 +3070,6 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2868 struct ieee80211_key_conf *key) 3070 struct ieee80211_key_conf *key)
2869{ 3071{
2870 struct iwl_priv *priv = hw->priv; 3072 struct iwl_priv *priv = hw->priv;
2871 const u8 *addr;
2872 int ret; 3073 int ret;
2873 u8 sta_id; 3074 u8 sta_id;
2874 bool is_default_wep_key = false; 3075 bool is_default_wep_key = false;
@@ -2879,25 +3080,29 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2879 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n"); 3080 IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
2880 return -EOPNOTSUPP; 3081 return -EOPNOTSUPP;
2881 } 3082 }
2882 addr = sta ? sta->addr : iwl_bcast_addr;
2883 sta_id = iwl_find_station(priv, addr);
2884 if (sta_id == IWL_INVALID_STATION) {
2885 IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
2886 addr);
2887 return -EINVAL;
2888 3083
3084 if (sta) {
3085 sta_id = iwl_sta_id(sta);
3086
3087 if (sta_id == IWL_INVALID_STATION) {
3088 IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n",
3089 sta->addr);
3090 return -EINVAL;
3091 }
3092 } else {
3093 sta_id = priv->hw_params.bcast_sta_id;
2889 } 3094 }
2890 3095
2891 mutex_lock(&priv->mutex); 3096 mutex_lock(&priv->mutex);
2892 iwl_scan_cancel_timeout(priv, 100); 3097 iwl_scan_cancel_timeout(priv, 100);
2893 mutex_unlock(&priv->mutex);
2894 3098
2895 /* If we are getting WEP group key and we didn't receive any key mapping 3099 /*
3100 * If we are getting WEP group key and we didn't receive any key mapping
2896 * so far, we are in legacy wep mode (group key only), otherwise we are 3101 * so far, we are in legacy wep mode (group key only), otherwise we are
2897 * in 1X mode. 3102 * in 1X mode.
2898 * In legacy wep mode, we use another host command to the uCode */ 3103 * In legacy wep mode, we use another host command to the uCode.
2899 if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id && 3104 */
2900 priv->iw_mode != NL80211_IFTYPE_AP) { 3105 if (key->alg == ALG_WEP && !sta && vif->type != NL80211_IFTYPE_AP) {
2901 if (cmd == SET_KEY) 3106 if (cmd == SET_KEY)
2902 is_default_wep_key = !priv->key_mapping_key; 3107 is_default_wep_key = !priv->key_mapping_key;
2903 else 3108 else
@@ -2926,6 +3131,7 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2926 ret = -EINVAL; 3131 ret = -EINVAL;
2927 } 3132 }
2928 3133
3134 mutex_unlock(&priv->mutex);
2929 IWL_DEBUG_MAC80211(priv, "leave\n"); 3135 IWL_DEBUG_MAC80211(priv, "leave\n");
2930 3136
2931 return ret; 3137 return ret;
@@ -2933,8 +3139,8 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2933 3139
2934static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, 3140static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2935 struct ieee80211_vif *vif, 3141 struct ieee80211_vif *vif,
2936 enum ieee80211_ampdu_mlme_action action, 3142 enum ieee80211_ampdu_mlme_action action,
2937 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 3143 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2938{ 3144{
2939 struct iwl_priv *priv = hw->priv; 3145 struct iwl_priv *priv = hw->priv;
2940 int ret; 3146 int ret;
@@ -2948,20 +3154,31 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2948 switch (action) { 3154 switch (action) {
2949 case IEEE80211_AMPDU_RX_START: 3155 case IEEE80211_AMPDU_RX_START:
2950 IWL_DEBUG_HT(priv, "start Rx\n"); 3156 IWL_DEBUG_HT(priv, "start Rx\n");
2951 return iwl_sta_rx_agg_start(priv, sta->addr, tid, *ssn); 3157 return iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
2952 case IEEE80211_AMPDU_RX_STOP: 3158 case IEEE80211_AMPDU_RX_STOP:
2953 IWL_DEBUG_HT(priv, "stop Rx\n"); 3159 IWL_DEBUG_HT(priv, "stop Rx\n");
2954 ret = iwl_sta_rx_agg_stop(priv, sta->addr, tid); 3160 ret = iwl_sta_rx_agg_stop(priv, sta, tid);
2955 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3161 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2956 return 0; 3162 return 0;
2957 else 3163 else
2958 return ret; 3164 return ret;
2959 case IEEE80211_AMPDU_TX_START: 3165 case IEEE80211_AMPDU_TX_START:
2960 IWL_DEBUG_HT(priv, "start Tx\n"); 3166 IWL_DEBUG_HT(priv, "start Tx\n");
2961 return iwl_tx_agg_start(priv, sta->addr, tid, ssn); 3167 ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
3168 if (ret == 0) {
3169 priv->_agn.agg_tids_count++;
3170 IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
3171 priv->_agn.agg_tids_count);
3172 }
3173 return ret;
2962 case IEEE80211_AMPDU_TX_STOP: 3174 case IEEE80211_AMPDU_TX_STOP:
2963 IWL_DEBUG_HT(priv, "stop Tx\n"); 3175 IWL_DEBUG_HT(priv, "stop Tx\n");
2964 ret = iwl_tx_agg_stop(priv, sta->addr, tid); 3176 ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
3177 if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
3178 priv->_agn.agg_tids_count--;
3179 IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
3180 priv->_agn.agg_tids_count);
3181 }
2965 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3182 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2966 return 0; 3183 return 0;
2967 else 3184 else
@@ -2977,18 +3194,6 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2977 return 0; 3194 return 0;
2978} 3195}
2979 3196
2980static int iwl_mac_get_stats(struct ieee80211_hw *hw,
2981 struct ieee80211_low_level_stats *stats)
2982{
2983 struct iwl_priv *priv = hw->priv;
2984
2985 priv = hw->priv;
2986 IWL_DEBUG_MAC80211(priv, "enter\n");
2987 IWL_DEBUG_MAC80211(priv, "leave\n");
2988
2989 return 0;
2990}
2991
2992static void iwl_mac_sta_notify(struct ieee80211_hw *hw, 3197static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
2993 struct ieee80211_vif *vif, 3198 struct ieee80211_vif *vif,
2994 enum sta_notify_cmd cmd, 3199 enum sta_notify_cmd cmd,
@@ -2998,18 +3203,7 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
2998 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; 3203 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2999 int sta_id; 3204 int sta_id;
3000 3205
3001 /*
3002 * TODO: We really should use this callback to
3003 * actually maintain the station table in
3004 * the device.
3005 */
3006
3007 switch (cmd) { 3206 switch (cmd) {
3008 case STA_NOTIFY_ADD:
3009 atomic_set(&sta_priv->pending_frames, 0);
3010 if (vif->type == NL80211_IFTYPE_AP)
3011 sta_priv->client = true;
3012 break;
3013 case STA_NOTIFY_SLEEP: 3207 case STA_NOTIFY_SLEEP:
3014 WARN_ON(!sta_priv->client); 3208 WARN_ON(!sta_priv->client);
3015 sta_priv->asleep = true; 3209 sta_priv->asleep = true;
@@ -3021,7 +3215,7 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
3021 if (!sta_priv->asleep) 3215 if (!sta_priv->asleep)
3022 break; 3216 break;
3023 sta_priv->asleep = false; 3217 sta_priv->asleep = false;
3024 sta_id = iwl_find_station(priv, sta->addr); 3218 sta_id = iwl_sta_id(sta);
3025 if (sta_id != IWL_INVALID_STATION) 3219 if (sta_id != IWL_INVALID_STATION)
3026 iwl_sta_modify_ps_wake(priv, sta_id); 3220 iwl_sta_modify_ps_wake(priv, sta_id);
3027 break; 3221 break;
@@ -3030,6 +3224,44 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
3030 } 3224 }
3031} 3225}
3032 3226
3227static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3228 struct ieee80211_vif *vif,
3229 struct ieee80211_sta *sta)
3230{
3231 struct iwl_priv *priv = hw->priv;
3232 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
3233 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3234 int ret;
3235 u8 sta_id;
3236
3237 sta_priv->common.sta_id = IWL_INVALID_STATION;
3238
3239 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
3240 sta->addr);
3241
3242 atomic_set(&sta_priv->pending_frames, 0);
3243 if (vif->type == NL80211_IFTYPE_AP)
3244 sta_priv->client = true;
3245
3246 ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
3247 &sta_id);
3248 if (ret) {
3249 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3250 sta->addr, ret);
3251 /* Should we return success if return code is EEXIST ? */
3252 return ret;
3253 }
3254
3255 sta_priv->common.sta_id = sta_id;
3256
3257 /* Initialize rate scaling */
3258 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
3259 sta->addr);
3260 iwl_rs_rate_init(priv, sta, sta_id);
3261
3262 return 0;
3263}
3264
3033/***************************************************************************** 3265/*****************************************************************************
3034 * 3266 *
3035 * sysfs attributes 3267 * sysfs attributes
@@ -3130,125 +3362,6 @@ static ssize_t store_tx_power(struct device *d,
3130 3362
3131static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); 3363static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
3132 3364
3133static ssize_t show_flags(struct device *d,
3134 struct device_attribute *attr, char *buf)
3135{
3136 struct iwl_priv *priv = dev_get_drvdata(d);
3137
3138 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
3139}
3140
3141static ssize_t store_flags(struct device *d,
3142 struct device_attribute *attr,
3143 const char *buf, size_t count)
3144{
3145 struct iwl_priv *priv = dev_get_drvdata(d);
3146 unsigned long val;
3147 u32 flags;
3148 int ret = strict_strtoul(buf, 0, &val);
3149 if (ret)
3150 return ret;
3151 flags = (u32)val;
3152
3153 mutex_lock(&priv->mutex);
3154 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
3155 /* Cancel any currently running scans... */
3156 if (iwl_scan_cancel_timeout(priv, 100))
3157 IWL_WARN(priv, "Could not cancel scan.\n");
3158 else {
3159 IWL_DEBUG_INFO(priv, "Commit rxon.flags = 0x%04X\n", flags);
3160 priv->staging_rxon.flags = cpu_to_le32(flags);
3161 iwlcore_commit_rxon(priv);
3162 }
3163 }
3164 mutex_unlock(&priv->mutex);
3165
3166 return count;
3167}
3168
3169static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
3170
3171static ssize_t show_filter_flags(struct device *d,
3172 struct device_attribute *attr, char *buf)
3173{
3174 struct iwl_priv *priv = dev_get_drvdata(d);
3175
3176 return sprintf(buf, "0x%04X\n",
3177 le32_to_cpu(priv->active_rxon.filter_flags));
3178}
3179
3180static ssize_t store_filter_flags(struct device *d,
3181 struct device_attribute *attr,
3182 const char *buf, size_t count)
3183{
3184 struct iwl_priv *priv = dev_get_drvdata(d);
3185 unsigned long val;
3186 u32 filter_flags;
3187 int ret = strict_strtoul(buf, 0, &val);
3188 if (ret)
3189 return ret;
3190 filter_flags = (u32)val;
3191
3192 mutex_lock(&priv->mutex);
3193 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
3194 /* Cancel any currently running scans... */
3195 if (iwl_scan_cancel_timeout(priv, 100))
3196 IWL_WARN(priv, "Could not cancel scan.\n");
3197 else {
3198 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
3199 "0x%04X\n", filter_flags);
3200 priv->staging_rxon.filter_flags =
3201 cpu_to_le32(filter_flags);
3202 iwlcore_commit_rxon(priv);
3203 }
3204 }
3205 mutex_unlock(&priv->mutex);
3206
3207 return count;
3208}
3209
3210static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
3211 store_filter_flags);
3212
3213
3214static ssize_t show_statistics(struct device *d,
3215 struct device_attribute *attr, char *buf)
3216{
3217 struct iwl_priv *priv = dev_get_drvdata(d);
3218 u32 size = sizeof(struct iwl_notif_statistics);
3219 u32 len = 0, ofs = 0;
3220 u8 *data = (u8 *)&priv->statistics;
3221 int rc = 0;
3222
3223 if (!iwl_is_alive(priv))
3224 return -EAGAIN;
3225
3226 mutex_lock(&priv->mutex);
3227 rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
3228 mutex_unlock(&priv->mutex);
3229
3230 if (rc) {
3231 len = sprintf(buf,
3232 "Error sending statistics request: 0x%08X\n", rc);
3233 return len;
3234 }
3235
3236 while (size && (PAGE_SIZE - len)) {
3237 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3238 PAGE_SIZE - len, 1);
3239 len = strlen(buf);
3240 if (PAGE_SIZE - len)
3241 buf[len++] = '\n';
3242
3243 ofs += 16;
3244 size -= min(size, 16U);
3245 }
3246
3247 return len;
3248}
3249
3250static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
3251
3252static ssize_t show_rts_ht_protection(struct device *d, 3365static ssize_t show_rts_ht_protection(struct device *d,
3253 struct device_attribute *attr, char *buf) 3366 struct device_attribute *attr, char *buf)
3254{ 3367{
@@ -3316,6 +3429,13 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3316 priv->ucode_trace.data = (unsigned long)priv; 3429 priv->ucode_trace.data = (unsigned long)priv;
3317 priv->ucode_trace.function = iwl_bg_ucode_trace; 3430 priv->ucode_trace.function = iwl_bg_ucode_trace;
3318 3431
3432 if (priv->cfg->ops->lib->recover_from_tx_stall) {
3433 init_timer(&priv->monitor_recover);
3434 priv->monitor_recover.data = (unsigned long)priv;
3435 priv->monitor_recover.function =
3436 priv->cfg->ops->lib->recover_from_tx_stall;
3437 }
3438
3319 if (!priv->cfg->use_isr_legacy) 3439 if (!priv->cfg->use_isr_legacy)
3320 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3440 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3321 iwl_irq_tasklet, (unsigned long)priv); 3441 iwl_irq_tasklet, (unsigned long)priv);
@@ -3331,10 +3451,13 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3331 3451
3332 cancel_delayed_work_sync(&priv->init_alive_start); 3452 cancel_delayed_work_sync(&priv->init_alive_start);
3333 cancel_delayed_work(&priv->scan_check); 3453 cancel_delayed_work(&priv->scan_check);
3454 cancel_work_sync(&priv->start_internal_scan);
3334 cancel_delayed_work(&priv->alive_start); 3455 cancel_delayed_work(&priv->alive_start);
3335 cancel_work_sync(&priv->beacon_update); 3456 cancel_work_sync(&priv->beacon_update);
3336 del_timer_sync(&priv->statistics_periodic); 3457 del_timer_sync(&priv->statistics_periodic);
3337 del_timer_sync(&priv->ucode_trace); 3458 del_timer_sync(&priv->ucode_trace);
3459 if (priv->cfg->ops->lib->recover_from_tx_stall)
3460 del_timer_sync(&priv->monitor_recover);
3338} 3461}
3339 3462
3340static void iwl_init_hw_rates(struct iwl_priv *priv, 3463static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3372,9 +3495,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
3372 mutex_init(&priv->mutex); 3495 mutex_init(&priv->mutex);
3373 mutex_init(&priv->sync_cmd_mutex); 3496 mutex_init(&priv->sync_cmd_mutex);
3374 3497
3375 /* Clear the driver's (not device's) station table */
3376 iwl_clear_stations_table(priv);
3377
3378 priv->ieee_channels = NULL; 3498 priv->ieee_channels = NULL;
3379 priv->ieee_rates = NULL; 3499 priv->ieee_rates = NULL;
3380 priv->band = IEEE80211_BAND_2GHZ; 3500 priv->band = IEEE80211_BAND_2GHZ;
@@ -3382,6 +3502,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
3382 priv->iw_mode = NL80211_IFTYPE_STATION; 3502 priv->iw_mode = NL80211_IFTYPE_STATION;
3383 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; 3503 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
3384 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; 3504 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3505 priv->_agn.agg_tids_count = 0;
3385 3506
3386 /* initialize force reset */ 3507 /* initialize force reset */
3387 priv->force_reset[IWL_RF_RESET].reset_duration = 3508 priv->force_reset[IWL_RF_RESET].reset_duration =
@@ -3395,16 +3516,10 @@ static int iwl_init_drv(struct iwl_priv *priv)
3395 3516
3396 iwl_init_scan_params(priv); 3517 iwl_init_scan_params(priv);
3397 3518
3398 iwl_reset_qos(priv);
3399
3400 priv->qos_data.qos_active = 0;
3401 priv->qos_data.qos_cap.val = 0;
3402
3403 priv->rates_mask = IWL_RATES_MASK;
3404 /* Set the tx_power_user_lmt to the lowest power level 3519 /* Set the tx_power_user_lmt to the lowest power level
3405 * this value will get overwritten by channel max power avg 3520 * this value will get overwritten by channel max power avg
3406 * from eeprom */ 3521 * from eeprom */
3407 priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN; 3522 priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
3408 3523
3409 ret = iwl_init_channel_map(priv); 3524 ret = iwl_init_channel_map(priv);
3410 if (ret) { 3525 if (ret) {
@@ -3432,13 +3547,10 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
3432 iwl_calib_free_results(priv); 3547 iwl_calib_free_results(priv);
3433 iwlcore_free_geos(priv); 3548 iwlcore_free_geos(priv);
3434 iwl_free_channel_map(priv); 3549 iwl_free_channel_map(priv);
3435 kfree(priv->scan); 3550 kfree(priv->scan_cmd);
3436} 3551}
3437 3552
3438static struct attribute *iwl_sysfs_entries[] = { 3553static struct attribute *iwl_sysfs_entries[] = {
3439 &dev_attr_flags.attr,
3440 &dev_attr_filter_flags.attr,
3441 &dev_attr_statistics.attr,
3442 &dev_attr_temperature.attr, 3554 &dev_attr_temperature.attr,
3443 &dev_attr_tx_power.attr, 3555 &dev_attr_tx_power.attr,
3444 &dev_attr_rts_ht_protection.attr, 3556 &dev_attr_rts_ht_protection.attr,
@@ -3463,13 +3575,14 @@ static struct ieee80211_ops iwl_hw_ops = {
3463 .configure_filter = iwl_configure_filter, 3575 .configure_filter = iwl_configure_filter,
3464 .set_key = iwl_mac_set_key, 3576 .set_key = iwl_mac_set_key,
3465 .update_tkip_key = iwl_mac_update_tkip_key, 3577 .update_tkip_key = iwl_mac_update_tkip_key,
3466 .get_stats = iwl_mac_get_stats,
3467 .conf_tx = iwl_mac_conf_tx, 3578 .conf_tx = iwl_mac_conf_tx,
3468 .reset_tsf = iwl_mac_reset_tsf, 3579 .reset_tsf = iwl_mac_reset_tsf,
3469 .bss_info_changed = iwl_bss_info_changed, 3580 .bss_info_changed = iwl_bss_info_changed,
3470 .ampdu_action = iwl_mac_ampdu_action, 3581 .ampdu_action = iwl_mac_ampdu_action,
3471 .hw_scan = iwl_mac_hw_scan, 3582 .hw_scan = iwl_mac_hw_scan,
3472 .sta_notify = iwl_mac_sta_notify, 3583 .sta_notify = iwl_mac_sta_notify,
3584 .sta_add = iwlagn_mac_sta_add,
3585 .sta_remove = iwl_mac_sta_remove,
3473}; 3586};
3474 3587
3475static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3588static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -3573,7 +3686,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3573 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 3686 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3574 3687
3575 iwl_hw_detect(priv); 3688 iwl_hw_detect(priv);
3576 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n", 3689 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
3577 priv->cfg->name, priv->hw_rev); 3690 priv->cfg->name, priv->hw_rev);
3578 3691
3579 /* We disable the RETRY_TIMEOUT register (0x41) to keep 3692 /* We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -3671,6 +3784,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3671 iwl_power_initialize(priv); 3784 iwl_power_initialize(priv);
3672 iwl_tt_initialize(priv); 3785 iwl_tt_initialize(priv);
3673 3786
3787 init_completion(&priv->_agn.firmware_loading_complete);
3788
3674 err = iwl_request_firmware(priv, true); 3789 err = iwl_request_firmware(priv, true);
3675 if (err) 3790 if (err)
3676 goto out_remove_sysfs; 3791 goto out_remove_sysfs;
@@ -3711,6 +3826,8 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
3711 if (!priv) 3826 if (!priv)
3712 return; 3827 return;
3713 3828
3829 wait_for_completion(&priv->_agn.firmware_loading_complete);
3830
3714 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n"); 3831 IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
3715 3832
3716 iwl_dbgfs_unregister(priv); 3833 iwl_dbgfs_unregister(priv);
@@ -3751,10 +3868,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
3751 iwl_dealloc_ucode_pci(priv); 3868 iwl_dealloc_ucode_pci(priv);
3752 3869
3753 if (priv->rxq.bd) 3870 if (priv->rxq.bd)
3754 iwl_rx_queue_free(priv, &priv->rxq); 3871 iwlagn_rx_queue_free(priv, &priv->rxq);
3755 iwl_hw_txq_ctx_free(priv); 3872 iwlagn_hw_txq_ctx_free(priv);
3756 3873
3757 iwl_clear_stations_table(priv);
3758 iwl_eeprom_free(priv); 3874 iwl_eeprom_free(priv);
3759 3875
3760 3876
@@ -3869,6 +3985,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
3869 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, 3985 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
3870 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, 3986 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
3871 3987
3988/* 6x00 Series Gen2a */
3989 {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)},
3990 {IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)},
3991 {IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)},
3992
3872/* 6x50 WiFi/WiMax Series */ 3993/* 6x50 WiFi/WiMax Series */
3873 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, 3994 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
3874 {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)}, 3995 {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
@@ -3950,3 +4071,38 @@ module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
3950MODULE_PARM_DESC(debug, "debug output mask"); 4071MODULE_PARM_DESC(debug, "debug output mask");
3951#endif 4072#endif
3952 4073
4074module_param_named(swcrypto50, iwlagn_mod_params.sw_crypto, bool, S_IRUGO);
4075MODULE_PARM_DESC(swcrypto50,
4076 "using crypto in software (default 0 [hardware]) (deprecated)");
4077module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
4078MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
4079module_param_named(queues_num50,
4080 iwlagn_mod_params.num_of_queues, int, S_IRUGO);
4081MODULE_PARM_DESC(queues_num50,
4082 "number of hw queues in 50xx series (deprecated)");
4083module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
4084MODULE_PARM_DESC(queues_num, "number of hw queues.");
4085module_param_named(11n_disable50, iwlagn_mod_params.disable_11n, int, S_IRUGO);
4086MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality (deprecated)");
4087module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
4088MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
4089module_param_named(amsdu_size_8K50, iwlagn_mod_params.amsdu_size_8K,
4090 int, S_IRUGO);
4091MODULE_PARM_DESC(amsdu_size_8K50,
4092 "enable 8K amsdu size in 50XX series (deprecated)");
4093module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
4094 int, S_IRUGO);
4095MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
4096module_param_named(fw_restart50, iwlagn_mod_params.restart_fw, int, S_IRUGO);
4097MODULE_PARM_DESC(fw_restart50,
4098 "restart firmware in case of error (deprecated)");
4099module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
4100MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
4101module_param_named(
4102 disable_hw_scan, iwlagn_mod_params.disable_hw_scan, int, S_IRUGO);
4103MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
4104
4105module_param_named(ucode_alternative, iwlagn_wanted_ucode_alternative, int,
4106 S_IRUGO);
4107MODULE_PARM_DESC(ucode_alternative,
4108 "specify ucode alternative to use from ucode file");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
new file mode 100644
index 000000000000..2d748053358e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -0,0 +1,181 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_agn_h__
64#define __iwl_agn_h__
65
66#include "iwl-dev.h"
67
68extern struct iwl_mod_params iwlagn_mod_params;
69extern struct iwl_hcmd_ops iwlagn_hcmd;
70extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
71
72int iwl_reset_ict(struct iwl_priv *priv);
73void iwl_disable_ict(struct iwl_priv *priv);
74int iwl_alloc_isr_ict(struct iwl_priv *priv);
75void iwl_free_isr_ict(struct iwl_priv *priv);
76irqreturn_t iwl_isr_ict(int irq, void *data);
77bool iwl_good_ack_health(struct iwl_priv *priv,
78 struct iwl_rx_packet *pkt);
79
80/* tx queue */
81void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
82 int txq_id, u32 index);
83void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
84 struct iwl_tx_queue *txq,
85 int tx_fifo_id, int scd_retry);
86void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
87 struct iwl_tx_queue *txq,
88 u16 byte_cnt);
89void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
90 struct iwl_tx_queue *txq);
91int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
92 int tx_fifo, int sta_id, int tid, u16 ssn_idx);
93int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
94 u16 ssn_idx, u8 tx_fifo);
95void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
96
97/* uCode */
98int iwlagn_load_ucode(struct iwl_priv *priv);
99void iwlagn_rx_calib_result(struct iwl_priv *priv,
100 struct iwl_rx_mem_buffer *rxb);
101void iwlagn_rx_calib_complete(struct iwl_priv *priv,
102 struct iwl_rx_mem_buffer *rxb);
103void iwlagn_init_alive_start(struct iwl_priv *priv);
104int iwlagn_alive_notify(struct iwl_priv *priv);
105
106/* lib */
107void iwl_check_abort_status(struct iwl_priv *priv,
108 u8 frame_count, u32 status);
109void iwlagn_rx_handler_setup(struct iwl_priv *priv);
110void iwlagn_setup_deferred_work(struct iwl_priv *priv);
111int iwlagn_hw_valid_rtc_data_addr(u32 addr);
112int iwlagn_send_tx_power(struct iwl_priv *priv);
113void iwlagn_temperature(struct iwl_priv *priv);
114u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
115const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
116 size_t offset);
117void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
118int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
119int iwlagn_hw_nic_init(struct iwl_priv *priv);
120
121/* rx */
122void iwlagn_rx_queue_restock(struct iwl_priv *priv);
123void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority);
124void iwlagn_rx_replenish(struct iwl_priv *priv);
125void iwlagn_rx_replenish_now(struct iwl_priv *priv);
126void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
127int iwlagn_rxq_stop(struct iwl_priv *priv);
128int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
129void iwlagn_rx_reply_rx(struct iwl_priv *priv,
130 struct iwl_rx_mem_buffer *rxb);
131void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
132 struct iwl_rx_mem_buffer *rxb);
133
134/* tx */
135void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
136 struct ieee80211_tx_info *info);
137int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
138int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
139 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
140int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
141 struct ieee80211_sta *sta, u16 tid);
142int iwlagn_txq_check_empty(struct iwl_priv *priv,
143 int sta_id, u8 tid, int txq_id);
144void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
145 struct iwl_rx_mem_buffer *rxb);
146int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
147void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv);
148int iwlagn_txq_ctx_alloc(struct iwl_priv *priv);
149void iwlagn_txq_ctx_reset(struct iwl_priv *priv);
150void iwlagn_txq_ctx_stop(struct iwl_priv *priv);
151
152static inline u32 iwl_tx_status_to_mac80211(u32 status)
153{
154 status &= TX_STATUS_MSK;
155
156 switch (status) {
157 case TX_STATUS_SUCCESS:
158 case TX_STATUS_DIRECT_DONE:
159 return IEEE80211_TX_STAT_ACK;
160 case TX_STATUS_FAIL_DEST_PS:
161 return IEEE80211_TX_STAT_TX_FILTERED;
162 default:
163 return 0;
164 }
165}
166
167static inline bool iwl_is_tx_success(u32 status)
168{
169 status &= TX_STATUS_MSK;
170 return (status == TX_STATUS_SUCCESS) ||
171 (status == TX_STATUS_DIRECT_DONE);
172}
173
174/* scan */
175void iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
176
177/* station mgmt */
178int iwlagn_manage_ibss_station(struct iwl_priv *priv,
179 struct ieee80211_vif *vif, bool add);
180
181#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 8b516c5ff0bb..7e8227773213 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -593,7 +593,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
593 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); 593 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
594 594
595 if (!rx_enable_time) { 595 if (!rx_enable_time) {
596 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0! \n"); 596 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
597 return; 597 return;
598 } 598 }
599 599
@@ -638,8 +638,6 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
638 iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time); 638 iwl_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
639 iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis); 639 iwl_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
640 iwl_sensitivity_write(priv); 640 iwl_sensitivity_write(priv);
641
642 return;
643} 641}
644EXPORT_SYMBOL(iwl_sensitivity_calibration); 642EXPORT_SYMBOL(iwl_sensitivity_calibration);
645 643
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 6383d9f8c9b3..9aab020c474b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -106,7 +106,7 @@ enum {
106 REPLY_TX = 0x1c, 106 REPLY_TX = 0x1c,
107 REPLY_RATE_SCALE = 0x47, /* 3945 only */ 107 REPLY_RATE_SCALE = 0x47, /* 3945 only */
108 REPLY_LEDS_CMD = 0x48, 108 REPLY_LEDS_CMD = 0x48,
109 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */ 109 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
110 110
111 /* WiMAX coexistence */ 111 /* WiMAX coexistence */
112 COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */ 112 COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
@@ -512,8 +512,9 @@ struct iwl_init_alive_resp {
512 * 512 *
513 * Entries without timestamps contain only event_id and data. 513 * Entries without timestamps contain only event_id and data.
514 * 514 *
515 *
515 * 2) error_event_table_ptr indicates base of the error log. This contains 516 * 2) error_event_table_ptr indicates base of the error log. This contains
516 * information about any uCode error that occurs. For 4965, the format 517 * information about any uCode error that occurs. For agn, the format
517 * of the error log is: 518 * of the error log is:
518 * 519 *
519 * __le32 valid; (nonzero) valid, (0) log is empty 520 * __le32 valid; (nonzero) valid, (0) log is empty
@@ -529,6 +530,30 @@ struct iwl_init_alive_resp {
529 * __le32 bcon_time; beacon timer 530 * __le32 bcon_time; beacon timer
530 * __le32 tsf_low; network timestamp function timer 531 * __le32 tsf_low; network timestamp function timer
531 * __le32 tsf_hi; network timestamp function timer 532 * __le32 tsf_hi; network timestamp function timer
533 * __le32 gp1; GP1 timer register
534 * __le32 gp2; GP2 timer register
535 * __le32 gp3; GP3 timer register
536 * __le32 ucode_ver; uCode version
537 * __le32 hw_ver; HW Silicon version
538 * __le32 brd_ver; HW board version
539 * __le32 log_pc; log program counter
540 * __le32 frame_ptr; frame pointer
541 * __le32 stack_ptr; stack pointer
542 * __le32 hcmd; last host command
543 * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
544 * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
545 * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
546 * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
547 * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
548 * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
549 * __le32 wait_event; wait event() caller address
550 * __le32 l2p_control; L2pControlField
551 * __le32 l2p_duration; L2pDurationField
552 * __le32 l2p_mhvalid; L2pMhValidBits
553 * __le32 l2p_addr_match; L2pAddrMatchStat
554 * __le32 lmpm_pmg_sel; indicate which clocks are turned on (LMPM_PMG_SEL)
555 * __le32 u_timestamp; indicate when the date and time of the compilation
556 * __le32 reserved;
532 * 557 *
533 * The Linux driver can print both logs to the system log when a uCode error 558 * The Linux driver can print both logs to the system log when a uCode error
534 * occurs. 559 * occurs.
@@ -1418,7 +1443,7 @@ struct iwl4965_rx_mpdu_res_start {
1418 1443
1419/* 1: Ignore Bluetooth priority for this frame. 1444/* 1: Ignore Bluetooth priority for this frame.
1420 * 0: Delay Tx until Bluetooth device is done (normal usage). */ 1445 * 0: Delay Tx until Bluetooth device is done (normal usage). */
1421#define TX_CMD_FLG_BT_DIS_MSK cpu_to_le32(1 << 12) 1446#define TX_CMD_FLG_IGNORE_BT cpu_to_le32(1 << 12)
1422 1447
1423/* 1: uCode overrides sequence control field in MAC header. 1448/* 1: uCode overrides sequence control field in MAC header.
1424 * 0: Driver provides sequence control field in MAC header. 1449 * 0: Driver provides sequence control field in MAC header.
@@ -1637,7 +1662,7 @@ struct iwl_tx_cmd {
1637 struct ieee80211_hdr hdr[0]; 1662 struct ieee80211_hdr hdr[0];
1638} __attribute__ ((packed)); 1663} __attribute__ ((packed));
1639 1664
1640/* TX command response is sent after *all* transmission attempts. 1665/* TX command response is sent after *3945* transmission attempts.
1641 * 1666 *
1642 * NOTES: 1667 * NOTES:
1643 * 1668 *
@@ -1665,24 +1690,65 @@ struct iwl_tx_cmd {
1665 * control line. Receiving is still allowed in this case. 1690 * control line. Receiving is still allowed in this case.
1666 */ 1691 */
1667enum { 1692enum {
1693 TX_3945_STATUS_SUCCESS = 0x01,
1694 TX_3945_STATUS_DIRECT_DONE = 0x02,
1695 TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
1696 TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
1697 TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1698 TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
1699 TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
1700 TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1701 TX_3945_STATUS_FAIL_DEST_PS = 0x88,
1702 TX_3945_STATUS_FAIL_ABORTED = 0x89,
1703 TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
1704 TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
1705 TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1706 TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
1707 TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
1708 TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1709 TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
1710 TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1711};
1712
1713/*
1714 * TX command response is sent after *agn* transmission attempts.
1715 *
1716 * both postpone and abort status are expected behavior from uCode. there is
1717 * no special operation required from driver; except for RFKILL_FLUSH,
1718 * which required tx flush host command to flush all the tx frames in queues
1719 */
1720enum {
1668 TX_STATUS_SUCCESS = 0x01, 1721 TX_STATUS_SUCCESS = 0x01,
1669 TX_STATUS_DIRECT_DONE = 0x02, 1722 TX_STATUS_DIRECT_DONE = 0x02,
1723 /* postpone TX */
1724 TX_STATUS_POSTPONE_DELAY = 0x40,
1725 TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
1726 TX_STATUS_POSTPONE_BT_PRIO = 0x42,
1727 TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
1728 TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
1729 /* abort TX */
1730 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
1670 TX_STATUS_FAIL_SHORT_LIMIT = 0x82, 1731 TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
1671 TX_STATUS_FAIL_LONG_LIMIT = 0x83, 1732 TX_STATUS_FAIL_LONG_LIMIT = 0x83,
1672 TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84, 1733 TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1673 TX_STATUS_FAIL_MGMNT_ABORT = 0x85, 1734 TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
1674 TX_STATUS_FAIL_NEXT_FRAG = 0x86, 1735 TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
1675 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, 1736 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1676 TX_STATUS_FAIL_DEST_PS = 0x88, 1737 TX_STATUS_FAIL_DEST_PS = 0x88,
1677 TX_STATUS_FAIL_ABORTED = 0x89, 1738 TX_STATUS_FAIL_HOST_ABORTED = 0x89,
1678 TX_STATUS_FAIL_BT_RETRY = 0x8a, 1739 TX_STATUS_FAIL_BT_RETRY = 0x8a,
1679 TX_STATUS_FAIL_STA_INVALID = 0x8b, 1740 TX_STATUS_FAIL_STA_INVALID = 0x8b,
1680 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, 1741 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1681 TX_STATUS_FAIL_TID_DISABLE = 0x8d, 1742 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
1682 TX_STATUS_FAIL_FRAME_FLUSHED = 0x8e, 1743 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
1683 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, 1744 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1684 TX_STATUS_FAIL_TX_LOCKED = 0x90, 1745 /* uCode drop due to FW drop request */
1685 TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, 1746 TX_STATUS_FAIL_FW_DROP = 0x90,
1747 /*
1748 * uCode drop due to station color mismatch
1749 * between tx command and station table
1750 */
1751 TX_STATUS_FAIL_STA_COLOR_MISMATCH_DROP = 0x91,
1686}; 1752};
1687 1753
1688#define TX_PACKET_MODE_REGULAR 0x0000 1754#define TX_PACKET_MODE_REGULAR 0x0000
@@ -1704,30 +1770,6 @@ enum {
1704 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ 1770 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1705}; 1771};
1706 1772
1707static inline u32 iwl_tx_status_to_mac80211(u32 status)
1708{
1709 status &= TX_STATUS_MSK;
1710
1711 switch (status) {
1712 case TX_STATUS_SUCCESS:
1713 case TX_STATUS_DIRECT_DONE:
1714 return IEEE80211_TX_STAT_ACK;
1715 case TX_STATUS_FAIL_DEST_PS:
1716 return IEEE80211_TX_STAT_TX_FILTERED;
1717 default:
1718 return 0;
1719 }
1720}
1721
1722static inline bool iwl_is_tx_success(u32 status)
1723{
1724 status &= TX_STATUS_MSK;
1725 return (status == TX_STATUS_SUCCESS) ||
1726 (status == TX_STATUS_DIRECT_DONE);
1727}
1728
1729
1730
1731/* ******************************* 1773/* *******************************
1732 * TX aggregation status 1774 * TX aggregation status
1733 ******************************* */ 1775 ******************************* */
@@ -2621,10 +2663,11 @@ struct iwl_ssid_ie {
2621#define PROBE_OPTION_MAX_3945 4 2663#define PROBE_OPTION_MAX_3945 4
2622#define PROBE_OPTION_MAX 20 2664#define PROBE_OPTION_MAX 20
2623#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF) 2665#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
2624#define IWL_GOOD_CRC_TH cpu_to_le16(1) 2666#define IWL_GOOD_CRC_TH_DISABLED 0
2667#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
2668#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
2625#define IWL_MAX_SCAN_SIZE 1024 2669#define IWL_MAX_SCAN_SIZE 1024
2626#define IWL_MAX_CMD_SIZE 4096 2670#define IWL_MAX_CMD_SIZE 4096
2627#define IWL_MAX_PROBE_REQUEST 200
2628 2671
2629/* 2672/*
2630 * REPLY_SCAN_CMD = 0x80 (command) 2673 * REPLY_SCAN_CMD = 0x80 (command)
@@ -3084,6 +3127,11 @@ struct statistics_tx {
3084 __le32 cts_timeout_collision; 3127 __le32 cts_timeout_collision;
3085 __le32 ack_or_ba_timeout_collision; 3128 __le32 ack_or_ba_timeout_collision;
3086 struct statistics_tx_non_phy_agg agg; 3129 struct statistics_tx_non_phy_agg agg;
3130 /*
3131 * "tx_power" are optional parameters provided by uCode,
3132 * 6000 series is the only device provide the information,
3133 * Those are reserved fields for all the other devices
3134 */
3087 struct statistics_tx_power tx_power; 3135 struct statistics_tx_power tx_power;
3088 __le32 reserved1; 3136 __le32 reserved1;
3089} __attribute__ ((packed)); 3137} __attribute__ ((packed));
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 3352f7086632..5a7eca8fb789 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -66,38 +66,7 @@ MODULE_LICENSE("GPL");
66 */ 66 */
67static bool bt_coex_active = true; 67static bool bt_coex_active = true;
68module_param(bt_coex_active, bool, S_IRUGO); 68module_param(bt_coex_active, bool, S_IRUGO);
69MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n"); 69MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
70
71static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
72 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
73 0, COEX_UNASSOC_IDLE_FLAGS},
74 {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
75 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
76 {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
77 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
78 {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
79 0, COEX_CALIBRATION_FLAGS},
80 {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
81 0, COEX_PERIODIC_CALIBRATION_FLAGS},
82 {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
83 0, COEX_CONNECTION_ESTAB_FLAGS},
84 {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
85 0, COEX_ASSOCIATED_IDLE_FLAGS},
86 {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
87 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
88 {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
89 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
90 {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
91 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
92 {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
93 {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
94 {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
95 0, COEX_STAND_ALONE_DEBUG_FLAGS},
96 {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
97 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
98 {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
99 {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
100};
101 70
102#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ 71#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
103 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 72 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -115,8 +84,6 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
115u32 iwl_debug_level; 84u32 iwl_debug_level;
116EXPORT_SYMBOL(iwl_debug_level); 85EXPORT_SYMBOL(iwl_debug_level);
117 86
118static irqreturn_t iwl_isr(int irq, void *data);
119
120/* 87/*
121 * Parameter order: 88 * Parameter order:
122 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate 89 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
@@ -143,30 +110,6 @@ const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
143}; 110};
144EXPORT_SYMBOL(iwl_rates); 111EXPORT_SYMBOL(iwl_rates);
145 112
146/**
147 * translate ucode response to mac80211 tx status control values
148 */
149void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
150 struct ieee80211_tx_info *info)
151{
152 struct ieee80211_tx_rate *r = &info->control.rates[0];
153
154 info->antenna_sel_tx =
155 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
156 if (rate_n_flags & RATE_MCS_HT_MSK)
157 r->flags |= IEEE80211_TX_RC_MCS;
158 if (rate_n_flags & RATE_MCS_GF_MSK)
159 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
160 if (rate_n_flags & RATE_MCS_HT40_MSK)
161 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
162 if (rate_n_flags & RATE_MCS_DUP_MSK)
163 r->flags |= IEEE80211_TX_RC_DUP_DATA;
164 if (rate_n_flags & RATE_MCS_SGI_MSK)
165 r->flags |= IEEE80211_TX_RC_SHORT_GI;
166 r->idx = iwl_hwrate_to_mac80211_idx(rate_n_flags, info->band);
167}
168EXPORT_SYMBOL(iwl_hwrate_to_tx_control);
169
170int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) 113int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
171{ 114{
172 int idx = 0; 115 int idx = 0;
@@ -198,27 +141,6 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
198} 141}
199EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx); 142EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
200 143
201int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
202{
203 int idx = 0;
204 int band_offset = 0;
205
206 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
207 if (rate_n_flags & RATE_MCS_HT_MSK) {
208 idx = (rate_n_flags & 0xff);
209 return idx;
210 /* Legacy rate format, search for match in table */
211 } else {
212 if (band == IEEE80211_BAND_5GHZ)
213 band_offset = IWL_FIRST_OFDM_RATE;
214 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
215 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
216 return idx - band_offset;
217 }
218
219 return -1;
220}
221
222u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant) 144u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
223{ 145{
224 int i; 146 int i;
@@ -268,74 +190,16 @@ void iwl_hw_detect(struct iwl_priv *priv)
268} 190}
269EXPORT_SYMBOL(iwl_hw_detect); 191EXPORT_SYMBOL(iwl_hw_detect);
270 192
271int iwl_hw_nic_init(struct iwl_priv *priv)
272{
273 unsigned long flags;
274 struct iwl_rx_queue *rxq = &priv->rxq;
275 int ret;
276
277 /* nic_init */
278 spin_lock_irqsave(&priv->lock, flags);
279 priv->cfg->ops->lib->apm_ops.init(priv);
280
281 /* Set interrupt coalescing calibration timer to default (512 usecs) */
282 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
283
284 spin_unlock_irqrestore(&priv->lock, flags);
285
286 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
287
288 priv->cfg->ops->lib->apm_ops.config(priv);
289
290 /* Allocate the RX queue, or reset if it is already allocated */
291 if (!rxq->bd) {
292 ret = iwl_rx_queue_alloc(priv);
293 if (ret) {
294 IWL_ERR(priv, "Unable to initialize Rx queue\n");
295 return -ENOMEM;
296 }
297 } else
298 iwl_rx_queue_reset(priv, rxq);
299
300 iwl_rx_replenish(priv);
301
302 iwl_rx_init(priv, rxq);
303
304 spin_lock_irqsave(&priv->lock, flags);
305
306 rxq->need_update = 1;
307 iwl_rx_queue_update_write_ptr(priv, rxq);
308
309 spin_unlock_irqrestore(&priv->lock, flags);
310
311 /* Allocate or reset and init all Tx and Command queues */
312 if (!priv->txq) {
313 ret = iwl_txq_ctx_alloc(priv);
314 if (ret)
315 return ret;
316 } else
317 iwl_txq_ctx_reset(priv);
318
319 set_bit(STATUS_INIT, &priv->status);
320
321 return 0;
322}
323EXPORT_SYMBOL(iwl_hw_nic_init);
324
325/* 193/*
326 * QoS support 194 * QoS support
327*/ 195*/
328void iwl_activate_qos(struct iwl_priv *priv, u8 force) 196static void iwl_update_qos(struct iwl_priv *priv)
329{ 197{
330 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 198 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
331 return; 199 return;
332 200
333 priv->qos_data.def_qos_parm.qos_flags = 0; 201 priv->qos_data.def_qos_parm.qos_flags = 0;
334 202
335 if (priv->qos_data.qos_cap.q_AP.queue_request &&
336 !priv->qos_data.qos_cap.q_AP.txop_request)
337 priv->qos_data.def_qos_parm.qos_flags |=
338 QOS_PARAM_FLG_TXOP_TYPE_MSK;
339 if (priv->qos_data.qos_active) 203 if (priv->qos_data.qos_active)
340 priv->qos_data.def_qos_parm.qos_flags |= 204 priv->qos_data.def_qos_parm.qos_flags |=
341 QOS_PARAM_FLG_UPDATE_EDCA_MSK; 205 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
@@ -343,118 +207,14 @@ void iwl_activate_qos(struct iwl_priv *priv, u8 force)
343 if (priv->current_ht_config.is_ht) 207 if (priv->current_ht_config.is_ht)
344 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; 208 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
345 209
346 if (force || iwl_is_associated(priv)) { 210 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
347 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", 211 priv->qos_data.qos_active,
348 priv->qos_data.qos_active, 212 priv->qos_data.def_qos_parm.qos_flags);
349 priv->qos_data.def_qos_parm.qos_flags);
350 213
351 iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM, 214 iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
352 sizeof(struct iwl_qosparam_cmd), 215 sizeof(struct iwl_qosparam_cmd),
353 &priv->qos_data.def_qos_parm, NULL); 216 &priv->qos_data.def_qos_parm, NULL);
354 }
355} 217}
356EXPORT_SYMBOL(iwl_activate_qos);
357
358/*
359 * AC CWmin CW max AIFSN TXOP Limit TXOP Limit
360 * (802.11b) (802.11a/g)
361 * AC_BK 15 1023 7 0 0
362 * AC_BE 15 1023 3 0 0
363 * AC_VI 7 15 2 6.016ms 3.008ms
364 * AC_VO 3 7 2 3.264ms 1.504ms
365 */
366void iwl_reset_qos(struct iwl_priv *priv)
367{
368 u16 cw_min = 15;
369 u16 cw_max = 1023;
370 u8 aifs = 2;
371 bool is_legacy = false;
372 unsigned long flags;
373 int i;
374
375 spin_lock_irqsave(&priv->lock, flags);
376 /* QoS always active in AP and ADHOC mode
377 * In STA mode wait for association
378 */
379 if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
380 priv->iw_mode == NL80211_IFTYPE_AP)
381 priv->qos_data.qos_active = 1;
382 else
383 priv->qos_data.qos_active = 0;
384
385 /* check for legacy mode */
386 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
387 (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
388 (priv->iw_mode == NL80211_IFTYPE_STATION &&
389 (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
390 cw_min = 31;
391 is_legacy = 1;
392 }
393
394 if (priv->qos_data.qos_active)
395 aifs = 3;
396
397 /* AC_BE */
398 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
399 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
400 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
401 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
402 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
403
404 if (priv->qos_data.qos_active) {
405 /* AC_BK */
406 i = 1;
407 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
408 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
409 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
410 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
411 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
412
413 /* AC_VI */
414 i = 2;
415 priv->qos_data.def_qos_parm.ac[i].cw_min =
416 cpu_to_le16((cw_min + 1) / 2 - 1);
417 priv->qos_data.def_qos_parm.ac[i].cw_max =
418 cpu_to_le16(cw_min);
419 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
420 if (is_legacy)
421 priv->qos_data.def_qos_parm.ac[i].edca_txop =
422 cpu_to_le16(6016);
423 else
424 priv->qos_data.def_qos_parm.ac[i].edca_txop =
425 cpu_to_le16(3008);
426 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
427
428 /* AC_VO */
429 i = 3;
430 priv->qos_data.def_qos_parm.ac[i].cw_min =
431 cpu_to_le16((cw_min + 1) / 4 - 1);
432 priv->qos_data.def_qos_parm.ac[i].cw_max =
433 cpu_to_le16((cw_min + 1) / 2 - 1);
434 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
435 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
436 if (is_legacy)
437 priv->qos_data.def_qos_parm.ac[i].edca_txop =
438 cpu_to_le16(3264);
439 else
440 priv->qos_data.def_qos_parm.ac[i].edca_txop =
441 cpu_to_le16(1504);
442 } else {
443 for (i = 1; i < 4; i++) {
444 priv->qos_data.def_qos_parm.ac[i].cw_min =
445 cpu_to_le16(cw_min);
446 priv->qos_data.def_qos_parm.ac[i].cw_max =
447 cpu_to_le16(cw_max);
448 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
449 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
450 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
451 }
452 }
453 IWL_DEBUG_QOS(priv, "set QoS to default \n");
454
455 spin_unlock_irqrestore(&priv->lock, flags);
456}
457EXPORT_SYMBOL(iwl_reset_qos);
458 218
459#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 219#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
460#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 220#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@@ -721,7 +481,7 @@ static u16 iwl_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
721 return new_val; 481 return new_val;
722} 482}
723 483
724void iwl_setup_rxon_timing(struct iwl_priv *priv) 484void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif)
725{ 485{
726 u64 tsf; 486 u64 tsf;
727 s32 interval_tm, rem; 487 s32 interval_tm, rem;
@@ -735,15 +495,14 @@ void iwl_setup_rxon_timing(struct iwl_priv *priv)
735 priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp); 495 priv->rxon_timing.timestamp = cpu_to_le64(priv->timestamp);
736 priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval); 496 priv->rxon_timing.listen_interval = cpu_to_le16(conf->listen_interval);
737 497
738 if (priv->iw_mode == NL80211_IFTYPE_STATION) { 498 beacon_int = vif->bss_conf.beacon_int;
739 beacon_int = priv->beacon_int;
740 priv->rxon_timing.atim_window = 0;
741 } else {
742 beacon_int = priv->vif->bss_conf.beacon_int;
743 499
500 if (vif->type == NL80211_IFTYPE_ADHOC) {
744 /* TODO: we need to get atim_window from upper stack 501 /* TODO: we need to get atim_window from upper stack
745 * for now we set to 0 */ 502 * for now we set to 0 */
746 priv->rxon_timing.atim_window = 0; 503 priv->rxon_timing.atim_window = 0;
504 } else {
505 priv->rxon_timing.atim_window = 0;
747 } 506 }
748 507
749 beacon_int = iwl_adjust_beacon_interval(beacon_int, 508 beacon_int = iwl_adjust_beacon_interval(beacon_int,
@@ -903,23 +662,10 @@ EXPORT_SYMBOL(iwl_full_rxon_required);
903 662
904u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv) 663u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
905{ 664{
906 int i; 665 /*
907 int rate_mask; 666 * Assign the lowest rate -- should really get this from
908 667 * the beacon skb from mac80211.
909 /* Set rate mask*/ 668 */
910 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
911 rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
912 else
913 rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
914
915 /* Find lowest valid rate */
916 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
917 i = iwl_rates[i].next_ieee) {
918 if (rate_mask & (1 << i))
919 return iwl_rates[i].plcp;
920 }
921
922 /* No valid rate was found. Assign the lowest one */
923 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) 669 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
924 return IWL_RATE_1M_PLCP; 670 return IWL_RATE_1M_PLCP;
925 else 671 else
@@ -991,7 +737,6 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
991 "extension channel offset 0x%x\n", 737 "extension channel offset 0x%x\n",
992 le32_to_cpu(rxon->flags), ht_conf->ht_protection, 738 le32_to_cpu(rxon->flags), ht_conf->ht_protection,
993 ht_conf->extension_chan_offset); 739 ht_conf->extension_chan_offset);
994 return;
995} 740}
996EXPORT_SYMBOL(iwl_set_rxon_ht); 741EXPORT_SYMBOL(iwl_set_rxon_ht);
997 742
@@ -1051,19 +796,6 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
1051} 796}
1052 797
1053/** 798/**
1054 * iwl_is_monitor_mode - Determine if interface in monitor mode
1055 *
1056 * priv->iw_mode is set in add_interface, but add_interface is
1057 * never called for monitor mode. The only way mac80211 informs us about
1058 * monitor mode is through configuring filters (call to configure_filter).
1059 */
1060bool iwl_is_monitor_mode(struct iwl_priv *priv)
1061{
1062 return !!(priv->staging_rxon.filter_flags & RXON_FILTER_PROMISC_MSK);
1063}
1064EXPORT_SYMBOL(iwl_is_monitor_mode);
1065
1066/**
1067 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image 799 * iwl_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
1068 * 800 *
1069 * Selects how many and which Rx receivers/antennas/chains to use. 801 * Selects how many and which Rx receivers/antennas/chains to use.
@@ -1106,19 +838,6 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
1106 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS; 838 rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
1107 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; 839 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
1108 840
1109 /* copied from 'iwl_bg_request_scan()' */
1110 /* Force use of chains B and C (0x6) for Rx for 4965
1111 * Avoid A (0x1) because of its off-channel reception on A-band.
1112 * MIMO is not used here, but value is required */
1113 if (iwl_is_monitor_mode(priv) &&
1114 !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
1115 ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) {
1116 rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS;
1117 rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS;
1118 rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
1119 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
1120 }
1121
1122 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain); 841 priv->staging_rxon.rx_chain = cpu_to_le16(rx_chain);
1123 842
1124 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam) 843 if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
@@ -1174,8 +893,9 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch)
1174} 893}
1175EXPORT_SYMBOL(iwl_set_rxon_channel); 894EXPORT_SYMBOL(iwl_set_rxon_channel);
1176 895
1177void iwl_set_flags_for_band(struct iwl_priv *priv, 896static void iwl_set_flags_for_band(struct iwl_priv *priv,
1178 enum ieee80211_band band) 897 enum ieee80211_band band,
898 struct ieee80211_vif *vif)
1179{ 899{
1180 if (band == IEEE80211_BAND_5GHZ) { 900 if (band == IEEE80211_BAND_5GHZ) {
1181 priv->staging_rxon.flags &= 901 priv->staging_rxon.flags &=
@@ -1184,12 +904,12 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
1184 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 904 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
1185 } else { 905 } else {
1186 /* Copied from iwl_post_associate() */ 906 /* Copied from iwl_post_associate() */
1187 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) 907 if (vif && vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
1188 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 908 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
1189 else 909 else
1190 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 910 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1191 911
1192 if (priv->iw_mode == NL80211_IFTYPE_ADHOC) 912 if (vif && vif->type == NL80211_IFTYPE_ADHOC)
1193 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 913 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
1194 914
1195 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK; 915 priv->staging_rxon.flags |= RXON_FLG_BAND_24G_MSK;
@@ -1201,13 +921,18 @@ void iwl_set_flags_for_band(struct iwl_priv *priv,
1201/* 921/*
1202 * initialize rxon structure with default values from eeprom 922 * initialize rxon structure with default values from eeprom
1203 */ 923 */
1204void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode) 924void iwl_connection_init_rx_config(struct iwl_priv *priv,
925 struct ieee80211_vif *vif)
1205{ 926{
1206 const struct iwl_channel_info *ch_info; 927 const struct iwl_channel_info *ch_info;
928 enum nl80211_iftype type = NL80211_IFTYPE_STATION;
929
930 if (vif)
931 type = vif->type;
1207 932
1208 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon)); 933 memset(&priv->staging_rxon, 0, sizeof(priv->staging_rxon));
1209 934
1210 switch (mode) { 935 switch (type) {
1211 case NL80211_IFTYPE_AP: 936 case NL80211_IFTYPE_AP:
1212 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP; 937 priv->staging_rxon.dev_type = RXON_DEV_TYPE_AP;
1213 break; 938 break;
@@ -1225,7 +950,7 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
1225 break; 950 break;
1226 951
1227 default: 952 default:
1228 IWL_ERR(priv, "Unsupported interface type %d\n", mode); 953 IWL_ERR(priv, "Unsupported interface type %d\n", type);
1229 break; 954 break;
1230 } 955 }
1231 956
@@ -1244,18 +969,10 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
1244 if (!ch_info) 969 if (!ch_info)
1245 ch_info = &priv->channel_info[0]; 970 ch_info = &priv->channel_info[0];
1246 971
1247 /*
1248 * in some case A channels are all non IBSS
1249 * in this case force B/G channel
1250 */
1251 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
1252 !(is_channel_ibss(ch_info)))
1253 ch_info = &priv->channel_info[0];
1254
1255 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); 972 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
1256 priv->band = ch_info->band; 973 priv->band = ch_info->band;
1257 974
1258 iwl_set_flags_for_band(priv, priv->band); 975 iwl_set_flags_for_band(priv, priv->band, vif);
1259 976
1260 priv->staging_rxon.ofdm_basic_rates = 977 priv->staging_rxon.ofdm_basic_rates =
1261 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF; 978 (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
@@ -1286,7 +1003,6 @@ static void iwl_set_rate(struct iwl_priv *priv)
1286 } 1003 }
1287 1004
1288 priv->active_rate = 0; 1005 priv->active_rate = 0;
1289 priv->active_rate_basic = 0;
1290 1006
1291 for (i = 0; i < hw->n_bitrates; i++) { 1007 for (i = 0; i < hw->n_bitrates; i++) {
1292 rate = &(hw->bitrates[i]); 1008 rate = &(hw->bitrates[i]);
@@ -1294,30 +1010,13 @@ static void iwl_set_rate(struct iwl_priv *priv)
1294 priv->active_rate |= (1 << rate->hw_value); 1010 priv->active_rate |= (1 << rate->hw_value);
1295 } 1011 }
1296 1012
1297 IWL_DEBUG_RATE(priv, "Set active_rate = %0x, active_rate_basic = %0x\n", 1013 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
1298 priv->active_rate, priv->active_rate_basic);
1299 1014
1300 /* 1015 priv->staging_rxon.cck_basic_rates =
1301 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK) 1016 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1302 * otherwise set it to the default of all CCK rates and 6, 12, 24 for 1017
1303 * OFDM 1018 priv->staging_rxon.ofdm_basic_rates =
1304 */ 1019 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1305 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
1306 priv->staging_rxon.cck_basic_rates =
1307 ((priv->active_rate_basic &
1308 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
1309 else
1310 priv->staging_rxon.cck_basic_rates =
1311 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1312
1313 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
1314 priv->staging_rxon.ofdm_basic_rates =
1315 ((priv->active_rate_basic &
1316 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
1317 IWL_FIRST_OFDM_RATE) & 0xFF;
1318 else
1319 priv->staging_rxon.ofdm_basic_rates =
1320 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1321} 1020}
1322 1021
1323void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 1022void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
@@ -1374,6 +1073,9 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1374 /* Cancel currently queued command. */ 1073 /* Cancel currently queued command. */
1375 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 1074 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1376 1075
1076 IWL_ERR(priv, "Loaded firmware version: %s\n",
1077 priv->hw->wiphy->fw_version);
1078
1377 priv->cfg->ops->lib->dump_nic_error_log(priv); 1079 priv->cfg->ops->lib->dump_nic_error_log(priv);
1378 if (priv->cfg->ops->lib->dump_csr) 1080 if (priv->cfg->ops->lib->dump_csr)
1379 priv->cfg->ops->lib->dump_csr(priv); 1081 priv->cfg->ops->lib->dump_csr(priv);
@@ -1401,7 +1103,7 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1401} 1103}
1402EXPORT_SYMBOL(iwl_irq_handle_error); 1104EXPORT_SYMBOL(iwl_irq_handle_error);
1403 1105
1404int iwl_apm_stop_master(struct iwl_priv *priv) 1106static int iwl_apm_stop_master(struct iwl_priv *priv)
1405{ 1107{
1406 int ret = 0; 1108 int ret = 0;
1407 1109
@@ -1417,7 +1119,6 @@ int iwl_apm_stop_master(struct iwl_priv *priv)
1417 1119
1418 return ret; 1120 return ret;
1419} 1121}
1420EXPORT_SYMBOL(iwl_apm_stop_master);
1421 1122
1422void iwl_apm_stop(struct iwl_priv *priv) 1123void iwl_apm_stop(struct iwl_priv *priv)
1423{ 1124{
@@ -1561,41 +1262,33 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
1561 u64 multicast) 1262 u64 multicast)
1562{ 1263{
1563 struct iwl_priv *priv = hw->priv; 1264 struct iwl_priv *priv = hw->priv;
1564 __le32 *filter_flags = &priv->staging_rxon.filter_flags; 1265 __le32 filter_or = 0, filter_nand = 0;
1266
1267#define CHK(test, flag) do { \
1268 if (*total_flags & (test)) \
1269 filter_or |= (flag); \
1270 else \
1271 filter_nand |= (flag); \
1272 } while (0)
1565 1273
1566 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", 1274 IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
1567 changed_flags, *total_flags); 1275 changed_flags, *total_flags);
1568 1276
1569 if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) { 1277 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
1570 if (*total_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) 1278 CHK(FIF_ALLMULTI, RXON_FILTER_ACCEPT_GRP_MSK);
1571 *filter_flags |= RXON_FILTER_PROMISC_MSK; 1279 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
1572 else 1280 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
1573 *filter_flags &= ~RXON_FILTER_PROMISC_MSK;
1574 }
1575 if (changed_flags & FIF_ALLMULTI) {
1576 if (*total_flags & FIF_ALLMULTI)
1577 *filter_flags |= RXON_FILTER_ACCEPT_GRP_MSK;
1578 else
1579 *filter_flags &= ~RXON_FILTER_ACCEPT_GRP_MSK;
1580 }
1581 if (changed_flags & FIF_CONTROL) {
1582 if (*total_flags & FIF_CONTROL)
1583 *filter_flags |= RXON_FILTER_CTL2HOST_MSK;
1584 else
1585 *filter_flags &= ~RXON_FILTER_CTL2HOST_MSK;
1586 }
1587 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
1588 if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
1589 *filter_flags |= RXON_FILTER_BCON_AWARE_MSK;
1590 else
1591 *filter_flags &= ~RXON_FILTER_BCON_AWARE_MSK;
1592 }
1593 1281
1594 /* We avoid iwl_commit_rxon here to commit the new filter flags 1282#undef CHK
1595 * since mac80211 will call ieee80211_hw_config immediately. 1283
1596 * (mc_list is not supported at this time). Otherwise, we need to 1284 mutex_lock(&priv->mutex);
1597 * queue a background iwl_commit_rxon work. 1285
1598 */ 1286 priv->staging_rxon.filter_flags &= ~filter_nand;
1287 priv->staging_rxon.filter_flags |= filter_or;
1288
1289 iwlcore_commit_rxon(priv);
1290
1291 mutex_unlock(&priv->mutex);
1599 1292
1600 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | 1293 *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
1601 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 1294 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
@@ -1626,10 +1319,11 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1626 int ret = 0; 1319 int ret = 0;
1627 s8 prev_tx_power = priv->tx_power_user_lmt; 1320 s8 prev_tx_power = priv->tx_power_user_lmt;
1628 1321
1629 if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) { 1322 if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
1630 IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n", 1323 IWL_WARN(priv,
1324 "Requested user TXPOWER %d below lower limit %d.\n",
1631 tx_power, 1325 tx_power,
1632 IWL_TX_POWER_TARGET_POWER_MIN); 1326 IWLAGN_TX_POWER_TARGET_POWER_MIN);
1633 return -EINVAL; 1327 return -EINVAL;
1634 } 1328 }
1635 1329
@@ -1668,286 +1362,16 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1668} 1362}
1669EXPORT_SYMBOL(iwl_set_tx_power); 1363EXPORT_SYMBOL(iwl_set_tx_power);
1670 1364
1671#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
1672
1673/* Free dram table */
1674void iwl_free_isr_ict(struct iwl_priv *priv)
1675{
1676 if (priv->ict_tbl_vir) {
1677 dma_free_coherent(&priv->pci_dev->dev,
1678 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
1679 priv->ict_tbl_vir, priv->ict_tbl_dma);
1680 priv->ict_tbl_vir = NULL;
1681 }
1682}
1683EXPORT_SYMBOL(iwl_free_isr_ict);
1684
1685
1686/* allocate dram shared table it is a PAGE_SIZE aligned
1687 * also reset all data related to ICT table interrupt.
1688 */
1689int iwl_alloc_isr_ict(struct iwl_priv *priv)
1690{
1691
1692 if (priv->cfg->use_isr_legacy)
1693 return 0;
1694 /* allocate shrared data table */
1695 priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
1696 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
1697 &priv->ict_tbl_dma, GFP_KERNEL);
1698 if (!priv->ict_tbl_vir)
1699 return -ENOMEM;
1700
1701 /* align table to PAGE_SIZE boundry */
1702 priv->aligned_ict_tbl_dma = ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
1703
1704 IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
1705 (unsigned long long)priv->ict_tbl_dma,
1706 (unsigned long long)priv->aligned_ict_tbl_dma,
1707 (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
1708
1709 priv->ict_tbl = priv->ict_tbl_vir +
1710 (priv->aligned_ict_tbl_dma - priv->ict_tbl_dma);
1711
1712 IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
1713 priv->ict_tbl, priv->ict_tbl_vir,
1714 (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
1715
1716 /* reset table and index to all 0 */
1717 memset(priv->ict_tbl_vir,0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
1718 priv->ict_index = 0;
1719
1720 /* add periodic RX interrupt */
1721 priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
1722 return 0;
1723}
1724EXPORT_SYMBOL(iwl_alloc_isr_ict);
1725
1726/* Device is going up inform it about using ICT interrupt table,
1727 * also we need to tell the driver to start using ICT interrupt.
1728 */
1729int iwl_reset_ict(struct iwl_priv *priv)
1730{
1731 u32 val;
1732 unsigned long flags;
1733
1734 if (!priv->ict_tbl_vir)
1735 return 0;
1736
1737 spin_lock_irqsave(&priv->lock, flags);
1738 iwl_disable_interrupts(priv);
1739
1740 memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
1741
1742 val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
1743
1744 val |= CSR_DRAM_INT_TBL_ENABLE;
1745 val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
1746
1747 IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
1748 "aligned dma address %Lx\n",
1749 val, (unsigned long long)priv->aligned_ict_tbl_dma);
1750
1751 iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
1752 priv->use_ict = true;
1753 priv->ict_index = 0;
1754 iwl_write32(priv, CSR_INT, priv->inta_mask);
1755 iwl_enable_interrupts(priv);
1756 spin_unlock_irqrestore(&priv->lock, flags);
1757
1758 return 0;
1759}
1760EXPORT_SYMBOL(iwl_reset_ict);
1761
1762/* Device is going down disable ict interrupt usage */
1763void iwl_disable_ict(struct iwl_priv *priv)
1764{
1765 unsigned long flags;
1766
1767 spin_lock_irqsave(&priv->lock, flags);
1768 priv->use_ict = false;
1769 spin_unlock_irqrestore(&priv->lock, flags);
1770}
1771EXPORT_SYMBOL(iwl_disable_ict);
1772
1773/* interrupt handler using ict table, with this interrupt driver will
1774 * stop using INTA register to get device's interrupt, reading this register
1775 * is expensive, device will write interrupts in ICT dram table, increment
1776 * index then will fire interrupt to driver, driver will OR all ICT table
1777 * entries from current index up to table entry with 0 value. the result is
1778 * the interrupt we need to service, driver will set the entries back to 0 and
1779 * set index.
1780 */
1781irqreturn_t iwl_isr_ict(int irq, void *data)
1782{
1783 struct iwl_priv *priv = data;
1784 u32 inta, inta_mask;
1785 u32 val = 0;
1786
1787 if (!priv)
1788 return IRQ_NONE;
1789
1790 /* dram interrupt table not set yet,
1791 * use legacy interrupt.
1792 */
1793 if (!priv->use_ict)
1794 return iwl_isr(irq, data);
1795
1796 spin_lock(&priv->lock);
1797
1798 /* Disable (but don't clear!) interrupts here to avoid
1799 * back-to-back ISRs and sporadic interrupts from our NIC.
1800 * If we have something to service, the tasklet will re-enable ints.
1801 * If we *don't* have something, we'll re-enable before leaving here.
1802 */
1803 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
1804 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1805
1806
1807 /* Ignore interrupt if there's nothing in NIC to service.
1808 * This may be due to IRQ shared with another device,
1809 * or due to sporadic interrupts thrown from our NIC. */
1810 if (!priv->ict_tbl[priv->ict_index]) {
1811 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
1812 goto none;
1813 }
1814
1815 /* read all entries that not 0 start with ict_index */
1816 while (priv->ict_tbl[priv->ict_index]) {
1817
1818 val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
1819 IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
1820 priv->ict_index,
1821 le32_to_cpu(priv->ict_tbl[priv->ict_index]));
1822 priv->ict_tbl[priv->ict_index] = 0;
1823 priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
1824 ICT_COUNT);
1825
1826 }
1827
1828 /* We should not get this value, just ignore it. */
1829 if (val == 0xffffffff)
1830 val = 0;
1831
1832 /*
1833 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1834 * (bit 15 before shifting it to 31) to clear when using interrupt
1835 * coalescing. fortunately, bits 18 and 19 stay set when this happens
1836 * so we use them to decide on the real state of the Rx bit.
1837 * In order words, bit 15 is set if bit 18 or bit 19 are set.
1838 */
1839 if (val & 0xC0000)
1840 val |= 0x8000;
1841
1842 inta = (0xff & val) | ((0xff00 & val) << 16);
1843 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
1844 inta, inta_mask, val);
1845
1846 inta &= priv->inta_mask;
1847 priv->inta |= inta;
1848
1849 /* iwl_irq_tasklet() will service interrupts and re-enable them */
1850 if (likely(inta))
1851 tasklet_schedule(&priv->irq_tasklet);
1852 else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) {
1853 /* Allow interrupt if was disabled by this handler and
1854 * no tasklet was schedules, We should not enable interrupt,
1855 * tasklet will enable it.
1856 */
1857 iwl_enable_interrupts(priv);
1858 }
1859
1860 spin_unlock(&priv->lock);
1861 return IRQ_HANDLED;
1862
1863 none:
1864 /* re-enable interrupts here since we don't have anything to service.
1865 * only Re-enable if disabled by irq.
1866 */
1867 if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
1868 iwl_enable_interrupts(priv);
1869
1870 spin_unlock(&priv->lock);
1871 return IRQ_NONE;
1872}
1873EXPORT_SYMBOL(iwl_isr_ict);
1874
1875
1876static irqreturn_t iwl_isr(int irq, void *data)
1877{
1878 struct iwl_priv *priv = data;
1879 u32 inta, inta_mask;
1880#ifdef CONFIG_IWLWIFI_DEBUG
1881 u32 inta_fh;
1882#endif
1883 if (!priv)
1884 return IRQ_NONE;
1885
1886 spin_lock(&priv->lock);
1887
1888 /* Disable (but don't clear!) interrupts here to avoid
1889 * back-to-back ISRs and sporadic interrupts from our NIC.
1890 * If we have something to service, the tasklet will re-enable ints.
1891 * If we *don't* have something, we'll re-enable before leaving here. */
1892 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
1893 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1894
1895 /* Discover which interrupts are active/pending */
1896 inta = iwl_read32(priv, CSR_INT);
1897
1898 /* Ignore interrupt if there's nothing in NIC to service.
1899 * This may be due to IRQ shared with another device,
1900 * or due to sporadic interrupts thrown from our NIC. */
1901 if (!inta) {
1902 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
1903 goto none;
1904 }
1905
1906 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1907 /* Hardware disappeared. It might have already raised
1908 * an interrupt */
1909 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1910 goto unplugged;
1911 }
1912
1913#ifdef CONFIG_IWLWIFI_DEBUG
1914 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
1915 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1916 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
1917 "fh 0x%08x\n", inta, inta_mask, inta_fh);
1918 }
1919#endif
1920
1921 priv->inta |= inta;
1922 /* iwl_irq_tasklet() will service interrupts and re-enable them */
1923 if (likely(inta))
1924 tasklet_schedule(&priv->irq_tasklet);
1925 else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
1926 iwl_enable_interrupts(priv);
1927
1928 unplugged:
1929 spin_unlock(&priv->lock);
1930 return IRQ_HANDLED;
1931
1932 none:
1933 /* re-enable interrupts here since we don't have anything to service. */
1934 /* only Re-enable if diabled by irq and no schedules tasklet. */
1935 if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
1936 iwl_enable_interrupts(priv);
1937
1938 spin_unlock(&priv->lock);
1939 return IRQ_NONE;
1940}
1941
1942irqreturn_t iwl_isr_legacy(int irq, void *data) 1365irqreturn_t iwl_isr_legacy(int irq, void *data)
1943{ 1366{
1944 struct iwl_priv *priv = data; 1367 struct iwl_priv *priv = data;
1945 u32 inta, inta_mask; 1368 u32 inta, inta_mask;
1946 u32 inta_fh; 1369 u32 inta_fh;
1370 unsigned long flags;
1947 if (!priv) 1371 if (!priv)
1948 return IRQ_NONE; 1372 return IRQ_NONE;
1949 1373
1950 spin_lock(&priv->lock); 1374 spin_lock_irqsave(&priv->lock, flags);
1951 1375
1952 /* Disable (but don't clear!) interrupts here to avoid 1376 /* Disable (but don't clear!) interrupts here to avoid
1953 * back-to-back ISRs and sporadic interrupts from our NIC. 1377 * back-to-back ISRs and sporadic interrupts from our NIC.
@@ -1985,7 +1409,7 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
1985 tasklet_schedule(&priv->irq_tasklet); 1409 tasklet_schedule(&priv->irq_tasklet);
1986 1410
1987 unplugged: 1411 unplugged:
1988 spin_unlock(&priv->lock); 1412 spin_unlock_irqrestore(&priv->lock, flags);
1989 return IRQ_HANDLED; 1413 return IRQ_HANDLED;
1990 1414
1991 none: 1415 none:
@@ -1993,12 +1417,12 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
1993 /* only Re-enable if diabled by irq */ 1417 /* only Re-enable if diabled by irq */
1994 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1418 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1995 iwl_enable_interrupts(priv); 1419 iwl_enable_interrupts(priv);
1996 spin_unlock(&priv->lock); 1420 spin_unlock_irqrestore(&priv->lock, flags);
1997 return IRQ_NONE; 1421 return IRQ_NONE;
1998} 1422}
1999EXPORT_SYMBOL(iwl_isr_legacy); 1423EXPORT_SYMBOL(iwl_isr_legacy);
2000 1424
2001int iwl_send_bt_config(struct iwl_priv *priv) 1425void iwl_send_bt_config(struct iwl_priv *priv)
2002{ 1426{
2003 struct iwl_bt_cmd bt_cmd = { 1427 struct iwl_bt_cmd bt_cmd = {
2004 .lead_time = BT_LEAD_TIME_DEF, 1428 .lead_time = BT_LEAD_TIME_DEF,
@@ -2015,8 +1439,9 @@ int iwl_send_bt_config(struct iwl_priv *priv)
2015 IWL_DEBUG_INFO(priv, "BT coex %s\n", 1439 IWL_DEBUG_INFO(priv, "BT coex %s\n",
2016 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); 1440 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
2017 1441
2018 return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG, 1442 if (iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
2019 sizeof(struct iwl_bt_cmd), &bt_cmd); 1443 sizeof(struct iwl_bt_cmd), &bt_cmd))
1444 IWL_ERR(priv, "failed to send BT Coex Config\n");
2020} 1445}
2021EXPORT_SYMBOL(iwl_send_bt_config); 1446EXPORT_SYMBOL(iwl_send_bt_config);
2022 1447
@@ -2306,12 +1731,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
2306 cpu_to_le16((params->txop * 32)); 1731 cpu_to_le16((params->txop * 32));
2307 1732
2308 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; 1733 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
2309 priv->qos_data.qos_active = 1;
2310
2311 if (priv->iw_mode == NL80211_IFTYPE_AP)
2312 iwl_activate_qos(priv, 1);
2313 else if (priv->assoc_id && iwl_is_associated(priv))
2314 iwl_activate_qos(priv, 0);
2315 1734
2316 spin_unlock_irqrestore(&priv->lock, flags); 1735 spin_unlock_irqrestore(&priv->lock, flags);
2317 1736
@@ -2321,12 +1740,13 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
2321EXPORT_SYMBOL(iwl_mac_conf_tx); 1740EXPORT_SYMBOL(iwl_mac_conf_tx);
2322 1741
2323static void iwl_ht_conf(struct iwl_priv *priv, 1742static void iwl_ht_conf(struct iwl_priv *priv,
2324 struct ieee80211_bss_conf *bss_conf) 1743 struct ieee80211_vif *vif)
2325{ 1744{
2326 struct iwl_ht_config *ht_conf = &priv->current_ht_config; 1745 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2327 struct ieee80211_sta *sta; 1746 struct ieee80211_sta *sta;
1747 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
2328 1748
2329 IWL_DEBUG_MAC80211(priv, "enter: \n"); 1749 IWL_DEBUG_MAC80211(priv, "enter:\n");
2330 1750
2331 if (!ht_conf->is_ht) 1751 if (!ht_conf->is_ht)
2332 return; 1752 return;
@@ -2338,10 +1758,10 @@ static void iwl_ht_conf(struct iwl_priv *priv,
2338 1758
2339 ht_conf->single_chain_sufficient = false; 1759 ht_conf->single_chain_sufficient = false;
2340 1760
2341 switch (priv->iw_mode) { 1761 switch (vif->type) {
2342 case NL80211_IFTYPE_STATION: 1762 case NL80211_IFTYPE_STATION:
2343 rcu_read_lock(); 1763 rcu_read_lock();
2344 sta = ieee80211_find_sta(priv->vif, priv->bssid); 1764 sta = ieee80211_find_sta(vif, bss_conf->bssid);
2345 if (sta) { 1765 if (sta) {
2346 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 1766 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2347 int maxstreams; 1767 int maxstreams;
@@ -2379,7 +1799,6 @@ static void iwl_ht_conf(struct iwl_priv *priv,
2379 1799
2380static inline void iwl_set_no_assoc(struct iwl_priv *priv) 1800static inline void iwl_set_no_assoc(struct iwl_priv *priv)
2381{ 1801{
2382 priv->assoc_id = 0;
2383 iwl_led_disassociate(priv); 1802 iwl_led_disassociate(priv);
2384 /* 1803 /*
2385 * inform the ucode that there is no longer an 1804 * inform the ucode that there is no longer an
@@ -2392,7 +1811,6 @@ static inline void iwl_set_no_assoc(struct iwl_priv *priv)
2392 iwlcore_commit_rxon(priv); 1811 iwlcore_commit_rxon(priv);
2393} 1812}
2394 1813
2395#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
2396void iwl_bss_info_changed(struct ieee80211_hw *hw, 1814void iwl_bss_info_changed(struct ieee80211_hw *hw,
2397 struct ieee80211_vif *vif, 1815 struct ieee80211_vif *vif,
2398 struct ieee80211_bss_conf *bss_conf, 1816 struct ieee80211_bss_conf *bss_conf,
@@ -2408,14 +1826,12 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2408 1826
2409 mutex_lock(&priv->mutex); 1827 mutex_lock(&priv->mutex);
2410 1828
2411 if (changes & BSS_CHANGED_BEACON && 1829 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
2412 priv->iw_mode == NL80211_IFTYPE_AP) {
2413 dev_kfree_skb(priv->ibss_beacon); 1830 dev_kfree_skb(priv->ibss_beacon);
2414 priv->ibss_beacon = ieee80211_beacon_get(hw, vif); 1831 priv->ibss_beacon = ieee80211_beacon_get(hw, vif);
2415 } 1832 }
2416 1833
2417 if (changes & BSS_CHANGED_BEACON_INT) { 1834 if (changes & BSS_CHANGED_BEACON_INT) {
2418 priv->beacon_int = bss_conf->beacon_int;
2419 /* TODO: in AP mode, do something to make this take effect */ 1835 /* TODO: in AP mode, do something to make this take effect */
2420 } 1836 }
2421 1837
@@ -2435,8 +1851,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2435 } 1851 }
2436 1852
2437 /* mac80211 only sets assoc when in STATION mode */ 1853 /* mac80211 only sets assoc when in STATION mode */
2438 if (priv->iw_mode == NL80211_IFTYPE_ADHOC || 1854 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
2439 bss_conf->assoc) {
2440 memcpy(priv->staging_rxon.bssid_addr, 1855 memcpy(priv->staging_rxon.bssid_addr,
2441 bss_conf->bssid, ETH_ALEN); 1856 bss_conf->bssid, ETH_ALEN);
2442 1857
@@ -2454,7 +1869,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2454 * mac80211 decides to do both changes at once because 1869 * mac80211 decides to do both changes at once because
2455 * it will invoke post_associate. 1870 * it will invoke post_associate.
2456 */ 1871 */
2457 if (priv->iw_mode == NL80211_IFTYPE_ADHOC && 1872 if (vif->type == NL80211_IFTYPE_ADHOC &&
2458 changes & BSS_CHANGED_BEACON) { 1873 changes & BSS_CHANGED_BEACON) {
2459 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 1874 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
2460 1875
@@ -2497,7 +1912,7 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2497 } 1912 }
2498 1913
2499 if (changes & BSS_CHANGED_HT) { 1914 if (changes & BSS_CHANGED_HT) {
2500 iwl_ht_conf(priv, bss_conf); 1915 iwl_ht_conf(priv, vif);
2501 1916
2502 if (priv->cfg->ops->hcmd->set_rxon_chain) 1917 if (priv->cfg->ops->hcmd->set_rxon_chain)
2503 priv->cfg->ops->hcmd->set_rxon_chain(priv); 1918 priv->cfg->ops->hcmd->set_rxon_chain(priv);
@@ -2506,28 +1921,17 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2506 if (changes & BSS_CHANGED_ASSOC) { 1921 if (changes & BSS_CHANGED_ASSOC) {
2507 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc); 1922 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
2508 if (bss_conf->assoc) { 1923 if (bss_conf->assoc) {
2509 priv->assoc_id = bss_conf->aid;
2510 priv->beacon_int = bss_conf->beacon_int;
2511 priv->timestamp = bss_conf->timestamp; 1924 priv->timestamp = bss_conf->timestamp;
2512 priv->assoc_capability = bss_conf->assoc_capability;
2513 1925
2514 iwl_led_associate(priv); 1926 iwl_led_associate(priv);
2515 1927
2516 /*
2517 * We have just associated, don't start scan too early
2518 * leave time for EAPOL exchange to complete.
2519 *
2520 * XXX: do this in mac80211
2521 */
2522 priv->next_scan_jiffies = jiffies +
2523 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
2524 if (!iwl_is_rfkill(priv)) 1928 if (!iwl_is_rfkill(priv))
2525 priv->cfg->ops->lib->post_associate(priv); 1929 priv->cfg->ops->lib->post_associate(priv, vif);
2526 } else 1930 } else
2527 iwl_set_no_assoc(priv); 1931 iwl_set_no_assoc(priv);
2528 } 1932 }
2529 1933
2530 if (changes && iwl_is_associated(priv) && priv->assoc_id) { 1934 if (changes && iwl_is_associated(priv) && bss_conf->aid) {
2531 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n", 1935 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
2532 changes); 1936 changes);
2533 ret = iwl_send_rxon_assoc(priv); 1937 ret = iwl_send_rxon_assoc(priv);
@@ -2544,11 +1948,20 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2544 memcpy(priv->staging_rxon.bssid_addr, 1948 memcpy(priv->staging_rxon.bssid_addr,
2545 bss_conf->bssid, ETH_ALEN); 1949 bss_conf->bssid, ETH_ALEN);
2546 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN); 1950 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
2547 iwlcore_config_ap(priv); 1951 iwlcore_config_ap(priv, vif);
2548 } else 1952 } else
2549 iwl_set_no_assoc(priv); 1953 iwl_set_no_assoc(priv);
2550 } 1954 }
2551 1955
1956 if (changes & BSS_CHANGED_IBSS) {
1957 ret = priv->cfg->ops->lib->manage_ibss_station(priv, vif,
1958 bss_conf->ibss_joined);
1959 if (ret)
1960 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
1961 bss_conf->ibss_joined ? "add" : "remove",
1962 bss_conf->bssid);
1963 }
1964
2552 mutex_unlock(&priv->mutex); 1965 mutex_unlock(&priv->mutex);
2553 1966
2554 IWL_DEBUG_MAC80211(priv, "leave\n"); 1967 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2568,11 +1981,6 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
2568 return -EIO; 1981 return -EIO;
2569 } 1982 }
2570 1983
2571 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
2572 IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n");
2573 return -EIO;
2574 }
2575
2576 spin_lock_irqsave(&priv->lock, flags); 1984 spin_lock_irqsave(&priv->lock, flags);
2577 1985
2578 if (priv->ibss_beacon) 1986 if (priv->ibss_beacon)
@@ -2580,59 +1988,31 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
2580 1988
2581 priv->ibss_beacon = skb; 1989 priv->ibss_beacon = skb;
2582 1990
2583 priv->assoc_id = 0;
2584 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp; 1991 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
2585 priv->timestamp = le64_to_cpu(timestamp); 1992 priv->timestamp = le64_to_cpu(timestamp);
2586 1993
2587 IWL_DEBUG_MAC80211(priv, "leave\n"); 1994 IWL_DEBUG_MAC80211(priv, "leave\n");
2588 spin_unlock_irqrestore(&priv->lock, flags); 1995 spin_unlock_irqrestore(&priv->lock, flags);
2589 1996
2590 iwl_reset_qos(priv); 1997 priv->cfg->ops->lib->post_associate(priv, priv->vif);
2591
2592 priv->cfg->ops->lib->post_associate(priv);
2593
2594 1998
2595 return 0; 1999 return 0;
2596} 2000}
2597EXPORT_SYMBOL(iwl_mac_beacon_update); 2001EXPORT_SYMBOL(iwl_mac_beacon_update);
2598 2002
2599int iwl_set_mode(struct iwl_priv *priv, int mode) 2003static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
2600{ 2004{
2601 if (mode == NL80211_IFTYPE_ADHOC) { 2005 iwl_connection_init_rx_config(priv, vif);
2602 const struct iwl_channel_info *ch_info;
2603
2604 ch_info = iwl_get_channel_info(priv,
2605 priv->band,
2606 le16_to_cpu(priv->staging_rxon.channel));
2607
2608 if (!ch_info || !is_channel_ibss(ch_info)) {
2609 IWL_ERR(priv, "channel %d not IBSS channel\n",
2610 le16_to_cpu(priv->staging_rxon.channel));
2611 return -EINVAL;
2612 }
2613 }
2614
2615 iwl_connection_init_rx_config(priv, mode);
2616 2006
2617 if (priv->cfg->ops->hcmd->set_rxon_chain) 2007 if (priv->cfg->ops->hcmd->set_rxon_chain)
2618 priv->cfg->ops->hcmd->set_rxon_chain(priv); 2008 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2619 2009
2620 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); 2010 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2621 2011
2622 iwl_clear_stations_table(priv); 2012 return iwlcore_commit_rxon(priv);
2623
2624 /* dont commit rxon if rf-kill is on*/
2625 if (!iwl_is_ready_rf(priv))
2626 return -EAGAIN;
2627
2628 iwlcore_commit_rxon(priv);
2629
2630 return 0;
2631} 2013}
2632EXPORT_SYMBOL(iwl_set_mode);
2633 2014
2634int iwl_mac_add_interface(struct ieee80211_hw *hw, 2015int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
2635 struct ieee80211_vif *vif)
2636{ 2016{
2637 struct iwl_priv *priv = hw->priv; 2017 struct iwl_priv *priv = hw->priv;
2638 int err = 0; 2018 int err = 0;
@@ -2641,6 +2021,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
2641 2021
2642 mutex_lock(&priv->mutex); 2022 mutex_lock(&priv->mutex);
2643 2023
2024 if (WARN_ON(!iwl_is_ready_rf(priv))) {
2025 err = -EINVAL;
2026 goto out;
2027 }
2028
2644 if (priv->vif) { 2029 if (priv->vif) {
2645 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n"); 2030 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
2646 err = -EOPNOTSUPP; 2031 err = -EOPNOTSUPP;
@@ -2650,15 +2035,18 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
2650 priv->vif = vif; 2035 priv->vif = vif;
2651 priv->iw_mode = vif->type; 2036 priv->iw_mode = vif->type;
2652 2037
2653 if (vif->addr) { 2038 IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
2654 IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr); 2039 memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
2655 memcpy(priv->mac_addr, vif->addr, ETH_ALEN); 2040
2656 } 2041 err = iwl_set_mode(priv, vif);
2042 if (err)
2043 goto out_err;
2657 2044
2658 if (iwl_set_mode(priv, vif->type) == -EAGAIN) 2045 goto out;
2659 /* we are not ready, will run again when ready */
2660 set_bit(STATUS_MODE_PENDING, &priv->status);
2661 2046
2047 out_err:
2048 priv->vif = NULL;
2049 priv->iw_mode = NL80211_IFTYPE_STATION;
2662 out: 2050 out:
2663 mutex_unlock(&priv->mutex); 2051 mutex_unlock(&priv->mutex);
2664 2052
@@ -2668,7 +2056,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
2668EXPORT_SYMBOL(iwl_mac_add_interface); 2056EXPORT_SYMBOL(iwl_mac_add_interface);
2669 2057
2670void iwl_mac_remove_interface(struct ieee80211_hw *hw, 2058void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2671 struct ieee80211_vif *vif) 2059 struct ieee80211_vif *vif)
2672{ 2060{
2673 struct iwl_priv *priv = hw->priv; 2061 struct iwl_priv *priv = hw->priv;
2674 2062
@@ -2694,10 +2082,6 @@ EXPORT_SYMBOL(iwl_mac_remove_interface);
2694 2082
2695/** 2083/**
2696 * iwl_mac_config - mac80211 config callback 2084 * iwl_mac_config - mac80211 config callback
2697 *
2698 * We ignore conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME since it seems to
2699 * be set inappropriately and the driver currently sets the hardware up to
2700 * use it whenever needed.
2701 */ 2085 */
2702int iwl_mac_config(struct ieee80211_hw *hw, u32 changed) 2086int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2703{ 2087{
@@ -2752,15 +2136,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2752 goto set_ch_out; 2136 goto set_ch_out;
2753 } 2137 }
2754 2138
2755 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2756 !is_channel_ibss(ch_info)) {
2757 IWL_ERR(priv, "channel %d in band %d not "
2758 "IBSS channel\n",
2759 conf->channel->hw_value, conf->channel->band);
2760 ret = -EINVAL;
2761 goto set_ch_out;
2762 }
2763
2764 spin_lock_irqsave(&priv->lock, flags); 2139 spin_lock_irqsave(&priv->lock, flags);
2765 2140
2766 /* Configure HT40 channels */ 2141 /* Configure HT40 channels */
@@ -2794,7 +2169,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2794 iwl_set_rxon_channel(priv, conf->channel); 2169 iwl_set_rxon_channel(priv, conf->channel);
2795 iwl_set_rxon_ht(priv, ht_conf); 2170 iwl_set_rxon_ht(priv, ht_conf);
2796 2171
2797 iwl_set_flags_for_band(priv, conf->channel->band); 2172 iwl_set_flags_for_band(priv, conf->channel->band, priv->vif);
2798 spin_unlock_irqrestore(&priv->lock, flags); 2173 spin_unlock_irqrestore(&priv->lock, flags);
2799 if (iwl_is_associated(priv) && 2174 if (iwl_is_associated(priv) &&
2800 (le16_to_cpu(priv->active_rxon.channel) != ch) && 2175 (le16_to_cpu(priv->active_rxon.channel) != ch) &&
@@ -2833,6 +2208,15 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2833 iwl_set_tx_power(priv, conf->power_level, false); 2208 iwl_set_tx_power(priv, conf->power_level, false);
2834 } 2209 }
2835 2210
2211 if (changed & IEEE80211_CONF_CHANGE_QOS) {
2212 bool qos_active = !!(conf->flags & IEEE80211_CONF_QOS);
2213
2214 spin_lock_irqsave(&priv->lock, flags);
2215 priv->qos_data.qos_active = qos_active;
2216 iwl_update_qos(priv);
2217 spin_unlock_irqrestore(&priv->lock, flags);
2218 }
2219
2836 if (!iwl_is_ready(priv)) { 2220 if (!iwl_is_ready(priv)) {
2837 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 2221 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2838 goto out; 2222 goto out;
@@ -2867,12 +2251,7 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2867 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config)); 2251 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2868 spin_unlock_irqrestore(&priv->lock, flags); 2252 spin_unlock_irqrestore(&priv->lock, flags);
2869 2253
2870 iwl_reset_qos(priv);
2871
2872 spin_lock_irqsave(&priv->lock, flags); 2254 spin_lock_irqsave(&priv->lock, flags);
2873 priv->assoc_id = 0;
2874 priv->assoc_capability = 0;
2875 priv->assoc_station_added = 0;
2876 2255
2877 /* new association get rid of ibss beacon skb */ 2256 /* new association get rid of ibss beacon skb */
2878 if (priv->ibss_beacon) 2257 if (priv->ibss_beacon)
@@ -2880,10 +2259,7 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2880 2259
2881 priv->ibss_beacon = NULL; 2260 priv->ibss_beacon = NULL;
2882 2261
2883 priv->beacon_int = priv->vif->bss_conf.beacon_int;
2884 priv->timestamp = 0; 2262 priv->timestamp = 0;
2885 if ((priv->iw_mode == NL80211_IFTYPE_STATION))
2886 priv->beacon_int = 0;
2887 2263
2888 spin_unlock_irqrestore(&priv->lock, flags); 2264 spin_unlock_irqrestore(&priv->lock, flags);
2889 2265
@@ -2896,17 +2272,9 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2896 /* we are restarting association process 2272 /* we are restarting association process
2897 * clear RXON_FILTER_ASSOC_MSK bit 2273 * clear RXON_FILTER_ASSOC_MSK bit
2898 */ 2274 */
2899 if (priv->iw_mode != NL80211_IFTYPE_AP) { 2275 iwl_scan_cancel_timeout(priv, 100);
2900 iwl_scan_cancel_timeout(priv, 100); 2276 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2901 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2277 iwlcore_commit_rxon(priv);
2902 iwlcore_commit_rxon(priv);
2903 }
2904
2905 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
2906 IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n");
2907 mutex_unlock(&priv->mutex);
2908 return;
2909 }
2910 2278
2911 iwl_set_rate(priv); 2279 iwl_set_rate(priv);
2912 2280
@@ -2923,7 +2291,7 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
2923 sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues, 2291 sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
2924 GFP_KERNEL); 2292 GFP_KERNEL);
2925 if (!priv->txq) { 2293 if (!priv->txq) {
2926 IWL_ERR(priv, "Not enough memory for txq \n"); 2294 IWL_ERR(priv, "Not enough memory for txq\n");
2927 return -ENOMEM; 2295 return -ENOMEM;
2928 } 2296 }
2929 return 0; 2297 return 0;
@@ -2937,34 +2305,6 @@ void iwl_free_txq_mem(struct iwl_priv *priv)
2937} 2305}
2938EXPORT_SYMBOL(iwl_free_txq_mem); 2306EXPORT_SYMBOL(iwl_free_txq_mem);
2939 2307
2940int iwl_send_wimax_coex(struct iwl_priv *priv)
2941{
2942 struct iwl_wimax_coex_cmd uninitialized_var(coex_cmd);
2943
2944 if (priv->cfg->support_wimax_coexist) {
2945 /* UnMask wake up src at associated sleep */
2946 coex_cmd.flags |= COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
2947
2948 /* UnMask wake up src at unassociated sleep */
2949 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
2950 memcpy(coex_cmd.sta_prio, cu_priorities,
2951 sizeof(struct iwl_wimax_coex_event_entry) *
2952 COEX_NUM_OF_EVENTS);
2953
2954 /* enabling the coexistence feature */
2955 coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
2956
2957 /* enabling the priorities tables */
2958 coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
2959 } else {
2960 /* coexistence is disabled */
2961 memset(&coex_cmd, 0, sizeof(coex_cmd));
2962 }
2963 return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
2964 sizeof(coex_cmd), &coex_cmd);
2965}
2966EXPORT_SYMBOL(iwl_send_wimax_coex);
2967
2968#ifdef CONFIG_IWLWIFI_DEBUGFS 2308#ifdef CONFIG_IWLWIFI_DEBUGFS
2969 2309
2970#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES) 2310#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
@@ -3358,7 +2698,6 @@ static void iwl_force_rf_reset(struct iwl_priv *priv)
3358 */ 2698 */
3359 IWL_DEBUG_INFO(priv, "perform radio reset.\n"); 2699 IWL_DEBUG_INFO(priv, "perform radio reset.\n");
3360 iwl_internal_short_hw_scan(priv); 2700 iwl_internal_short_hw_scan(priv);
3361 return;
3362} 2701}
3363 2702
3364 2703
@@ -3404,6 +2743,99 @@ int iwl_force_reset(struct iwl_priv *priv, int mode)
3404 } 2743 }
3405 return 0; 2744 return 0;
3406} 2745}
2746EXPORT_SYMBOL(iwl_force_reset);
2747
2748/**
2749 * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover
2750 *
2751 * During normal condition (no queue is stuck), the timer is continually set to
2752 * execute every monitor_recover_period milliseconds after the last timer
2753 * expired. When the queue read_ptr is at the same place, the timer is
2754 * shorten to 100mSecs. This is
2755 * 1) to reduce the chance that the read_ptr may wrap around (not stuck)
2756 * 2) to detect the stuck queues quicker before the station and AP can
2757 * disassociate each other.
2758 *
2759 * This function monitors all the tx queues and recover from it if any
2760 * of the queues are stuck.
2761 * 1. It first check the cmd queue for stuck conditions. If it is stuck,
2762 * it will recover by resetting the firmware and return.
2763 * 2. Then, it checks for station association. If it associates it will check
2764 * other queues. If any queue is stuck, it will recover by resetting
2765 * the firmware.
2766 * Note: It the number of times the queue read_ptr to be at the same place to
2767 * be MAX_REPEAT+1 in order to consider to be stuck.
2768 */
2769/*
2770 * The maximum number of times the read pointer of the tx queue at the
2771 * same place without considering to be stuck.
2772 */
2773#define MAX_REPEAT (2)
2774static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
2775{
2776 struct iwl_tx_queue *txq;
2777 struct iwl_queue *q;
2778
2779 txq = &priv->txq[cnt];
2780 q = &txq->q;
2781 /* queue is empty, skip */
2782 if (q->read_ptr != q->write_ptr) {
2783 if (q->read_ptr == q->last_read_ptr) {
2784 /* a queue has not been read from last time */
2785 if (q->repeat_same_read_ptr > MAX_REPEAT) {
2786 IWL_ERR(priv,
2787 "queue %d stuck %d time. Fw reload.\n",
2788 q->id, q->repeat_same_read_ptr);
2789 q->repeat_same_read_ptr = 0;
2790 iwl_force_reset(priv, IWL_FW_RESET);
2791 } else {
2792 q->repeat_same_read_ptr++;
2793 IWL_DEBUG_RADIO(priv,
2794 "queue %d, not read %d time\n",
2795 q->id,
2796 q->repeat_same_read_ptr);
2797 mod_timer(&priv->monitor_recover, jiffies +
2798 msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS));
2799 }
2800 return 1;
2801 } else {
2802 q->last_read_ptr = q->read_ptr;
2803 q->repeat_same_read_ptr = 0;
2804 }
2805 }
2806 return 0;
2807}
2808
2809void iwl_bg_monitor_recover(unsigned long data)
2810{
2811 struct iwl_priv *priv = (struct iwl_priv *)data;
2812 int cnt;
2813
2814 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2815 return;
2816
2817 /* monitor and check for stuck cmd queue */
2818 if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM))
2819 return;
2820
2821 /* monitor and check for other stuck queues */
2822 if (iwl_is_associated(priv)) {
2823 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
2824 /* skip as we already checked the command queue */
2825 if (cnt == IWL_CMD_QUEUE_NUM)
2826 continue;
2827 if (iwl_check_stuck_queue(priv, cnt))
2828 return;
2829 }
2830 }
2831 /*
2832 * Reschedule the timer to occur in
2833 * priv->cfg->monitor_recover_period
2834 */
2835 mod_timer(&priv->monitor_recover,
2836 jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
2837}
2838EXPORT_SYMBOL(iwl_bg_monitor_recover);
3407 2839
3408#ifdef CONFIG_PM 2840#ifdef CONFIG_PM
3409 2841
@@ -3433,6 +2865,12 @@ int iwl_pci_resume(struct pci_dev *pdev)
3433 struct iwl_priv *priv = pci_get_drvdata(pdev); 2865 struct iwl_priv *priv = pci_get_drvdata(pdev);
3434 int ret; 2866 int ret;
3435 2867
2868 /*
2869 * We disable the RETRY_TIMEOUT register (0x41) to keep
2870 * PCI Tx retries from interfering with C3 CPU state.
2871 */
2872 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2873
3436 pci_set_power_state(pdev, PCI_D0); 2874 pci_set_power_state(pdev, PCI_D0);
3437 ret = pci_enable_device(pdev); 2875 ret = pci_enable_device(pdev);
3438 if (ret) 2876 if (ret)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 732590f5fe30..7e5a5ba41fd2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -90,6 +90,7 @@ struct iwl_hcmd_ops {
90 int (*commit_rxon)(struct iwl_priv *priv); 90 int (*commit_rxon)(struct iwl_priv *priv);
91 void (*set_rxon_chain)(struct iwl_priv *priv); 91 void (*set_rxon_chain)(struct iwl_priv *priv);
92 int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant); 92 int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
93 void (*send_bt_config)(struct iwl_priv *priv);
93}; 94};
94 95
95struct iwl_hcmd_utils_ops { 96struct iwl_hcmd_utils_ops {
@@ -105,6 +106,7 @@ struct iwl_hcmd_utils_ops {
105 __le32 *tx_flags); 106 __le32 *tx_flags);
106 int (*calc_rssi)(struct iwl_priv *priv, 107 int (*calc_rssi)(struct iwl_priv *priv,
107 struct iwl_rx_phy_res *rx_resp); 108 struct iwl_rx_phy_res *rx_resp);
109 void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
108}; 110};
109 111
110struct iwl_apm_ops { 112struct iwl_apm_ops {
@@ -114,23 +116,21 @@ struct iwl_apm_ops {
114 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src); 116 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
115}; 117};
116 118
119struct iwl_debugfs_ops {
120 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
121 size_t count, loff_t *ppos);
122 ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
123 size_t count, loff_t *ppos);
124 ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
125 size_t count, loff_t *ppos);
126};
127
117struct iwl_temp_ops { 128struct iwl_temp_ops {
118 void (*temperature)(struct iwl_priv *priv); 129 void (*temperature)(struct iwl_priv *priv);
119 void (*set_ct_kill)(struct iwl_priv *priv); 130 void (*set_ct_kill)(struct iwl_priv *priv);
120 void (*set_calib_version)(struct iwl_priv *priv); 131 void (*set_calib_version)(struct iwl_priv *priv);
121}; 132};
122 133
123struct iwl_ucode_ops {
124 u32 (*get_header_size)(u32);
125 u32 (*get_build)(const struct iwl_ucode_header *, u32);
126 u32 (*get_inst_size)(const struct iwl_ucode_header *, u32);
127 u32 (*get_data_size)(const struct iwl_ucode_header *, u32);
128 u32 (*get_init_size)(const struct iwl_ucode_header *, u32);
129 u32 (*get_init_data_size)(const struct iwl_ucode_header *, u32);
130 u32 (*get_boot_size)(const struct iwl_ucode_header *, u32);
131 u8 * (*get_data)(const struct iwl_ucode_header *, u32);
132};
133
134struct iwl_lib_ops { 134struct iwl_lib_ops {
135 /* set hw dependent parameters */ 135 /* set hw dependent parameters */
136 int (*set_hw_params)(struct iwl_priv *priv); 136 int (*set_hw_params)(struct iwl_priv *priv);
@@ -180,8 +180,9 @@ struct iwl_lib_ops {
180 /* power */ 180 /* power */
181 int (*send_tx_power) (struct iwl_priv *priv); 181 int (*send_tx_power) (struct iwl_priv *priv);
182 void (*update_chain_flags)(struct iwl_priv *priv); 182 void (*update_chain_flags)(struct iwl_priv *priv);
183 void (*post_associate) (struct iwl_priv *priv); 183 void (*post_associate)(struct iwl_priv *priv,
184 void (*config_ap) (struct iwl_priv *priv); 184 struct ieee80211_vif *vif);
185 void (*config_ap)(struct iwl_priv *priv, struct ieee80211_vif *vif);
185 irqreturn_t (*isr) (int irq, void *data); 186 irqreturn_t (*isr) (int irq, void *data);
186 187
187 /* eeprom operations (as defined in iwl-eeprom.h) */ 188 /* eeprom operations (as defined in iwl-eeprom.h) */
@@ -190,7 +191,17 @@ struct iwl_lib_ops {
190 /* temperature */ 191 /* temperature */
191 struct iwl_temp_ops temp_ops; 192 struct iwl_temp_ops temp_ops;
192 /* station management */ 193 /* station management */
193 void (*add_bcast_station)(struct iwl_priv *priv); 194 int (*manage_ibss_station)(struct iwl_priv *priv,
195 struct ieee80211_vif *vif, bool add);
196 /* recover from tx queue stall */
197 void (*recover_from_tx_stall)(unsigned long data);
198 /* check for plcp health */
199 bool (*check_plcp_health)(struct iwl_priv *priv,
200 struct iwl_rx_packet *pkt);
201 /* check for ack health */
202 bool (*check_ack_health)(struct iwl_priv *priv,
203 struct iwl_rx_packet *pkt);
204 struct iwl_debugfs_ops debugfs_ops;
194}; 205};
195 206
196struct iwl_led_ops { 207struct iwl_led_ops {
@@ -200,7 +211,6 @@ struct iwl_led_ops {
200}; 211};
201 212
202struct iwl_ops { 213struct iwl_ops {
203 const struct iwl_ucode_ops *ucode;
204 const struct iwl_lib_ops *lib; 214 const struct iwl_lib_ops *lib;
205 const struct iwl_hcmd_ops *hcmd; 215 const struct iwl_hcmd_ops *hcmd;
206 const struct iwl_hcmd_utils_ops *utils; 216 const struct iwl_hcmd_utils_ops *utils;
@@ -237,6 +247,18 @@ struct iwl_mod_params {
237 * @support_wimax_coexist: support wimax/wifi co-exist 247 * @support_wimax_coexist: support wimax/wifi co-exist
238 * @plcp_delta_threshold: plcp error rate threshold used to trigger 248 * @plcp_delta_threshold: plcp error rate threshold used to trigger
239 * radio tuning when there is a high receiving plcp error rate 249 * radio tuning when there is a high receiving plcp error rate
250 * @chain_noise_scale: default chain noise scale used for gain computation
251 * @monitor_recover_period: default timer used to check stuck queues
252 * @temperature_kelvin: temperature report by uCode in kelvin
253 * @max_event_log_size: size of event log buffer size for ucode event logging
254 * @tx_power_by_driver: tx power calibration performed by driver
255 * instead of uCode
256 * @ucode_tracing: support ucode continuous tracing
257 * @sensitivity_calib_by_driver: driver has the capability to perform
258 * sensitivity calibration operation
259 * @chain_noise_calib_by_driver: driver has the capability to perform
260 * chain noise calibration operation
261 * @scan_antennas: available antenna for scan operation
240 * 262 *
241 * We enable the driver to be backward compatible wrt API version. The 263 * We enable the driver to be backward compatible wrt API version. The
242 * driver specifies which APIs it supports (with @ucode_api_max being the 264 * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -295,6 +317,15 @@ struct iwl_cfg {
295 const bool support_wimax_coexist; 317 const bool support_wimax_coexist;
296 u8 plcp_delta_threshold; 318 u8 plcp_delta_threshold;
297 s32 chain_noise_scale; 319 s32 chain_noise_scale;
320 /* timer period for monitor the driver queues */
321 u32 monitor_recover_period;
322 bool temperature_kelvin;
323 u32 max_event_log_size;
324 const bool tx_power_by_driver;
325 const bool ucode_tracing;
326 const bool sensitivity_calib_by_driver;
327 const bool chain_noise_calib_by_driver;
328 u8 scan_antennas[IEEE80211_NUM_BANDS];
298}; 329};
299 330
300/*************************** 331/***************************
@@ -304,8 +335,7 @@ struct iwl_cfg {
304struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 335struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
305 struct ieee80211_ops *hw_ops); 336 struct ieee80211_ops *hw_ops);
306void iwl_hw_detect(struct iwl_priv *priv); 337void iwl_hw_detect(struct iwl_priv *priv);
307void iwl_reset_qos(struct iwl_priv *priv); 338void iwl_activate_qos(struct iwl_priv *priv);
308void iwl_activate_qos(struct iwl_priv *priv, u8 force);
309int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, 339int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
310 const struct ieee80211_tx_queue_params *params); 340 const struct ieee80211_tx_queue_params *params);
311void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt); 341void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt);
@@ -316,8 +346,8 @@ int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
316void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf); 346void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
317u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv, 347u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
318 struct ieee80211_sta_ht_cap *sta_ht_inf); 348 struct ieee80211_sta_ht_cap *sta_ht_inf);
319void iwl_set_flags_for_band(struct iwl_priv *priv, enum ieee80211_band band); 349void iwl_connection_init_rx_config(struct iwl_priv *priv,
320void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode); 350 struct ieee80211_vif *vif);
321int iwl_set_decrypted_flag(struct iwl_priv *priv, 351int iwl_set_decrypted_flag(struct iwl_priv *priv,
322 struct ieee80211_hdr *hdr, 352 struct ieee80211_hdr *hdr,
323 u32 decrypt_res, 353 u32 decrypt_res,
@@ -326,29 +356,25 @@ void iwl_irq_handle_error(struct iwl_priv *priv);
326void iwl_configure_filter(struct ieee80211_hw *hw, 356void iwl_configure_filter(struct ieee80211_hw *hw,
327 unsigned int changed_flags, 357 unsigned int changed_flags,
328 unsigned int *total_flags, u64 multicast); 358 unsigned int *total_flags, u64 multicast);
329int iwl_hw_nic_init(struct iwl_priv *priv);
330int iwl_set_hw_params(struct iwl_priv *priv); 359int iwl_set_hw_params(struct iwl_priv *priv);
331bool iwl_is_monitor_mode(struct iwl_priv *priv); 360void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
332void iwl_post_associate(struct iwl_priv *priv);
333void iwl_bss_info_changed(struct ieee80211_hw *hw, 361void iwl_bss_info_changed(struct ieee80211_hw *hw,
334 struct ieee80211_vif *vif, 362 struct ieee80211_vif *vif,
335 struct ieee80211_bss_conf *bss_conf, 363 struct ieee80211_bss_conf *bss_conf,
336 u32 changes); 364 u32 changes);
337int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb); 365int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
338int iwl_commit_rxon(struct iwl_priv *priv); 366int iwl_commit_rxon(struct iwl_priv *priv);
339int iwl_set_mode(struct iwl_priv *priv, int mode);
340int iwl_mac_add_interface(struct ieee80211_hw *hw, 367int iwl_mac_add_interface(struct ieee80211_hw *hw,
341 struct ieee80211_vif *vif); 368 struct ieee80211_vif *vif);
342void iwl_mac_remove_interface(struct ieee80211_hw *hw, 369void iwl_mac_remove_interface(struct ieee80211_hw *hw,
343 struct ieee80211_vif *vif); 370 struct ieee80211_vif *vif);
344int iwl_mac_config(struct ieee80211_hw *hw, u32 changed); 371int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
345void iwl_config_ap(struct iwl_priv *priv); 372void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif);
346void iwl_mac_reset_tsf(struct ieee80211_hw *hw); 373void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
347int iwl_alloc_txq_mem(struct iwl_priv *priv); 374int iwl_alloc_txq_mem(struct iwl_priv *priv);
348void iwl_free_txq_mem(struct iwl_priv *priv); 375void iwl_free_txq_mem(struct iwl_priv *priv);
349void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info, 376void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
350 __le32 *tx_flags); 377 __le32 *tx_flags);
351int iwl_send_wimax_coex(struct iwl_priv *priv);
352#ifdef CONFIG_IWLWIFI_DEBUGFS 378#ifdef CONFIG_IWLWIFI_DEBUGFS
353int iwl_alloc_traffic_mem(struct iwl_priv *priv); 379int iwl_alloc_traffic_mem(struct iwl_priv *priv);
354void iwl_free_traffic_mem(struct iwl_priv *priv); 380void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -411,26 +437,24 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
411/***************************************************** 437/*****************************************************
412* RX 438* RX
413******************************************************/ 439******************************************************/
414void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
415void iwl_cmd_queue_free(struct iwl_priv *priv); 440void iwl_cmd_queue_free(struct iwl_priv *priv);
416int iwl_rx_queue_alloc(struct iwl_priv *priv); 441int iwl_rx_queue_alloc(struct iwl_priv *priv);
417void iwl_rx_handle(struct iwl_priv *priv); 442void iwl_rx_handle(struct iwl_priv *priv);
418void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, 443void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
419 struct iwl_rx_queue *q); 444 struct iwl_rx_queue *q);
420void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
421void iwl_rx_replenish(struct iwl_priv *priv);
422void iwl_rx_replenish_now(struct iwl_priv *priv);
423int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
424void iwl_rx_queue_restock(struct iwl_priv *priv);
425int iwl_rx_queue_space(const struct iwl_rx_queue *q); 445int iwl_rx_queue_space(const struct iwl_rx_queue *q);
426void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority);
427void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 446void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
428int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
429/* Handlers */ 447/* Handlers */
430void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, 448void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
431 struct iwl_rx_mem_buffer *rxb); 449 struct iwl_rx_mem_buffer *rxb);
432void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 450void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
433 struct iwl_rx_mem_buffer *rxb); 451 struct iwl_rx_mem_buffer *rxb);
452bool iwl_good_plcp_health(struct iwl_priv *priv,
453 struct iwl_rx_packet *pkt);
454bool iwl_good_ack_health(struct iwl_priv *priv,
455 struct iwl_rx_packet *pkt);
456void iwl_recover_from_statistics(struct iwl_priv *priv,
457 struct iwl_rx_packet *pkt);
434void iwl_rx_statistics(struct iwl_priv *priv, 458void iwl_rx_statistics(struct iwl_priv *priv,
435 struct iwl_rx_mem_buffer *rxb); 459 struct iwl_rx_mem_buffer *rxb);
436void iwl_reply_statistics(struct iwl_priv *priv, 460void iwl_reply_statistics(struct iwl_priv *priv,
@@ -442,14 +466,10 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
442/***************************************************** 466/*****************************************************
443* TX 467* TX
444******************************************************/ 468******************************************************/
445int iwl_txq_ctx_alloc(struct iwl_priv *priv);
446void iwl_txq_ctx_reset(struct iwl_priv *priv);
447void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); 469void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
448int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, 470int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
449 struct iwl_tx_queue *txq, 471 struct iwl_tx_queue *txq,
450 dma_addr_t addr, u16 len, u8 reset, u8 pad); 472 dma_addr_t addr, u16 len, u8 reset, u8 pad);
451int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
452void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
453int iwl_hw_tx_queue_init(struct iwl_priv *priv, 473int iwl_hw_tx_queue_init(struct iwl_priv *priv,
454 struct iwl_tx_queue *txq); 474 struct iwl_tx_queue *txq);
455void iwl_free_tfds_in_queue(struct iwl_priv *priv, 475void iwl_free_tfds_in_queue(struct iwl_priv *priv,
@@ -460,9 +480,6 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
460void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 480void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
461 int slots_num, u32 txq_id); 481 int slots_num, u32 txq_id);
462void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 482void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
463int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
464int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
465int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
466/***************************************************** 483/*****************************************************
467 * TX power 484 * TX power
468 ****************************************************/ 485 ****************************************************/
@@ -472,10 +489,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
472 * Rate 489 * Rate
473 ******************************************************************************/ 490 ******************************************************************************/
474 491
475void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
476 struct ieee80211_tx_info *info);
477int iwl_hwrate_to_plcp_idx(u32 rate_n_flags); 492int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
478int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
479 493
480u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv); 494u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
481 495
@@ -505,8 +519,11 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
505void iwl_init_scan_params(struct iwl_priv *priv); 519void iwl_init_scan_params(struct iwl_priv *priv);
506int iwl_scan_cancel(struct iwl_priv *priv); 520int iwl_scan_cancel(struct iwl_priv *priv);
507int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms); 521int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
508int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req); 522int iwl_mac_hw_scan(struct ieee80211_hw *hw,
509int iwl_internal_short_hw_scan(struct iwl_priv *priv); 523 struct ieee80211_vif *vif,
524 struct cfg80211_scan_request *req);
525void iwl_bg_start_internal_scan(struct work_struct *work);
526void iwl_internal_short_hw_scan(struct iwl_priv *priv);
510int iwl_force_reset(struct iwl_priv *priv, int mode); 527int iwl_force_reset(struct iwl_priv *priv, int mode);
511u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame, 528u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
512 const u8 *ie, int ie_len, int left); 529 const u8 *ie, int ie_len, int left);
@@ -515,7 +532,8 @@ u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
515 enum ieee80211_band band, 532 enum ieee80211_band band,
516 u8 n_probes); 533 u8 n_probes);
517u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, 534u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
518 enum ieee80211_band band); 535 enum ieee80211_band band,
536 struct ieee80211_vif *vif);
519void iwl_bg_scan_check(struct work_struct *data); 537void iwl_bg_scan_check(struct work_struct *data);
520void iwl_bg_abort_scan(struct work_struct *work); 538void iwl_bg_abort_scan(struct work_struct *work);
521void iwl_bg_scan_completed(struct work_struct *work); 539void iwl_bg_scan_completed(struct work_struct *work);
@@ -530,6 +548,7 @@ void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
530#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */ 548#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
531#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */ 549#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
532 550
551#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
533 552
534/******************************************************************************* 553/*******************************************************************************
535 * Calibrations - implemented in iwl-calib.c 554 * Calibrations - implemented in iwl-calib.c
@@ -563,11 +582,6 @@ int iwl_send_card_state(struct iwl_priv *priv, u32 flags,
563 * PCI * 582 * PCI *
564 *****************************************************/ 583 *****************************************************/
565irqreturn_t iwl_isr_legacy(int irq, void *data); 584irqreturn_t iwl_isr_legacy(int irq, void *data);
566int iwl_reset_ict(struct iwl_priv *priv);
567void iwl_disable_ict(struct iwl_priv *priv);
568int iwl_alloc_isr_ict(struct iwl_priv *priv);
569void iwl_free_isr_ict(struct iwl_priv *priv);
570irqreturn_t iwl_isr_ict(int irq, void *data);
571 585
572static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv) 586static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
573{ 587{
@@ -577,6 +591,9 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
577 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); 591 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
578 return pci_lnk_ctl; 592 return pci_lnk_ctl;
579} 593}
594
595void iwl_bg_monitor_recover(unsigned long data);
596
580#ifdef CONFIG_PM 597#ifdef CONFIG_PM
581int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state); 598int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
582int iwl_pci_resume(struct pci_dev *pdev); 599int iwl_pci_resume(struct pci_dev *pdev);
@@ -625,7 +642,6 @@ void iwlcore_free_geos(struct iwl_priv *priv);
625#define STATUS_SCAN_HW 15 642#define STATUS_SCAN_HW 15
626#define STATUS_POWER_PMI 16 643#define STATUS_POWER_PMI 16
627#define STATUS_FW_ERROR 17 644#define STATUS_FW_ERROR 17
628#define STATUS_MODE_PENDING 18
629 645
630 646
631static inline int iwl_is_ready(struct iwl_priv *priv) 647static inline int iwl_is_ready(struct iwl_priv *priv)
@@ -672,23 +688,16 @@ static inline int iwl_is_ready_rf(struct iwl_priv *priv)
672} 688}
673 689
674extern void iwl_rf_kill_ct_config(struct iwl_priv *priv); 690extern void iwl_rf_kill_ct_config(struct iwl_priv *priv);
675extern int iwl_send_bt_config(struct iwl_priv *priv); 691extern void iwl_send_bt_config(struct iwl_priv *priv);
676extern int iwl_send_statistics_request(struct iwl_priv *priv, 692extern int iwl_send_statistics_request(struct iwl_priv *priv,
677 u8 flags, bool clear); 693 u8 flags, bool clear);
678extern int iwl_verify_ucode(struct iwl_priv *priv); 694extern int iwl_verify_ucode(struct iwl_priv *priv);
679extern int iwl_send_lq_cmd(struct iwl_priv *priv, 695extern int iwl_send_lq_cmd(struct iwl_priv *priv,
680 struct iwl_link_quality_cmd *lq, u8 flags); 696 struct iwl_link_quality_cmd *lq, u8 flags, bool init);
681extern void iwl_rx_reply_rx(struct iwl_priv *priv,
682 struct iwl_rx_mem_buffer *rxb);
683extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
684 struct iwl_rx_mem_buffer *rxb);
685void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
686 struct iwl_rx_mem_buffer *rxb);
687void iwl_apm_stop(struct iwl_priv *priv); 697void iwl_apm_stop(struct iwl_priv *priv);
688int iwl_apm_stop_master(struct iwl_priv *priv);
689int iwl_apm_init(struct iwl_priv *priv); 698int iwl_apm_init(struct iwl_priv *priv);
690 699
691void iwl_setup_rxon_timing(struct iwl_priv *priv); 700void iwl_setup_rxon_timing(struct iwl_priv *priv, struct ieee80211_vif *vif);
692static inline int iwl_send_rxon_assoc(struct iwl_priv *priv) 701static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
693{ 702{
694 return priv->cfg->ops->hcmd->rxon_assoc(priv); 703 return priv->cfg->ops->hcmd->rxon_assoc(priv);
@@ -697,9 +706,10 @@ static inline int iwlcore_commit_rxon(struct iwl_priv *priv)
697{ 706{
698 return priv->cfg->ops->hcmd->commit_rxon(priv); 707 return priv->cfg->ops->hcmd->commit_rxon(priv);
699} 708}
700static inline void iwlcore_config_ap(struct iwl_priv *priv) 709static inline void iwlcore_config_ap(struct iwl_priv *priv,
710 struct ieee80211_vif *vif)
701{ 711{
702 priv->cfg->ops->lib->config_ap(priv); 712 priv->cfg->ops->lib->config_ap(priv, vif);
703} 713}
704static inline const struct ieee80211_supported_band *iwl_get_hw_mode( 714static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
705 struct iwl_priv *priv, enum ieee80211_band band) 715 struct iwl_priv *priv, enum ieee80211_band band)
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 808b7146bead..254c35ae8b38 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -298,6 +298,7 @@
298#define CSR_HW_REV_TYPE_1000 (0x0000060) 298#define CSR_HW_REV_TYPE_1000 (0x0000060)
299#define CSR_HW_REV_TYPE_6x00 (0x0000070) 299#define CSR_HW_REV_TYPE_6x00 (0x0000070)
300#define CSR_HW_REV_TYPE_6x50 (0x0000080) 300#define CSR_HW_REV_TYPE_6x50 (0x0000080)
301#define CSR_HW_REV_TYPE_6x00g2 (0x00000B0)
301#define CSR_HW_REV_TYPE_NONE (0x00000F0) 302#define CSR_HW_REV_TYPE_NONE (0x00000F0)
302 303
303/* EEPROM REG */ 304/* EEPROM REG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 1c7b53d511c7..5c2bcef5df0c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -78,6 +78,8 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
78#ifdef CONFIG_IWLWIFI_DEBUGFS 78#ifdef CONFIG_IWLWIFI_DEBUGFS
79int iwl_dbgfs_register(struct iwl_priv *priv, const char *name); 79int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
80void iwl_dbgfs_unregister(struct iwl_priv *priv); 80void iwl_dbgfs_unregister(struct iwl_priv *priv);
81extern int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
82 int bufsz);
81#else 83#else
82static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name) 84static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
83{ 85{
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index b6e1b0ebe230..9659c5d01df9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -106,6 +106,26 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
106 .open = iwl_dbgfs_open_file_generic, \ 106 .open = iwl_dbgfs_open_file_generic, \
107}; 107};
108 108
109int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
110{
111 int p = 0;
112
113 p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
114 le32_to_cpu(priv->statistics.flag));
115 if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
116 p += scnprintf(buf + p, bufsz - p,
117 "\tStatistics have been cleared\n");
118 p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
119 (le32_to_cpu(priv->statistics.flag) &
120 UCODE_STATISTICS_FREQUENCY_MSK)
121 ? "2.4 GHz" : "5.2 GHz");
122 p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
123 (le32_to_cpu(priv->statistics.flag) &
124 UCODE_STATISTICS_NARROW_BAND_MSK)
125 ? "enabled" : "disabled");
126 return p;
127}
128EXPORT_SYMBOL(iwl_dbgfs_statistics_flag);
109 129
110static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file, 130static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
111 char __user *user_buf, 131 char __user *user_buf,
@@ -561,8 +581,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
561 test_bit(STATUS_POWER_PMI, &priv->status)); 581 test_bit(STATUS_POWER_PMI, &priv->status));
562 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", 582 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
563 test_bit(STATUS_FW_ERROR, &priv->status)); 583 test_bit(STATUS_FW_ERROR, &priv->status));
564 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_MODE_PENDING:\t %d\n",
565 test_bit(STATUS_MODE_PENDING, &priv->status));
566 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 584 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
567} 585}
568 586
@@ -661,7 +679,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
661 int pos = 0, i; 679 int pos = 0, i;
662 char buf[256]; 680 char buf[256];
663 const size_t bufsz = sizeof(buf); 681 const size_t bufsz = sizeof(buf);
664 ssize_t ret;
665 682
666 for (i = 0; i < AC_NUM; i++) { 683 for (i = 0; i < AC_NUM; i++) {
667 pos += scnprintf(buf + pos, bufsz - pos, 684 pos += scnprintf(buf + pos, bufsz - pos,
@@ -673,8 +690,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
673 priv->qos_data.def_qos_parm.ac[i].aifsn, 690 priv->qos_data.def_qos_parm.ac[i].aifsn,
674 priv->qos_data.def_qos_parm.ac[i].edca_txop); 691 priv->qos_data.def_qos_parm.ac[i].edca_txop);
675 } 692 }
676 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 693 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
677 return ret;
678} 694}
679 695
680static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf, 696static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
@@ -684,7 +700,6 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
684 int pos = 0; 700 int pos = 0;
685 char buf[256]; 701 char buf[256];
686 const size_t bufsz = sizeof(buf); 702 const size_t bufsz = sizeof(buf);
687 ssize_t ret;
688 703
689 pos += scnprintf(buf + pos, bufsz - pos, 704 pos += scnprintf(buf + pos, bufsz - pos,
690 "allow blinking: %s\n", 705 "allow blinking: %s\n",
@@ -698,8 +713,7 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
698 priv->last_blink_time); 713 priv->last_blink_time);
699 } 714 }
700 715
701 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 716 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
702 return ret;
703} 717}
704 718
705static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file, 719static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
@@ -712,7 +726,6 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
712 char buf[100]; 726 char buf[100];
713 int pos = 0; 727 int pos = 0;
714 const size_t bufsz = sizeof(buf); 728 const size_t bufsz = sizeof(buf);
715 ssize_t ret;
716 729
717 pos += scnprintf(buf + pos, bufsz - pos, 730 pos += scnprintf(buf + pos, bufsz - pos,
718 "Thermal Throttling Mode: %s\n", 731 "Thermal Throttling Mode: %s\n",
@@ -732,8 +745,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
732 "HT mode: %d\n", 745 "HT mode: %d\n",
733 restriction->is_ht); 746 restriction->is_ht);
734 } 747 }
735 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 748 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
736 return ret;
737} 749}
738 750
739static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file, 751static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
@@ -770,13 +782,11 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
770 char buf[100]; 782 char buf[100];
771 int pos = 0; 783 int pos = 0;
772 const size_t bufsz = sizeof(buf); 784 const size_t bufsz = sizeof(buf);
773 ssize_t ret;
774 785
775 pos += scnprintf(buf + pos, bufsz - pos, 786 pos += scnprintf(buf + pos, bufsz - pos,
776 "11n 40MHz Mode: %s\n", 787 "11n 40MHz Mode: %s\n",
777 priv->disable_ht40 ? "Disabled" : "Enabled"); 788 priv->disable_ht40 ? "Disabled" : "Enabled");
778 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 789 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
779 return ret;
780} 790}
781 791
782static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file, 792static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
@@ -1044,474 +1054,13 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1044 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1054 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1045} 1055}
1046 1056
1047static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
1048 int bufsz)
1049{
1050 int p = 0;
1051
1052 p += scnprintf(buf + p, bufsz - p,
1053 "Statistics Flag(0x%X):\n",
1054 le32_to_cpu(priv->statistics.flag));
1055 if (le32_to_cpu(priv->statistics.flag) & UCODE_STATISTICS_CLEAR_MSK)
1056 p += scnprintf(buf + p, bufsz - p,
1057 "\tStatistics have been cleared\n");
1058 p += scnprintf(buf + p, bufsz - p,
1059 "\tOperational Frequency: %s\n",
1060 (le32_to_cpu(priv->statistics.flag) &
1061 UCODE_STATISTICS_FREQUENCY_MSK)
1062 ? "2.4 GHz" : "5.2 GHz");
1063 p += scnprintf(buf + p, bufsz - p,
1064 "\tTGj Narrow Band: %s\n",
1065 (le32_to_cpu(priv->statistics.flag) &
1066 UCODE_STATISTICS_NARROW_BAND_MSK)
1067 ? "enabled" : "disabled");
1068 return p;
1069}
1070
1071static const char ucode_stats_header[] =
1072 "%-32s current acumulative delta max\n";
1073static const char ucode_stats_short_format[] =
1074 " %-30s %10u\n";
1075static const char ucode_stats_format[] =
1076 " %-30s %10u %10u %10u %10u\n";
1077
1078static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file, 1057static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1079 char __user *user_buf, 1058 char __user *user_buf,
1080 size_t count, loff_t *ppos) 1059 size_t count, loff_t *ppos)
1081{ 1060{
1082 struct iwl_priv *priv = file->private_data; 1061 struct iwl_priv *priv = file->private_data;
1083 int pos = 0; 1062 return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
1084 char *buf; 1063 user_buf, count, ppos);
1085 int bufsz = sizeof(struct statistics_rx_phy) * 40 +
1086 sizeof(struct statistics_rx_non_phy) * 40 +
1087 sizeof(struct statistics_rx_ht_phy) * 40 + 400;
1088 ssize_t ret;
1089 struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
1090 struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
1091 struct statistics_rx_non_phy *general, *accum_general;
1092 struct statistics_rx_non_phy *delta_general, *max_general;
1093 struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
1094
1095 if (!iwl_is_alive(priv))
1096 return -EAGAIN;
1097
1098 buf = kzalloc(bufsz, GFP_KERNEL);
1099 if (!buf) {
1100 IWL_ERR(priv, "Can not allocate Buffer\n");
1101 return -ENOMEM;
1102 }
1103
1104 /* the statistic information display here is based on
1105 * the last statistics notification from uCode
1106 * might not reflect the current uCode activity
1107 */
1108 ofdm = &priv->statistics.rx.ofdm;
1109 cck = &priv->statistics.rx.cck;
1110 general = &priv->statistics.rx.general;
1111 ht = &priv->statistics.rx.ofdm_ht;
1112 accum_ofdm = &priv->accum_statistics.rx.ofdm;
1113 accum_cck = &priv->accum_statistics.rx.cck;
1114 accum_general = &priv->accum_statistics.rx.general;
1115 accum_ht = &priv->accum_statistics.rx.ofdm_ht;
1116 delta_ofdm = &priv->delta_statistics.rx.ofdm;
1117 delta_cck = &priv->delta_statistics.rx.cck;
1118 delta_general = &priv->delta_statistics.rx.general;
1119 delta_ht = &priv->delta_statistics.rx.ofdm_ht;
1120 max_ofdm = &priv->max_delta.rx.ofdm;
1121 max_cck = &priv->max_delta.rx.cck;
1122 max_general = &priv->max_delta.rx.general;
1123 max_ht = &priv->max_delta.rx.ofdm_ht;
1124
1125 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1126 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1127 "Statistics_Rx - OFDM:");
1128 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1129 "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
1130 accum_ofdm->ina_cnt,
1131 delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
1132 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1133 "fina_cnt:",
1134 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
1135 delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
1136 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1137 "plcp_err:",
1138 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
1139 delta_ofdm->plcp_err, max_ofdm->plcp_err);
1140 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1141 "crc32_err:",
1142 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
1143 delta_ofdm->crc32_err, max_ofdm->crc32_err);
1144 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1145 "overrun_err:",
1146 le32_to_cpu(ofdm->overrun_err),
1147 accum_ofdm->overrun_err,
1148 delta_ofdm->overrun_err, max_ofdm->overrun_err);
1149 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1150 "early_overrun_err:",
1151 le32_to_cpu(ofdm->early_overrun_err),
1152 accum_ofdm->early_overrun_err,
1153 delta_ofdm->early_overrun_err,
1154 max_ofdm->early_overrun_err);
1155 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1156 "crc32_good:",
1157 le32_to_cpu(ofdm->crc32_good),
1158 accum_ofdm->crc32_good,
1159 delta_ofdm->crc32_good, max_ofdm->crc32_good);
1160 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1161 "false_alarm_cnt:",
1162 le32_to_cpu(ofdm->false_alarm_cnt),
1163 accum_ofdm->false_alarm_cnt,
1164 delta_ofdm->false_alarm_cnt,
1165 max_ofdm->false_alarm_cnt);
1166 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1167 "fina_sync_err_cnt:",
1168 le32_to_cpu(ofdm->fina_sync_err_cnt),
1169 accum_ofdm->fina_sync_err_cnt,
1170 delta_ofdm->fina_sync_err_cnt,
1171 max_ofdm->fina_sync_err_cnt);
1172 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1173 "sfd_timeout:",
1174 le32_to_cpu(ofdm->sfd_timeout),
1175 accum_ofdm->sfd_timeout,
1176 delta_ofdm->sfd_timeout,
1177 max_ofdm->sfd_timeout);
1178 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1179 "fina_timeout:",
1180 le32_to_cpu(ofdm->fina_timeout),
1181 accum_ofdm->fina_timeout,
1182 delta_ofdm->fina_timeout,
1183 max_ofdm->fina_timeout);
1184 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1185 "unresponded_rts:",
1186 le32_to_cpu(ofdm->unresponded_rts),
1187 accum_ofdm->unresponded_rts,
1188 delta_ofdm->unresponded_rts,
1189 max_ofdm->unresponded_rts);
1190 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1191 "rxe_frame_lmt_ovrun:",
1192 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
1193 accum_ofdm->rxe_frame_limit_overrun,
1194 delta_ofdm->rxe_frame_limit_overrun,
1195 max_ofdm->rxe_frame_limit_overrun);
1196 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1197 "sent_ack_cnt:",
1198 le32_to_cpu(ofdm->sent_ack_cnt),
1199 accum_ofdm->sent_ack_cnt,
1200 delta_ofdm->sent_ack_cnt,
1201 max_ofdm->sent_ack_cnt);
1202 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1203 "sent_cts_cnt:",
1204 le32_to_cpu(ofdm->sent_cts_cnt),
1205 accum_ofdm->sent_cts_cnt,
1206 delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
1207 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1208 "sent_ba_rsp_cnt:",
1209 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
1210 accum_ofdm->sent_ba_rsp_cnt,
1211 delta_ofdm->sent_ba_rsp_cnt,
1212 max_ofdm->sent_ba_rsp_cnt);
1213 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1214 "dsp_self_kill:",
1215 le32_to_cpu(ofdm->dsp_self_kill),
1216 accum_ofdm->dsp_self_kill,
1217 delta_ofdm->dsp_self_kill,
1218 max_ofdm->dsp_self_kill);
1219 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1220 "mh_format_err:",
1221 le32_to_cpu(ofdm->mh_format_err),
1222 accum_ofdm->mh_format_err,
1223 delta_ofdm->mh_format_err,
1224 max_ofdm->mh_format_err);
1225 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1226 "re_acq_main_rssi_sum:",
1227 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
1228 accum_ofdm->re_acq_main_rssi_sum,
1229 delta_ofdm->re_acq_main_rssi_sum,
1230 max_ofdm->re_acq_main_rssi_sum);
1231
1232 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1233 "Statistics_Rx - CCK:");
1234 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1235 "ina_cnt:",
1236 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
1237 delta_cck->ina_cnt, max_cck->ina_cnt);
1238 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1239 "fina_cnt:",
1240 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
1241 delta_cck->fina_cnt, max_cck->fina_cnt);
1242 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1243 "plcp_err:",
1244 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
1245 delta_cck->plcp_err, max_cck->plcp_err);
1246 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1247 "crc32_err:",
1248 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
1249 delta_cck->crc32_err, max_cck->crc32_err);
1250 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1251 "overrun_err:",
1252 le32_to_cpu(cck->overrun_err),
1253 accum_cck->overrun_err,
1254 delta_cck->overrun_err, max_cck->overrun_err);
1255 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1256 "early_overrun_err:",
1257 le32_to_cpu(cck->early_overrun_err),
1258 accum_cck->early_overrun_err,
1259 delta_cck->early_overrun_err,
1260 max_cck->early_overrun_err);
1261 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1262 "crc32_good:",
1263 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
1264 delta_cck->crc32_good,
1265 max_cck->crc32_good);
1266 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1267 "false_alarm_cnt:",
1268 le32_to_cpu(cck->false_alarm_cnt),
1269 accum_cck->false_alarm_cnt,
1270 delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
1271 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1272 "fina_sync_err_cnt:",
1273 le32_to_cpu(cck->fina_sync_err_cnt),
1274 accum_cck->fina_sync_err_cnt,
1275 delta_cck->fina_sync_err_cnt,
1276 max_cck->fina_sync_err_cnt);
1277 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1278 "sfd_timeout:",
1279 le32_to_cpu(cck->sfd_timeout),
1280 accum_cck->sfd_timeout,
1281 delta_cck->sfd_timeout, max_cck->sfd_timeout);
1282 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1283 "fina_timeout:",
1284 le32_to_cpu(cck->fina_timeout),
1285 accum_cck->fina_timeout,
1286 delta_cck->fina_timeout, max_cck->fina_timeout);
1287 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1288 "unresponded_rts:",
1289 le32_to_cpu(cck->unresponded_rts),
1290 accum_cck->unresponded_rts,
1291 delta_cck->unresponded_rts,
1292 max_cck->unresponded_rts);
1293 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1294 "rxe_frame_lmt_ovrun:",
1295 le32_to_cpu(cck->rxe_frame_limit_overrun),
1296 accum_cck->rxe_frame_limit_overrun,
1297 delta_cck->rxe_frame_limit_overrun,
1298 max_cck->rxe_frame_limit_overrun);
1299 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1300 "sent_ack_cnt:",
1301 le32_to_cpu(cck->sent_ack_cnt),
1302 accum_cck->sent_ack_cnt,
1303 delta_cck->sent_ack_cnt,
1304 max_cck->sent_ack_cnt);
1305 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1306 "sent_cts_cnt:",
1307 le32_to_cpu(cck->sent_cts_cnt),
1308 accum_cck->sent_cts_cnt,
1309 delta_cck->sent_cts_cnt,
1310 max_cck->sent_cts_cnt);
1311 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1312 "sent_ba_rsp_cnt:",
1313 le32_to_cpu(cck->sent_ba_rsp_cnt),
1314 accum_cck->sent_ba_rsp_cnt,
1315 delta_cck->sent_ba_rsp_cnt,
1316 max_cck->sent_ba_rsp_cnt);
1317 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1318 "dsp_self_kill:",
1319 le32_to_cpu(cck->dsp_self_kill),
1320 accum_cck->dsp_self_kill,
1321 delta_cck->dsp_self_kill,
1322 max_cck->dsp_self_kill);
1323 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1324 "mh_format_err:",
1325 le32_to_cpu(cck->mh_format_err),
1326 accum_cck->mh_format_err,
1327 delta_cck->mh_format_err, max_cck->mh_format_err);
1328 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1329 "re_acq_main_rssi_sum:",
1330 le32_to_cpu(cck->re_acq_main_rssi_sum),
1331 accum_cck->re_acq_main_rssi_sum,
1332 delta_cck->re_acq_main_rssi_sum,
1333 max_cck->re_acq_main_rssi_sum);
1334
1335 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1336 "Statistics_Rx - GENERAL:");
1337 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1338 "bogus_cts:",
1339 le32_to_cpu(general->bogus_cts),
1340 accum_general->bogus_cts,
1341 delta_general->bogus_cts, max_general->bogus_cts);
1342 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1343 "bogus_ack:",
1344 le32_to_cpu(general->bogus_ack),
1345 accum_general->bogus_ack,
1346 delta_general->bogus_ack, max_general->bogus_ack);
1347 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1348 "non_bssid_frames:",
1349 le32_to_cpu(general->non_bssid_frames),
1350 accum_general->non_bssid_frames,
1351 delta_general->non_bssid_frames,
1352 max_general->non_bssid_frames);
1353 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1354 "filtered_frames:",
1355 le32_to_cpu(general->filtered_frames),
1356 accum_general->filtered_frames,
1357 delta_general->filtered_frames,
1358 max_general->filtered_frames);
1359 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1360 "non_channel_beacons:",
1361 le32_to_cpu(general->non_channel_beacons),
1362 accum_general->non_channel_beacons,
1363 delta_general->non_channel_beacons,
1364 max_general->non_channel_beacons);
1365 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1366 "channel_beacons:",
1367 le32_to_cpu(general->channel_beacons),
1368 accum_general->channel_beacons,
1369 delta_general->channel_beacons,
1370 max_general->channel_beacons);
1371 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1372 "num_missed_bcon:",
1373 le32_to_cpu(general->num_missed_bcon),
1374 accum_general->num_missed_bcon,
1375 delta_general->num_missed_bcon,
1376 max_general->num_missed_bcon);
1377 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1378 "adc_rx_saturation_time:",
1379 le32_to_cpu(general->adc_rx_saturation_time),
1380 accum_general->adc_rx_saturation_time,
1381 delta_general->adc_rx_saturation_time,
1382 max_general->adc_rx_saturation_time);
1383 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1384 "ina_detect_search_tm:",
1385 le32_to_cpu(general->ina_detection_search_time),
1386 accum_general->ina_detection_search_time,
1387 delta_general->ina_detection_search_time,
1388 max_general->ina_detection_search_time);
1389 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1390 "beacon_silence_rssi_a:",
1391 le32_to_cpu(general->beacon_silence_rssi_a),
1392 accum_general->beacon_silence_rssi_a,
1393 delta_general->beacon_silence_rssi_a,
1394 max_general->beacon_silence_rssi_a);
1395 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1396 "beacon_silence_rssi_b:",
1397 le32_to_cpu(general->beacon_silence_rssi_b),
1398 accum_general->beacon_silence_rssi_b,
1399 delta_general->beacon_silence_rssi_b,
1400 max_general->beacon_silence_rssi_b);
1401 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1402 "beacon_silence_rssi_c:",
1403 le32_to_cpu(general->beacon_silence_rssi_c),
1404 accum_general->beacon_silence_rssi_c,
1405 delta_general->beacon_silence_rssi_c,
1406 max_general->beacon_silence_rssi_c);
1407 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1408 "interference_data_flag:",
1409 le32_to_cpu(general->interference_data_flag),
1410 accum_general->interference_data_flag,
1411 delta_general->interference_data_flag,
1412 max_general->interference_data_flag);
1413 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1414 "channel_load:",
1415 le32_to_cpu(general->channel_load),
1416 accum_general->channel_load,
1417 delta_general->channel_load,
1418 max_general->channel_load);
1419 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1420 "dsp_false_alarms:",
1421 le32_to_cpu(general->dsp_false_alarms),
1422 accum_general->dsp_false_alarms,
1423 delta_general->dsp_false_alarms,
1424 max_general->dsp_false_alarms);
1425 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1426 "beacon_rssi_a:",
1427 le32_to_cpu(general->beacon_rssi_a),
1428 accum_general->beacon_rssi_a,
1429 delta_general->beacon_rssi_a,
1430 max_general->beacon_rssi_a);
1431 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1432 "beacon_rssi_b:",
1433 le32_to_cpu(general->beacon_rssi_b),
1434 accum_general->beacon_rssi_b,
1435 delta_general->beacon_rssi_b,
1436 max_general->beacon_rssi_b);
1437 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1438 "beacon_rssi_c:",
1439 le32_to_cpu(general->beacon_rssi_c),
1440 accum_general->beacon_rssi_c,
1441 delta_general->beacon_rssi_c,
1442 max_general->beacon_rssi_c);
1443 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1444 "beacon_energy_a:",
1445 le32_to_cpu(general->beacon_energy_a),
1446 accum_general->beacon_energy_a,
1447 delta_general->beacon_energy_a,
1448 max_general->beacon_energy_a);
1449 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1450 "beacon_energy_b:",
1451 le32_to_cpu(general->beacon_energy_b),
1452 accum_general->beacon_energy_b,
1453 delta_general->beacon_energy_b,
1454 max_general->beacon_energy_b);
1455 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1456 "beacon_energy_c:",
1457 le32_to_cpu(general->beacon_energy_c),
1458 accum_general->beacon_energy_c,
1459 delta_general->beacon_energy_c,
1460 max_general->beacon_energy_c);
1461
1462 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
1463 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1464 "Statistics_Rx - OFDM_HT:");
1465 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1466 "plcp_err:",
1467 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
1468 delta_ht->plcp_err, max_ht->plcp_err);
1469 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1470 "overrun_err:",
1471 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
1472 delta_ht->overrun_err, max_ht->overrun_err);
1473 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1474 "early_overrun_err:",
1475 le32_to_cpu(ht->early_overrun_err),
1476 accum_ht->early_overrun_err,
1477 delta_ht->early_overrun_err,
1478 max_ht->early_overrun_err);
1479 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1480 "crc32_good:",
1481 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
1482 delta_ht->crc32_good, max_ht->crc32_good);
1483 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1484 "crc32_err:",
1485 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
1486 delta_ht->crc32_err, max_ht->crc32_err);
1487 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1488 "mh_format_err:",
1489 le32_to_cpu(ht->mh_format_err),
1490 accum_ht->mh_format_err,
1491 delta_ht->mh_format_err, max_ht->mh_format_err);
1492 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1493 "agg_crc32_good:",
1494 le32_to_cpu(ht->agg_crc32_good),
1495 accum_ht->agg_crc32_good,
1496 delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
1497 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1498 "agg_mpdu_cnt:",
1499 le32_to_cpu(ht->agg_mpdu_cnt),
1500 accum_ht->agg_mpdu_cnt,
1501 delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
1502 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1503 "agg_cnt:",
1504 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
1505 delta_ht->agg_cnt, max_ht->agg_cnt);
1506 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1507 "unsupport_mcs:",
1508 le32_to_cpu(ht->unsupport_mcs),
1509 accum_ht->unsupport_mcs,
1510 delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
1511
1512 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1513 kfree(buf);
1514 return ret;
1515} 1064}
1516 1065
1517static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file, 1066static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
@@ -1519,173 +1068,8 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1519 size_t count, loff_t *ppos) 1068 size_t count, loff_t *ppos)
1520{ 1069{
1521 struct iwl_priv *priv = file->private_data; 1070 struct iwl_priv *priv = file->private_data;
1522 int pos = 0; 1071 return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
1523 char *buf; 1072 user_buf, count, ppos);
1524 int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
1525 ssize_t ret;
1526 struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
1527
1528 if (!iwl_is_alive(priv))
1529 return -EAGAIN;
1530
1531 buf = kzalloc(bufsz, GFP_KERNEL);
1532 if (!buf) {
1533 IWL_ERR(priv, "Can not allocate Buffer\n");
1534 return -ENOMEM;
1535 }
1536
1537 /* the statistic information display here is based on
1538 * the last statistics notification from uCode
1539 * might not reflect the current uCode activity
1540 */
1541 tx = &priv->statistics.tx;
1542 accum_tx = &priv->accum_statistics.tx;
1543 delta_tx = &priv->delta_statistics.tx;
1544 max_tx = &priv->max_delta.tx;
1545 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1546 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1547 "Statistics_Tx:");
1548 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1549 "preamble:",
1550 le32_to_cpu(tx->preamble_cnt),
1551 accum_tx->preamble_cnt,
1552 delta_tx->preamble_cnt, max_tx->preamble_cnt);
1553 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1554 "rx_detected_cnt:",
1555 le32_to_cpu(tx->rx_detected_cnt),
1556 accum_tx->rx_detected_cnt,
1557 delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
1558 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1559 "bt_prio_defer_cnt:",
1560 le32_to_cpu(tx->bt_prio_defer_cnt),
1561 accum_tx->bt_prio_defer_cnt,
1562 delta_tx->bt_prio_defer_cnt,
1563 max_tx->bt_prio_defer_cnt);
1564 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1565 "bt_prio_kill_cnt:",
1566 le32_to_cpu(tx->bt_prio_kill_cnt),
1567 accum_tx->bt_prio_kill_cnt,
1568 delta_tx->bt_prio_kill_cnt,
1569 max_tx->bt_prio_kill_cnt);
1570 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1571 "few_bytes_cnt:",
1572 le32_to_cpu(tx->few_bytes_cnt),
1573 accum_tx->few_bytes_cnt,
1574 delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
1575 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1576 "cts_timeout:",
1577 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
1578 delta_tx->cts_timeout, max_tx->cts_timeout);
1579 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1580 "ack_timeout:",
1581 le32_to_cpu(tx->ack_timeout),
1582 accum_tx->ack_timeout,
1583 delta_tx->ack_timeout, max_tx->ack_timeout);
1584 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1585 "expected_ack_cnt:",
1586 le32_to_cpu(tx->expected_ack_cnt),
1587 accum_tx->expected_ack_cnt,
1588 delta_tx->expected_ack_cnt,
1589 max_tx->expected_ack_cnt);
1590 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1591 "actual_ack_cnt:",
1592 le32_to_cpu(tx->actual_ack_cnt),
1593 accum_tx->actual_ack_cnt,
1594 delta_tx->actual_ack_cnt,
1595 max_tx->actual_ack_cnt);
1596 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1597 "dump_msdu_cnt:",
1598 le32_to_cpu(tx->dump_msdu_cnt),
1599 accum_tx->dump_msdu_cnt,
1600 delta_tx->dump_msdu_cnt,
1601 max_tx->dump_msdu_cnt);
1602 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1603 "abort_nxt_frame_mismatch:",
1604 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
1605 accum_tx->burst_abort_next_frame_mismatch_cnt,
1606 delta_tx->burst_abort_next_frame_mismatch_cnt,
1607 max_tx->burst_abort_next_frame_mismatch_cnt);
1608 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1609 "abort_missing_nxt_frame:",
1610 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
1611 accum_tx->burst_abort_missing_next_frame_cnt,
1612 delta_tx->burst_abort_missing_next_frame_cnt,
1613 max_tx->burst_abort_missing_next_frame_cnt);
1614 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1615 "cts_timeout_collision:",
1616 le32_to_cpu(tx->cts_timeout_collision),
1617 accum_tx->cts_timeout_collision,
1618 delta_tx->cts_timeout_collision,
1619 max_tx->cts_timeout_collision);
1620 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1621 "ack_ba_timeout_collision:",
1622 le32_to_cpu(tx->ack_or_ba_timeout_collision),
1623 accum_tx->ack_or_ba_timeout_collision,
1624 delta_tx->ack_or_ba_timeout_collision,
1625 max_tx->ack_or_ba_timeout_collision);
1626 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1627 "agg ba_timeout:",
1628 le32_to_cpu(tx->agg.ba_timeout),
1629 accum_tx->agg.ba_timeout,
1630 delta_tx->agg.ba_timeout,
1631 max_tx->agg.ba_timeout);
1632 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1633 "agg ba_resched_frames:",
1634 le32_to_cpu(tx->agg.ba_reschedule_frames),
1635 accum_tx->agg.ba_reschedule_frames,
1636 delta_tx->agg.ba_reschedule_frames,
1637 max_tx->agg.ba_reschedule_frames);
1638 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1639 "agg scd_query_agg_frame:",
1640 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
1641 accum_tx->agg.scd_query_agg_frame_cnt,
1642 delta_tx->agg.scd_query_agg_frame_cnt,
1643 max_tx->agg.scd_query_agg_frame_cnt);
1644 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1645 "agg scd_query_no_agg:",
1646 le32_to_cpu(tx->agg.scd_query_no_agg),
1647 accum_tx->agg.scd_query_no_agg,
1648 delta_tx->agg.scd_query_no_agg,
1649 max_tx->agg.scd_query_no_agg);
1650 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1651 "agg scd_query_agg:",
1652 le32_to_cpu(tx->agg.scd_query_agg),
1653 accum_tx->agg.scd_query_agg,
1654 delta_tx->agg.scd_query_agg,
1655 max_tx->agg.scd_query_agg);
1656 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1657 "agg scd_query_mismatch:",
1658 le32_to_cpu(tx->agg.scd_query_mismatch),
1659 accum_tx->agg.scd_query_mismatch,
1660 delta_tx->agg.scd_query_mismatch,
1661 max_tx->agg.scd_query_mismatch);
1662 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1663 "agg frame_not_ready:",
1664 le32_to_cpu(tx->agg.frame_not_ready),
1665 accum_tx->agg.frame_not_ready,
1666 delta_tx->agg.frame_not_ready,
1667 max_tx->agg.frame_not_ready);
1668 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1669 "agg underrun:",
1670 le32_to_cpu(tx->agg.underrun),
1671 accum_tx->agg.underrun,
1672 delta_tx->agg.underrun, max_tx->agg.underrun);
1673 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1674 "agg bt_prio_kill:",
1675 le32_to_cpu(tx->agg.bt_prio_kill),
1676 accum_tx->agg.bt_prio_kill,
1677 delta_tx->agg.bt_prio_kill,
1678 max_tx->agg.bt_prio_kill);
1679 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1680 "agg rx_ba_rsp_cnt:",
1681 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
1682 accum_tx->agg.rx_ba_rsp_cnt,
1683 delta_tx->agg.rx_ba_rsp_cnt,
1684 max_tx->agg.rx_ba_rsp_cnt);
1685
1686 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1687 kfree(buf);
1688 return ret;
1689} 1073}
1690 1074
1691static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file, 1075static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
@@ -1693,107 +1077,8 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1693 size_t count, loff_t *ppos) 1077 size_t count, loff_t *ppos)
1694{ 1078{
1695 struct iwl_priv *priv = file->private_data; 1079 struct iwl_priv *priv = file->private_data;
1696 int pos = 0; 1080 return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
1697 char *buf; 1081 user_buf, count, ppos);
1698 int bufsz = sizeof(struct statistics_general) * 10 + 300;
1699 ssize_t ret;
1700 struct statistics_general *general, *accum_general;
1701 struct statistics_general *delta_general, *max_general;
1702 struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
1703 struct statistics_div *div, *accum_div, *delta_div, *max_div;
1704
1705 if (!iwl_is_alive(priv))
1706 return -EAGAIN;
1707
1708 buf = kzalloc(bufsz, GFP_KERNEL);
1709 if (!buf) {
1710 IWL_ERR(priv, "Can not allocate Buffer\n");
1711 return -ENOMEM;
1712 }
1713
1714 /* the statistic information display here is based on
1715 * the last statistics notification from uCode
1716 * might not reflect the current uCode activity
1717 */
1718 general = &priv->statistics.general;
1719 dbg = &priv->statistics.general.dbg;
1720 div = &priv->statistics.general.div;
1721 accum_general = &priv->accum_statistics.general;
1722 delta_general = &priv->delta_statistics.general;
1723 max_general = &priv->max_delta.general;
1724 accum_dbg = &priv->accum_statistics.general.dbg;
1725 delta_dbg = &priv->delta_statistics.general.dbg;
1726 max_dbg = &priv->max_delta.general.dbg;
1727 accum_div = &priv->accum_statistics.general.div;
1728 delta_div = &priv->delta_statistics.general.div;
1729 max_div = &priv->max_delta.general.div;
1730 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1731 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
1732 "Statistics_General:");
1733 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
1734 "temperature:",
1735 le32_to_cpu(general->temperature));
1736 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
1737 "temperature_m:",
1738 le32_to_cpu(general->temperature_m));
1739 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1740 "burst_check:",
1741 le32_to_cpu(dbg->burst_check),
1742 accum_dbg->burst_check,
1743 delta_dbg->burst_check, max_dbg->burst_check);
1744 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1745 "burst_count:",
1746 le32_to_cpu(dbg->burst_count),
1747 accum_dbg->burst_count,
1748 delta_dbg->burst_count, max_dbg->burst_count);
1749 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1750 "sleep_time:",
1751 le32_to_cpu(general->sleep_time),
1752 accum_general->sleep_time,
1753 delta_general->sleep_time, max_general->sleep_time);
1754 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1755 "slots_out:",
1756 le32_to_cpu(general->slots_out),
1757 accum_general->slots_out,
1758 delta_general->slots_out, max_general->slots_out);
1759 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1760 "slots_idle:",
1761 le32_to_cpu(general->slots_idle),
1762 accum_general->slots_idle,
1763 delta_general->slots_idle, max_general->slots_idle);
1764 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
1765 le32_to_cpu(general->ttl_timestamp));
1766 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1767 "tx_on_a:",
1768 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
1769 delta_div->tx_on_a, max_div->tx_on_a);
1770 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1771 "tx_on_b:",
1772 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
1773 delta_div->tx_on_b, max_div->tx_on_b);
1774 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1775 "exec_time:",
1776 le32_to_cpu(div->exec_time), accum_div->exec_time,
1777 delta_div->exec_time, max_div->exec_time);
1778 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1779 "probe_time:",
1780 le32_to_cpu(div->probe_time), accum_div->probe_time,
1781 delta_div->probe_time, max_div->probe_time);
1782 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1783 "rx_enable_counter:",
1784 le32_to_cpu(general->rx_enable_counter),
1785 accum_general->rx_enable_counter,
1786 delta_general->rx_enable_counter,
1787 max_general->rx_enable_counter);
1788 pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
1789 "num_of_sos_states:",
1790 le32_to_cpu(general->num_of_sos_states),
1791 accum_general->num_of_sos_states,
1792 delta_general->num_of_sos_states,
1793 max_general->num_of_sos_states);
1794 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1795 kfree(buf);
1796 return ret;
1797} 1082}
1798 1083
1799static ssize_t iwl_dbgfs_sensitivity_read(struct file *file, 1084static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
@@ -1935,46 +1220,6 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
1935 return ret; 1220 return ret;
1936} 1221}
1937 1222
1938static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
1939 char __user *user_buf,
1940 size_t count, loff_t *ppos) {
1941
1942 struct iwl_priv *priv = file->private_data;
1943 char buf[128];
1944 int pos = 0;
1945 const size_t bufsz = sizeof(buf);
1946 struct statistics_tx *tx;
1947
1948 if (!iwl_is_alive(priv))
1949 pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
1950 else {
1951 tx = &priv->statistics.tx;
1952 if (tx->tx_power.ant_a ||
1953 tx->tx_power.ant_b ||
1954 tx->tx_power.ant_c) {
1955 pos += scnprintf(buf + pos, bufsz - pos,
1956 "tx power: (1/2 dB step)\n");
1957 if ((priv->cfg->valid_tx_ant & ANT_A) &&
1958 tx->tx_power.ant_a)
1959 pos += scnprintf(buf + pos, bufsz - pos,
1960 "\tantenna A: 0x%X\n",
1961 tx->tx_power.ant_a);
1962 if ((priv->cfg->valid_tx_ant & ANT_B) &&
1963 tx->tx_power.ant_b)
1964 pos += scnprintf(buf + pos, bufsz - pos,
1965 "\tantenna B: 0x%X\n",
1966 tx->tx_power.ant_b);
1967 if ((priv->cfg->valid_tx_ant & ANT_C) &&
1968 tx->tx_power.ant_c)
1969 pos += scnprintf(buf + pos, bufsz - pos,
1970 "\tantenna C: 0x%X\n",
1971 tx->tx_power.ant_c);
1972 } else
1973 pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
1974 }
1975 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1976}
1977
1978static ssize_t iwl_dbgfs_power_save_status_read(struct file *file, 1223static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
1979 char __user *user_buf, 1224 char __user *user_buf,
1980 size_t count, loff_t *ppos) 1225 size_t count, loff_t *ppos)
@@ -2052,7 +1297,6 @@ static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
2052 int pos = 0; 1297 int pos = 0;
2053 char buf[128]; 1298 char buf[128];
2054 const size_t bufsz = sizeof(buf); 1299 const size_t bufsz = sizeof(buf);
2055 ssize_t ret;
2056 1300
2057 pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n", 1301 pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
2058 priv->event_log.ucode_trace ? "On" : "Off"); 1302 priv->event_log.ucode_trace ? "On" : "Off");
@@ -2063,8 +1307,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
2063 pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n", 1307 pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
2064 priv->event_log.wraps_more_count); 1308 priv->event_log.wraps_more_count);
2065 1309
2066 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1310 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2067 return ret;
2068} 1311}
2069 1312
2070static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file, 1313static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
@@ -2096,6 +1339,31 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
2096 return count; 1339 return count;
2097} 1340}
2098 1341
1342static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
1343 char __user *user_buf,
1344 size_t count, loff_t *ppos) {
1345
1346 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1347 int len = 0;
1348 char buf[20];
1349
1350 len = sprintf(buf, "0x%04X\n", le32_to_cpu(priv->active_rxon.flags));
1351 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1352}
1353
1354static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
1355 char __user *user_buf,
1356 size_t count, loff_t *ppos) {
1357
1358 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1359 int len = 0;
1360 char buf[20];
1361
1362 len = sprintf(buf, "0x%04X\n",
1363 le32_to_cpu(priv->active_rxon.filter_flags));
1364 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
1365}
1366
2099static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, 1367static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2100 char __user *user_buf, 1368 char __user *user_buf,
2101 size_t count, loff_t *ppos) 1369 size_t count, loff_t *ppos)
@@ -2125,13 +1393,11 @@ static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
2125 int pos = 0; 1393 int pos = 0;
2126 char buf[12]; 1394 char buf[12];
2127 const size_t bufsz = sizeof(buf); 1395 const size_t bufsz = sizeof(buf);
2128 ssize_t ret;
2129 1396
2130 pos += scnprintf(buf + pos, bufsz - pos, "%d\n", 1397 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
2131 priv->missed_beacon_threshold); 1398 priv->missed_beacon_threshold);
2132 1399
2133 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1400 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2134 return ret;
2135} 1401}
2136 1402
2137static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file, 1403static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
@@ -2160,27 +1426,6 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
2160 return count; 1426 return count;
2161} 1427}
2162 1428
2163static ssize_t iwl_dbgfs_internal_scan_write(struct file *file,
2164 const char __user *user_buf,
2165 size_t count, loff_t *ppos)
2166{
2167 struct iwl_priv *priv = file->private_data;
2168 char buf[8];
2169 int buf_size;
2170 int scan;
2171
2172 memset(buf, 0, sizeof(buf));
2173 buf_size = min(count, sizeof(buf) - 1);
2174 if (copy_from_user(buf, user_buf, buf_size))
2175 return -EFAULT;
2176 if (sscanf(buf, "%d", &scan) != 1)
2177 return -EINVAL;
2178
2179 iwl_internal_short_hw_scan(priv);
2180
2181 return count;
2182}
2183
2184static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file, 1429static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
2185 char __user *user_buf, 1430 char __user *user_buf,
2186 size_t count, loff_t *ppos) { 1431 size_t count, loff_t *ppos) {
@@ -2189,13 +1434,11 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
2189 int pos = 0; 1434 int pos = 0;
2190 char buf[12]; 1435 char buf[12];
2191 const size_t bufsz = sizeof(buf); 1436 const size_t bufsz = sizeof(buf);
2192 ssize_t ret;
2193 1437
2194 pos += scnprintf(buf + pos, bufsz - pos, "%u\n", 1438 pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
2195 priv->cfg->plcp_delta_threshold); 1439 priv->cfg->plcp_delta_threshold);
2196 1440
2197 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1441 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2198 return ret;
2199} 1442}
2200 1443
2201static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, 1444static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
@@ -2288,7 +1531,6 @@ DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
2288DEBUGFS_READ_FILE_OPS(ucode_general_stats); 1531DEBUGFS_READ_FILE_OPS(ucode_general_stats);
2289DEBUGFS_READ_FILE_OPS(sensitivity); 1532DEBUGFS_READ_FILE_OPS(sensitivity);
2290DEBUGFS_READ_FILE_OPS(chain_noise); 1533DEBUGFS_READ_FILE_OPS(chain_noise);
2291DEBUGFS_READ_FILE_OPS(tx_power);
2292DEBUGFS_READ_FILE_OPS(power_save_status); 1534DEBUGFS_READ_FILE_OPS(power_save_status);
2293DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics); 1535DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
2294DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics); 1536DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
@@ -2296,9 +1538,10 @@ DEBUGFS_WRITE_FILE_OPS(csr);
2296DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing); 1538DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
2297DEBUGFS_READ_FILE_OPS(fh_reg); 1539DEBUGFS_READ_FILE_OPS(fh_reg);
2298DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); 1540DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
2299DEBUGFS_WRITE_FILE_OPS(internal_scan);
2300DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); 1541DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
2301DEBUGFS_READ_WRITE_FILE_OPS(force_reset); 1542DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
1543DEBUGFS_READ_FILE_OPS(rxon_flags);
1544DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
2302 1545
2303/* 1546/*
2304 * Create the debugfs files and directories 1547 * Create the debugfs files and directories
@@ -2334,8 +1577,11 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2334 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR); 1577 DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
2335 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR); 1578 DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
2336 DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR); 1579 DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
2337 DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR); 1580 if (!priv->cfg->broken_powersave) {
2338 DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR); 1581 DEBUGFS_ADD_FILE(sleep_level_override, dir_data,
1582 S_IWUSR | S_IRUSR);
1583 DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
1584 }
2339 DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR); 1585 DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
2340 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR); 1586 DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
2341 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR); 1587 DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
@@ -2343,29 +1589,33 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2343 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR); 1589 DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
2344 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR); 1590 DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
2345 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR); 1591 DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
2346 DEBUGFS_ADD_FILE(tx_power, dir_debug, S_IRUSR);
2347 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR); 1592 DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
2348 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR); 1593 DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
2349 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR); 1594 DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
2350 DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR); 1595 DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
2351 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR); 1596 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
2352 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); 1597 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
2353 DEBUGFS_ADD_FILE(internal_scan, dir_debug, S_IWUSR);
2354 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); 1598 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
2355 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); 1599 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
2356 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 1600 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
2357 DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR); 1601 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
2358 DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR); 1602 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
2359 DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR); 1603
1604 if (priv->cfg->sensitivity_calib_by_driver)
2360 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR); 1605 DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
1606 if (priv->cfg->chain_noise_calib_by_driver)
2361 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); 1607 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
1608 if (priv->cfg->ucode_tracing)
2362 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); 1609 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
2363 } 1610 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
2364 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal); 1611 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
2365 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, 1612 if (priv->cfg->sensitivity_calib_by_driver)
2366 &priv->disable_chain_noise_cal); 1613 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
2367 if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) || 1614 &priv->disable_sens_cal);
2368 ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945)) 1615 if (priv->cfg->chain_noise_calib_by_driver)
1616 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
1617 &priv->disable_chain_noise_cal);
1618 if (priv->cfg->tx_power_by_driver)
2369 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, 1619 DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
2370 &priv->disable_tx_power_cal); 1620 &priv->disable_tx_power_cal);
2371 return 0; 1621 return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 6054c5fba0c1..f3f3473c5c7e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -43,6 +43,7 @@
43#include "iwl-debug.h" 43#include "iwl-debug.h"
44#include "iwl-4965-hw.h" 44#include "iwl-4965-hw.h"
45#include "iwl-3945-hw.h" 45#include "iwl-3945-hw.h"
46#include "iwl-agn-hw.h"
46#include "iwl-led.h" 47#include "iwl-led.h"
47#include "iwl-power.h" 48#include "iwl-power.h"
48#include "iwl-agn-rs.h" 49#include "iwl-agn-rs.h"
@@ -56,6 +57,7 @@ extern struct iwl_cfg iwl5100_bgn_cfg;
56extern struct iwl_cfg iwl5100_abg_cfg; 57extern struct iwl_cfg iwl5100_abg_cfg;
57extern struct iwl_cfg iwl5150_agn_cfg; 58extern struct iwl_cfg iwl5150_agn_cfg;
58extern struct iwl_cfg iwl5150_abg_cfg; 59extern struct iwl_cfg iwl5150_abg_cfg;
60extern struct iwl_cfg iwl6000g2a_2agn_cfg;
59extern struct iwl_cfg iwl6000i_2agn_cfg; 61extern struct iwl_cfg iwl6000i_2agn_cfg;
60extern struct iwl_cfg iwl6000i_2abg_cfg; 62extern struct iwl_cfg iwl6000i_2abg_cfg;
61extern struct iwl_cfg iwl6000i_2bg_cfg; 63extern struct iwl_cfg iwl6000i_2bg_cfg;
@@ -67,45 +69,6 @@ extern struct iwl_cfg iwl1000_bg_cfg;
67 69
68struct iwl_tx_queue; 70struct iwl_tx_queue;
69 71
70/* shared structures from iwl-5000.c */
71extern struct iwl_mod_params iwl50_mod_params;
72extern struct iwl_ucode_ops iwl5000_ucode;
73extern struct iwl_lib_ops iwl5000_lib;
74extern struct iwl_hcmd_ops iwl5000_hcmd;
75extern struct iwl_hcmd_utils_ops iwl5000_hcmd_utils;
76
77/* shared functions from iwl-5000.c */
78extern u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len);
79extern u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd,
80 u8 *data);
81extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
82 __le32 *tx_flags);
83extern int iwl5000_calc_rssi(struct iwl_priv *priv,
84 struct iwl_rx_phy_res *rx_resp);
85extern void iwl5000_nic_config(struct iwl_priv *priv);
86extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv);
87extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
88 size_t offset);
89extern void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
90 struct iwl_tx_queue *txq,
91 u16 byte_cnt);
92extern void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
93 struct iwl_tx_queue *txq);
94extern int iwl5000_load_ucode(struct iwl_priv *priv);
95extern void iwl5000_init_alive_start(struct iwl_priv *priv);
96extern int iwl5000_alive_notify(struct iwl_priv *priv);
97extern int iwl5000_hw_set_hw_params(struct iwl_priv *priv);
98extern int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
99 int tx_fifo, int sta_id, int tid, u16 ssn_idx);
100extern int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
101 u16 ssn_idx, u8 tx_fifo);
102extern void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask);
103extern void iwl5000_setup_deferred_work(struct iwl_priv *priv);
104extern void iwl5000_rx_handler_setup(struct iwl_priv *priv);
105extern int iwl5000_hw_valid_rtc_data_addr(u32 addr);
106extern int iwl5000_send_tx_power(struct iwl_priv *priv);
107extern void iwl5000_temperature(struct iwl_priv *priv);
108
109/* CT-KILL constants */ 72/* CT-KILL constants */
110#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ 73#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
111#define CT_KILL_THRESHOLD 114 /* in Celsius */ 74#define CT_KILL_THRESHOLD 114 /* in Celsius */
@@ -183,6 +146,10 @@ struct iwl_queue {
183 int n_bd; /* number of BDs in this queue */ 146 int n_bd; /* number of BDs in this queue */
184 int write_ptr; /* 1-st empty entry (index) host_w*/ 147 int write_ptr; /* 1-st empty entry (index) host_w*/
185 int read_ptr; /* last used entry (index) host_r*/ 148 int read_ptr; /* last used entry (index) host_r*/
149 /* use for monitoring and recovering the stuck queue */
150 int last_read_ptr; /* storing the last read_ptr */
151 /* number of time read_ptr and last_read_ptr are the same */
152 u8 repeat_same_read_ptr;
186 dma_addr_t dma_addr; /* physical addr for BD's */ 153 dma_addr_t dma_addr; /* physical addr for BD's */
187 int n_window; /* safe queue window */ 154 int n_window; /* safe queue window */
188 u32 id; 155 u32 id;
@@ -304,13 +271,11 @@ struct iwl_channel_info {
304 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES]; 271 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
305}; 272};
306 273
307#define IWL_TX_FIFO_AC0 0 274#define IWL_TX_FIFO_BK 0
308#define IWL_TX_FIFO_AC1 1 275#define IWL_TX_FIFO_BE 1
309#define IWL_TX_FIFO_AC2 2 276#define IWL_TX_FIFO_VI 2
310#define IWL_TX_FIFO_AC3 3 277#define IWL_TX_FIFO_VO 3
311#define IWL_TX_FIFO_HCCA_1 5 278#define IWL_TX_FIFO_UNUSED -1
312#define IWL_TX_FIFO_HCCA_2 6
313#define IWL_TX_FIFO_NONE 7
314 279
315/* Minimum number of queues. MAX_NUM is defined in hw specific files. 280/* Minimum number of queues. MAX_NUM is defined in hw specific files.
316 * Set the minimum to accommodate the 4 standard TX queues, 1 command 281 * Set the minimum to accommodate the 4 standard TX queues, 1 command
@@ -361,13 +326,6 @@ enum {
361 326
362#define DEF_CMD_PAYLOAD_SIZE 320 327#define DEF_CMD_PAYLOAD_SIZE 320
363 328
364/*
365 * IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header,
366 * SNAP header and alignment. It should also be big enough for 802.11
367 * control frames.
368 */
369#define IWL_LINK_HDR_MAX 64
370
371/** 329/**
372 * struct iwl_device_cmd 330 * struct iwl_device_cmd
373 * 331 *
@@ -519,38 +477,28 @@ struct iwl_ht_config {
519 u8 non_GF_STA_present; 477 u8 non_GF_STA_present;
520}; 478};
521 479
522union iwl_qos_capabity {
523 struct {
524 u8 edca_count:4; /* bit 0-3 */
525 u8 q_ack:1; /* bit 4 */
526 u8 queue_request:1; /* bit 5 */
527 u8 txop_request:1; /* bit 6 */
528 u8 reserved:1; /* bit 7 */
529 } q_AP;
530 struct {
531 u8 acvo_APSD:1; /* bit 0 */
532 u8 acvi_APSD:1; /* bit 1 */
533 u8 ac_bk_APSD:1; /* bit 2 */
534 u8 ac_be_APSD:1; /* bit 3 */
535 u8 q_ack:1; /* bit 4 */
536 u8 max_len:2; /* bit 5-6 */
537 u8 more_data_ack:1; /* bit 7 */
538 } q_STA;
539 u8 val;
540};
541
542/* QoS structures */ 480/* QoS structures */
543struct iwl_qos_info { 481struct iwl_qos_info {
544 int qos_active; 482 int qos_active;
545 union iwl_qos_capabity qos_cap;
546 struct iwl_qosparam_cmd def_qos_parm; 483 struct iwl_qosparam_cmd def_qos_parm;
547}; 484};
548 485
486/*
487 * Structure should be accessed with sta_lock held. When station addition
488 * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
489 * the commands (iwl_addsta_cmd and iwl_link_quality_cmd) without sta_lock
490 * held.
491 */
549struct iwl_station_entry { 492struct iwl_station_entry {
550 struct iwl_addsta_cmd sta; 493 struct iwl_addsta_cmd sta;
551 struct iwl_tid_data tid[MAX_TID_COUNT]; 494 struct iwl_tid_data tid[MAX_TID_COUNT];
552 u8 used; 495 u8 used;
553 struct iwl_hw_key keyinfo; 496 struct iwl_hw_key keyinfo;
497 struct iwl_link_quality_cmd *lq;
498};
499
500struct iwl_station_priv_common {
501 u8 sta_id;
554}; 502};
555 503
556/* 504/*
@@ -559,14 +507,28 @@ struct iwl_station_entry {
559 * When mac80211 creates a station it reserves some space (hw->sta_data_size) 507 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
560 * in the structure for use by driver. This structure is places in that 508 * in the structure for use by driver. This structure is places in that
561 * space. 509 * space.
510 *
511 * The common struct MUST be first because it is shared between
512 * 3945 and agn!
562 */ 513 */
563struct iwl_station_priv { 514struct iwl_station_priv {
515 struct iwl_station_priv_common common;
564 struct iwl_lq_sta lq_sta; 516 struct iwl_lq_sta lq_sta;
565 atomic_t pending_frames; 517 atomic_t pending_frames;
566 bool client; 518 bool client;
567 bool asleep; 519 bool asleep;
568}; 520};
569 521
522/**
523 * struct iwl_vif_priv - driver's private per-interface information
524 *
525 * When mac80211 allocates a virtual interface, it can allocate
526 * space for us to put data into.
527 */
528struct iwl_vif_priv {
529 u8 ibss_bssid_sta_id;
530};
531
570/* one for each uCode image (inst/data, boot/init/runtime) */ 532/* one for each uCode image (inst/data, boot/init/runtime) */
571struct fw_desc { 533struct fw_desc {
572 void *v_addr; /* access by driver */ 534 void *v_addr; /* access by driver */
@@ -574,7 +536,7 @@ struct fw_desc {
574 u32 len; /* bytes */ 536 u32 len; /* bytes */
575}; 537};
576 538
577/* uCode file layout */ 539/* v1/v2 uCode file layout */
578struct iwl_ucode_header { 540struct iwl_ucode_header {
579 __le32 ver; /* major/minor/API/serial */ 541 __le32 ver; /* major/minor/API/serial */
580 union { 542 union {
@@ -597,7 +559,62 @@ struct iwl_ucode_header {
597 } v2; 559 } v2;
598 } u; 560 } u;
599}; 561};
600#define UCODE_HEADER_SIZE(ver) ((ver) == 1 ? 24 : 28) 562
563/*
564 * new TLV uCode file layout
565 *
566 * The new TLV file format contains TLVs, that each specify
567 * some piece of data. To facilitate "groups", for example
568 * different instruction image with different capabilities,
569 * bundled with the same init image, an alternative mechanism
570 * is provided:
571 * When the alternative field is 0, that means that the item
572 * is always valid. When it is non-zero, then it is only
573 * valid in conjunction with items of the same alternative,
574 * in which case the driver (user) selects one alternative
575 * to use.
576 */
577
578enum iwl_ucode_tlv_type {
579 IWL_UCODE_TLV_INVALID = 0, /* unused */
580 IWL_UCODE_TLV_INST = 1,
581 IWL_UCODE_TLV_DATA = 2,
582 IWL_UCODE_TLV_INIT = 3,
583 IWL_UCODE_TLV_INIT_DATA = 4,
584 IWL_UCODE_TLV_BOOT = 5,
585 IWL_UCODE_TLV_PROBE_MAX_LEN = 6, /* a u32 value */
586};
587
588struct iwl_ucode_tlv {
589 __le16 type; /* see above */
590 __le16 alternative; /* see comment */
591 __le32 length; /* not including type/length fields */
592 u8 data[0];
593} __attribute__ ((packed));
594
595#define IWL_TLV_UCODE_MAGIC 0x0a4c5749
596
597struct iwl_tlv_ucode_header {
598 /*
599 * The TLV style ucode header is distinguished from
600 * the v1/v2 style header by first four bytes being
601 * zero, as such is an invalid combination of
602 * major/minor/API/serial versions.
603 */
604 __le32 zero;
605 __le32 magic;
606 u8 human_readable[64];
607 __le32 ver; /* major/minor/API/serial */
608 __le32 build;
609 __le64 alternatives; /* bitmask of valid alternatives */
610 /*
611 * The data contained herein has a TLV layout,
612 * see above for the TLV header and types.
613 * Note that each TLV is padded to a length
614 * that is a multiple of 4 for alignment.
615 */
616 u8 data[0];
617};
601 618
602struct iwl4965_ibss_seq { 619struct iwl4965_ibss_seq {
603 u8 mac[ETH_ALEN]; 620 u8 mac[ETH_ALEN];
@@ -1039,6 +1056,11 @@ struct iwl_event_log {
1039#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) 1056#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
1040#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) 1057#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
1041 1058
1059/* timer constants use to monitor and recover stuck tx queues in mSecs */
1060#define IWL_MONITORING_PERIOD (1000)
1061#define IWL_ONE_HUNDRED_MSECS (100)
1062#define IWL_SIXTY_SECS (60000)
1063
1042enum iwl_reset { 1064enum iwl_reset {
1043 IWL_RF_RESET = 0, 1065 IWL_RF_RESET = 0,
1044 IWL_FW_RESET, 1066 IWL_FW_RESET,
@@ -1092,10 +1114,6 @@ struct iwl_priv {
1092 struct iwl_channel_info *channel_info; /* channel info array */ 1114 struct iwl_channel_info *channel_info; /* channel info array */
1093 u8 channel_count; /* # of channels */ 1115 u8 channel_count; /* # of channels */
1094 1116
1095 /* each calibration channel group in the EEPROM has a derived
1096 * clip setting for each rate. 3945 only.*/
1097 const struct iwl3945_clip_group clip39_groups[5];
1098
1099 /* thermal calibration */ 1117 /* thermal calibration */
1100 s32 temperature; /* degrees Kelvin */ 1118 s32 temperature; /* degrees Kelvin */
1101 s32 last_temperature; 1119 s32 last_temperature;
@@ -1104,12 +1122,10 @@ struct iwl_priv {
1104 struct iwl_calib_result calib_results[IWL_CALIB_MAX]; 1122 struct iwl_calib_result calib_results[IWL_CALIB_MAX];
1105 1123
1106 /* Scan related variables */ 1124 /* Scan related variables */
1107 unsigned long next_scan_jiffies;
1108 unsigned long scan_start; 1125 unsigned long scan_start;
1109 unsigned long scan_pass_start;
1110 unsigned long scan_start_tsf; 1126 unsigned long scan_start_tsf;
1111 void *scan; 1127 void *scan_cmd;
1112 int scan_bands; 1128 enum ieee80211_band scan_band;
1113 struct cfg80211_scan_request *scan_request; 1129 struct cfg80211_scan_request *scan_request;
1114 bool is_internal_short_scan; 1130 bool is_internal_short_scan;
1115 u8 scan_tx_ant[IEEE80211_NUM_BANDS]; 1131 u8 scan_tx_ant[IEEE80211_NUM_BANDS];
@@ -1168,16 +1184,13 @@ struct iwl_priv {
1168 u64 led_tpt; 1184 u64 led_tpt;
1169 1185
1170 u16 active_rate; 1186 u16 active_rate;
1171 u16 active_rate_basic;
1172 1187
1173 u8 assoc_station_added;
1174 u8 start_calib; 1188 u8 start_calib;
1175 struct iwl_sensitivity_data sensitivity_data; 1189 struct iwl_sensitivity_data sensitivity_data;
1176 struct iwl_chain_noise_data chain_noise_data; 1190 struct iwl_chain_noise_data chain_noise_data;
1177 __le16 sensitivity_tbl[HD_TABLE_SIZE]; 1191 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1178 1192
1179 struct iwl_ht_config current_ht_config; 1193 struct iwl_ht_config current_ht_config;
1180 u8 last_phy_res[100];
1181 1194
1182 /* Rate scaling data */ 1195 /* Rate scaling data */
1183 u8 retry_rate; 1196 u8 retry_rate;
@@ -1197,9 +1210,6 @@ struct iwl_priv {
1197 1210
1198 unsigned long status; 1211 unsigned long status;
1199 1212
1200 int last_rx_rssi; /* From Rx packet statistics */
1201 int last_rx_noise; /* From beacon statistics */
1202
1203 /* counts mgmt, ctl, and data packets */ 1213 /* counts mgmt, ctl, and data packets */
1204 struct traffic_stats tx_stats; 1214 struct traffic_stats tx_stats;
1205 struct traffic_stats rx_stats; 1215 struct traffic_stats rx_stats;
@@ -1218,18 +1228,14 @@ struct iwl_priv {
1218#endif 1228#endif
1219 1229
1220 /* context information */ 1230 /* context information */
1221 u16 rates_mask; 1231 u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
1222
1223 u8 bssid[ETH_ALEN];
1224 u16 rts_threshold;
1225 u8 mac_addr[ETH_ALEN]; 1232 u8 mac_addr[ETH_ALEN];
1226 1233
1227 /*station table variables */ 1234 /*station table variables */
1228 spinlock_t sta_lock; 1235 spinlock_t sta_lock;
1229 int num_stations; 1236 int num_stations;
1230 struct iwl_station_entry stations[IWL_STATION_COUNT]; 1237 struct iwl_station_entry stations[IWL_STATION_COUNT];
1231 struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; 1238 struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; /* protected by mutex */
1232 u8 default_wep_key;
1233 u8 key_mapping_key; 1239 u8 key_mapping_key;
1234 unsigned long ucode_key_table; 1240 unsigned long ucode_key_table;
1235 1241
@@ -1244,10 +1250,6 @@ struct iwl_priv {
1244 1250
1245 u8 mac80211_registered; 1251 u8 mac80211_registered;
1246 1252
1247 /* Rx'd packet timing information */
1248 u32 last_beacon_time;
1249 u64 last_tsf;
1250
1251 /* eeprom -- this is in the card's little endian byte order */ 1253 /* eeprom -- this is in the card's little endian byte order */
1252 u8 *eeprom; 1254 u8 *eeprom;
1253 int nvm_device_type; 1255 int nvm_device_type;
@@ -1259,29 +1261,67 @@ struct iwl_priv {
1259 1261
1260 /* Last Rx'd beacon timestamp */ 1262 /* Last Rx'd beacon timestamp */
1261 u64 timestamp; 1263 u64 timestamp;
1262 u16 beacon_int;
1263 struct ieee80211_vif *vif; 1264 struct ieee80211_vif *vif;
1264 1265
1265 /*Added for 3945 */ 1266 union {
1266 void *shared_virt; 1267#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
1267 dma_addr_t shared_phys; 1268 struct {
1268 /*End*/ 1269 void *shared_virt;
1269 struct iwl_hw_params hw_params; 1270 dma_addr_t shared_phys;
1270 1271
1271 /* INT ICT Table */ 1272 struct delayed_work thermal_periodic;
1272 __le32 *ict_tbl; 1273 struct delayed_work rfkill_poll;
1273 dma_addr_t ict_tbl_dma; 1274
1274 dma_addr_t aligned_ict_tbl_dma; 1275 struct iwl3945_notif_statistics statistics;
1275 int ict_index; 1276#ifdef CONFIG_IWLWIFI_DEBUG
1276 void *ict_tbl_vir; 1277 struct iwl3945_notif_statistics accum_statistics;
1277 u32 inta; 1278 struct iwl3945_notif_statistics delta_statistics;
1278 bool use_ict; 1279 struct iwl3945_notif_statistics max_delta;
1280#endif
1281
1282 u32 sta_supp_rates;
1283 int last_rx_rssi; /* From Rx packet statistics */
1284
1285 /* Rx'd packet timing information */
1286 u32 last_beacon_time;
1287 u64 last_tsf;
1288
1289 /*
1290 * each calibration channel group in the
1291 * EEPROM has a derived clip setting for
1292 * each rate.
1293 */
1294 const struct iwl3945_clip_group clip_groups[5];
1295
1296 } _3945;
1297#endif
1298#if defined(CONFIG_IWLAGN) || defined(CONFIG_IWLAGN_MODULE)
1299 struct {
1300 /* INT ICT Table */
1301 __le32 *ict_tbl;
1302 void *ict_tbl_vir;
1303 dma_addr_t ict_tbl_dma;
1304 dma_addr_t aligned_ict_tbl_dma;
1305 int ict_index;
1306 u32 inta;
1307 bool use_ict;
1308 /*
1309 * reporting the number of tids has AGG on. 0 means
1310 * no AGGREGATION
1311 */
1312 u8 agg_tids_count;
1313
1314 struct iwl_rx_phy_res last_phy_res;
1315 bool last_phy_res_valid;
1316
1317 struct completion firmware_loading_complete;
1318 } _agn;
1319#endif
1320 };
1321
1322 struct iwl_hw_params hw_params;
1279 1323
1280 u32 inta_mask; 1324 u32 inta_mask;
1281 /* Current association information needed to configure the
1282 * hardware */
1283 u16 assoc_id;
1284 u16 assoc_capability;
1285 1325
1286 struct iwl_qos_info qos_data; 1326 struct iwl_qos_info qos_data;
1287 1327
@@ -1291,11 +1331,11 @@ struct iwl_priv {
1291 struct work_struct scan_completed; 1331 struct work_struct scan_completed;
1292 struct work_struct rx_replenish; 1332 struct work_struct rx_replenish;
1293 struct work_struct abort_scan; 1333 struct work_struct abort_scan;
1294 struct work_struct request_scan;
1295 struct work_struct beacon_update; 1334 struct work_struct beacon_update;
1296 struct work_struct tt_work; 1335 struct work_struct tt_work;
1297 struct work_struct ct_enter; 1336 struct work_struct ct_enter;
1298 struct work_struct ct_exit; 1337 struct work_struct ct_exit;
1338 struct work_struct start_internal_scan;
1299 1339
1300 struct tasklet_struct irq_tasklet; 1340 struct tasklet_struct irq_tasklet;
1301 1341
@@ -1303,10 +1343,6 @@ struct iwl_priv {
1303 struct delayed_work alive_start; 1343 struct delayed_work alive_start;
1304 struct delayed_work scan_check; 1344 struct delayed_work scan_check;
1305 1345
1306 /*For 3945 only*/
1307 struct delayed_work thermal_periodic;
1308 struct delayed_work rfkill_poll;
1309
1310 /* TX Power */ 1346 /* TX Power */
1311 s8 tx_power_user_lmt; 1347 s8 tx_power_user_lmt;
1312 s8 tx_power_device_lmt; 1348 s8 tx_power_device_lmt;
@@ -1338,13 +1374,8 @@ struct iwl_priv {
1338 struct work_struct run_time_calib_work; 1374 struct work_struct run_time_calib_work;
1339 struct timer_list statistics_periodic; 1375 struct timer_list statistics_periodic;
1340 struct timer_list ucode_trace; 1376 struct timer_list ucode_trace;
1377 struct timer_list monitor_recover;
1341 bool hw_ready; 1378 bool hw_ready;
1342 /*For 3945*/
1343#define IWL_DEFAULT_TX_POWER 0x0F
1344
1345 struct iwl3945_notif_statistics statistics_39;
1346
1347 u32 sta_supp_rates;
1348 1379
1349 struct iwl_event_log event_log; 1380 struct iwl_event_log event_log;
1350}; /*iwl_priv */ 1381}; /*iwl_priv */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 2ffc2edbf4f0..4a487639d932 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -37,6 +37,7 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32); 37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32); 38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx); 39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_tx);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); 41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); 42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); 43EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index fb5bb487f3bc..ee11452519e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -590,9 +590,16 @@ int iwl_eeprom_init(struct iwl_priv *priv)
590 e[addr / 2] = cpu_to_le16(r >> 16); 590 e[addr / 2] = cpu_to_le16(r >> 16);
591 } 591 }
592 } 592 }
593
594 IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n",
595 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
596 ? "OTP" : "EEPROM",
597 iwl_eeprom_query16(priv, EEPROM_VERSION));
598
593 ret = 0; 599 ret = 0;
594done: 600done:
595 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv); 601 priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
602
596err: 603err:
597 if (ret) 604 if (ret)
598 iwl_eeprom_free(priv); 605 iwl_eeprom_free(priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 4e1ba824dc50..95aa202c85e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -172,37 +172,41 @@ struct iwl_eeprom_enhanced_txpwr {
172#define EEPROM_5000_TX_POWER_VERSION (4) 172#define EEPROM_5000_TX_POWER_VERSION (4)
173#define EEPROM_5000_EEPROM_VERSION (0x11A) 173#define EEPROM_5000_EEPROM_VERSION (0x11A)
174 174
175/*5000 calibrations */ 175/* 5000 and up calibration */
176#define EEPROM_5000_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION) 176#define EEPROM_CALIB_ALL (INDIRECT_ADDRESS | INDIRECT_CALIBRATION)
177#define EEPROM_5000_XTAL ((2*0x128) | EEPROM_5000_CALIB_ALL) 177#define EEPROM_XTAL ((2*0x128) | EEPROM_CALIB_ALL)
178#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_5000_CALIB_ALL) 178
179 179/* 5000 temperature */
180/* 5000 links */ 180#define EEPROM_5000_TEMPERATURE ((2*0x12A) | EEPROM_CALIB_ALL)
181#define EEPROM_5000_LINK_HOST (2*0x64) 181
182#define EEPROM_5000_LINK_GENERAL (2*0x65) 182/* agn links */
183#define EEPROM_5000_LINK_REGULATORY (2*0x66) 183#define EEPROM_LINK_HOST (2*0x64)
184#define EEPROM_5000_LINK_CALIBRATION (2*0x67) 184#define EEPROM_LINK_GENERAL (2*0x65)
185#define EEPROM_5000_LINK_PROCESS_ADJST (2*0x68) 185#define EEPROM_LINK_REGULATORY (2*0x66)
186#define EEPROM_5000_LINK_OTHERS (2*0x69) 186#define EEPROM_LINK_CALIBRATION (2*0x67)
187 187#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
188/* 5000 regulatory - indirect access */ 188#define EEPROM_LINK_OTHERS (2*0x69)
189#define EEPROM_5000_REG_SKU_ID ((0x02)\ 189
190 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */ 190/* agn regulatory - indirect access */
191#define EEPROM_5000_REG_BAND_1_CHANNELS ((0x08)\ 191#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
192 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */ 192 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
193#define EEPROM_5000_REG_BAND_2_CHANNELS ((0x26)\ 193#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\
194 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */ 194 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
195#define EEPROM_5000_REG_BAND_3_CHANNELS ((0x42)\ 195#define EEPROM_REG_BAND_3_CHANNELS ((0x42)\
196 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ 196 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
197#define EEPROM_5000_REG_BAND_4_CHANNELS ((0x5C)\ 197#define EEPROM_REG_BAND_4_CHANNELS ((0x5C)\
198 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ 198 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
199#define EEPROM_5000_REG_BAND_5_CHANNELS ((0x74)\ 199#define EEPROM_REG_BAND_5_CHANNELS ((0x74)\
200 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */ 200 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
201#define EEPROM_5000_REG_BAND_24_HT40_CHANNELS ((0x82)\ 201#define EEPROM_REG_BAND_24_HT40_CHANNELS ((0x82)\
202 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */ 202 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
203#define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\ 203#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\
204 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ 204 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
205 205
206/* 6000 regulatory - indirect access */
207#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
208 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
209
206/* 6000 and up regulatory tx power - indirect access */ 210/* 6000 and up regulatory tx power - indirect access */
207/* max. elements per section */ 211/* max. elements per section */
208#define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8) 212#define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8)
@@ -261,14 +265,21 @@ struct iwl_eeprom_enhanced_txpwr {
261#define EEPROM_5050_EEPROM_VERSION (0x21E) 265#define EEPROM_5050_EEPROM_VERSION (0x21E)
262 266
263/* 1000 Specific */ 267/* 1000 Specific */
268#define EEPROM_1000_TX_POWER_VERSION (4)
264#define EEPROM_1000_EEPROM_VERSION (0x15C) 269#define EEPROM_1000_EEPROM_VERSION (0x15C)
265 270
266/* 6x00 Specific */ 271/* 6x00 Specific */
272#define EEPROM_6000_TX_POWER_VERSION (4)
267#define EEPROM_6000_EEPROM_VERSION (0x434) 273#define EEPROM_6000_EEPROM_VERSION (0x434)
268 274
269/* 6x50 Specific */ 275/* 6x50 Specific */
276#define EEPROM_6050_TX_POWER_VERSION (4)
270#define EEPROM_6050_EEPROM_VERSION (0x532) 277#define EEPROM_6050_EEPROM_VERSION (0x532)
271 278
279/* 6x00g2 Specific */
280#define EEPROM_6000G2_TX_POWER_VERSION (6)
281#define EEPROM_6000G2_EEPROM_VERSION (0x709)
282
272/* OTP */ 283/* OTP */
273/* lower blocks contain EEPROM image and calibration data */ 284/* lower blocks contain EEPROM image and calibration data */
274#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */ 285#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 73681c4fefe7..51f89e7ba681 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -169,7 +169,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
169 mutex_lock(&priv->sync_cmd_mutex); 169 mutex_lock(&priv->sync_cmd_mutex);
170 170
171 set_bit(STATUS_HCMD_ACTIVE, &priv->status); 171 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
172 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s \n", 172 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
173 get_cmd_string(cmd->id)); 173 get_cmd_string(cmd->id));
174 174
175 cmd_idx = iwl_enqueue_hcmd(priv, cmd); 175 cmd_idx = iwl_enqueue_hcmd(priv, cmd);
@@ -191,7 +191,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
191 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 191 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
192 192
193 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 193 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
194 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n", 194 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
195 get_cmd_string(cmd->id)); 195 get_cmd_string(cmd->id));
196 ret = -ETIMEDOUT; 196 ret = -ETIMEDOUT;
197 goto cancel; 197 goto cancel;
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 51a67fb2e185..3ff6b9d25a10 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -31,6 +31,9 @@
31#define __iwl_helpers_h__ 31#define __iwl_helpers_h__
32 32
33#include <linux/ctype.h> 33#include <linux/ctype.h>
34#include <net/mac80211.h>
35
36#include "iwl-io.h"
34 37
35#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) 38#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
36 39
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 16eb3ced9b30..0203a3bbf872 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -298,7 +298,7 @@ static inline u32 __iwl_read_direct32(const char *f, u32 l,
298 struct iwl_priv *priv, u32 reg) 298 struct iwl_priv *priv, u32 reg)
299{ 299{
300 u32 value = _iwl_read_direct32(priv, reg); 300 u32 value = _iwl_read_direct32(priv, reg);
301 IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value, 301 IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
302 f, l); 302 f, l);
303 return value; 303 return value;
304} 304}
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index a6f9c918aabc..db5bfcb036ca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -46,7 +46,7 @@
46static int led_mode; 46static int led_mode;
47module_param(led_mode, int, S_IRUGO); 47module_param(led_mode, int, S_IRUGO);
48MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), " 48MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), "
49 "(default 0)\n"); 49 "(default 0)");
50 50
51 51
52static const struct { 52static const struct {
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 548dac2f6a96..cda6a94d6cc9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -318,10 +318,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
318 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE || 318 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
319 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE; 319 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
320 320
321 if (priv->vif) 321 dtimper = priv->hw->conf.ps_dtim_period ?: 1;
322 dtimper = priv->hw->conf.ps_dtim_period;
323 else
324 dtimper = 1;
325 322
326 if (priv->cfg->broken_powersave) 323 if (priv->cfg->broken_powersave)
327 iwl_power_sleep_cam_cmd(priv, &cmd); 324 iwl_power_sleep_cam_cmd(priv, &cmd);
@@ -384,10 +381,10 @@ EXPORT_SYMBOL(iwl_ht_enabled);
384 381
385bool iwl_within_ct_kill_margin(struct iwl_priv *priv) 382bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
386{ 383{
387 s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */ 384 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
388 bool within_margin = false; 385 bool within_margin = false;
389 386
390 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) 387 if (priv->cfg->temperature_kelvin)
391 temp = KELVIN_TO_CELSIUS(priv->temperature); 388 temp = KELVIN_TO_CELSIUS(priv->temperature);
392 389
393 if (!priv->thermal_throttle.advanced_tt) 390 if (!priv->thermal_throttle.advanced_tt)
@@ -840,12 +837,12 @@ EXPORT_SYMBOL(iwl_tt_exit_ct_kill);
840static void iwl_bg_tt_work(struct work_struct *work) 837static void iwl_bg_tt_work(struct work_struct *work)
841{ 838{
842 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work); 839 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
843 s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */ 840 s32 temp = priv->temperature; /* degrees CELSIUS except specified */
844 841
845 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 842 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
846 return; 843 return;
847 844
848 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) 845 if (priv->cfg->temperature_kelvin)
849 temp = KELVIN_TO_CELSIUS(priv->temperature); 846 temp = KELVIN_TO_CELSIUS(priv->temperature);
850 847
851 if (!priv->thermal_throttle.advanced_tt) 848 if (!priv->thermal_throttle.advanced_tt)
@@ -875,7 +872,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
875 int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1); 872 int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
876 struct iwl_tt_trans *transaction; 873 struct iwl_tt_trans *transaction;
877 874
878 IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling \n"); 875 IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");
879 876
880 memset(tt, 0, sizeof(struct iwl_tt_mgmt)); 877 memset(tt, 0, sizeof(struct iwl_tt_mgmt));
881 878
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index d2d2a9174900..b1f101caf19d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -254,7 +254,7 @@
254 * device. A queue maps to only one (selectable by driver) Tx DMA channel, 254 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
255 * but one DMA channel may take input from several queues. 255 * but one DMA channel may take input from several queues.
256 * 256 *
257 * Tx DMA channels have dedicated purposes. For 4965, they are used as follows 257 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
258 * (cf. default_queue_to_tx_fifo in iwl-4965.c): 258 * (cf. default_queue_to_tx_fifo in iwl-4965.c):
259 * 259 *
260 * 0 -- EDCA BK (background) frames, lowest priority 260 * 0 -- EDCA BK (background) frames, lowest priority
@@ -262,20 +262,20 @@
262 * 2 -- EDCA VI (video) frames, higher priority 262 * 2 -- EDCA VI (video) frames, higher priority
263 * 3 -- EDCA VO (voice) and management frames, highest priority 263 * 3 -- EDCA VO (voice) and management frames, highest priority
264 * 4 -- Commands (e.g. RXON, etc.) 264 * 4 -- Commands (e.g. RXON, etc.)
265 * 5 -- HCCA short frames 265 * 5 -- unused (HCCA)
266 * 6 -- HCCA long frames 266 * 6 -- unused (HCCA)
267 * 7 -- not used by driver (device-internal only) 267 * 7 -- not used by driver (device-internal only)
268 * 268 *
269 * For 5000 series and up, they are used slightly differently 269 * For 5000 series and up, they are used differently
270 * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c): 270 * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c):
271 * 271 *
272 * 0 -- EDCA BK (background) frames, lowest priority 272 * 0 -- EDCA BK (background) frames, lowest priority
273 * 1 -- EDCA BE (best effort) frames, normal priority 273 * 1 -- EDCA BE (best effort) frames, normal priority
274 * 2 -- EDCA VI (video) frames, higher priority 274 * 2 -- EDCA VI (video) frames, higher priority
275 * 3 -- EDCA VO (voice) and management frames, highest priority 275 * 3 -- EDCA VO (voice) and management frames, highest priority
276 * 4 -- (TBD) 276 * 4 -- unused
277 * 5 -- HCCA short frames 277 * 5 -- unused
278 * 6 -- HCCA long frames 278 * 6 -- unused
279 * 7 -- Commands 279 * 7 -- Commands
280 * 280 *
281 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6. 281 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
@@ -529,48 +529,48 @@
529#define IWL_SCD_TXFIFO_POS_RA (4) 529#define IWL_SCD_TXFIFO_POS_RA (4)
530#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF) 530#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
531 531
532/* 5000 SCD */ 532/* agn SCD */
533#define IWL50_SCD_QUEUE_STTS_REG_POS_TXF (0) 533#define IWLAGN_SCD_QUEUE_STTS_REG_POS_TXF (0)
534#define IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE (3) 534#define IWLAGN_SCD_QUEUE_STTS_REG_POS_ACTIVE (3)
535#define IWL50_SCD_QUEUE_STTS_REG_POS_WSL (4) 535#define IWLAGN_SCD_QUEUE_STTS_REG_POS_WSL (4)
536#define IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19) 536#define IWLAGN_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (19)
537#define IWL50_SCD_QUEUE_STTS_REG_MSK (0x00FF0000) 537#define IWLAGN_SCD_QUEUE_STTS_REG_MSK (0x00FF0000)
538 538
539#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_POS (8) 539#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_POS (8)
540#define IWL50_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00) 540#define IWLAGN_SCD_QUEUE_CTX_REG1_CREDIT_MSK (0x00FFFF00)
541#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24) 541#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_POS (24)
542#define IWL50_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000) 542#define IWLAGN_SCD_QUEUE_CTX_REG1_SUPER_CREDIT_MSK (0xFF000000)
543#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0) 543#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS (0)
544#define IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F) 544#define IWLAGN_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
545#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16) 545#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
546#define IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000) 546#define IWLAGN_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
547 547
548#define IWL50_SCD_CONTEXT_DATA_OFFSET (0x600) 548#define IWLAGN_SCD_CONTEXT_DATA_OFFSET (0x600)
549#define IWL50_SCD_TX_STTS_BITMAP_OFFSET (0x7B1) 549#define IWLAGN_SCD_TX_STTS_BITMAP_OFFSET (0x7B1)
550#define IWL50_SCD_TRANSLATE_TBL_OFFSET (0x7E0) 550#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET (0x7E0)
551 551
552#define IWL50_SCD_CONTEXT_QUEUE_OFFSET(x)\ 552#define IWLAGN_SCD_CONTEXT_QUEUE_OFFSET(x)\
553 (IWL50_SCD_CONTEXT_DATA_OFFSET + ((x) * 8)) 553 (IWLAGN_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
554 554
555#define IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \ 555#define IWLAGN_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
556 ((IWL50_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc) 556 ((IWLAGN_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffc)
557 557
558#define IWL50_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\ 558#define IWLAGN_SCD_QUEUECHAIN_SEL_ALL(x) (((1<<(x)) - 1) &\
559 (~(1<<IWL_CMD_QUEUE_NUM))) 559 (~(1<<IWL_CMD_QUEUE_NUM)))
560 560
561#define IWL50_SCD_BASE (PRPH_BASE + 0xa02c00) 561#define IWLAGN_SCD_BASE (PRPH_BASE + 0xa02c00)
562 562
563#define IWL50_SCD_SRAM_BASE_ADDR (IWL50_SCD_BASE + 0x0) 563#define IWLAGN_SCD_SRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x0)
564#define IWL50_SCD_DRAM_BASE_ADDR (IWL50_SCD_BASE + 0x8) 564#define IWLAGN_SCD_DRAM_BASE_ADDR (IWLAGN_SCD_BASE + 0x8)
565#define IWL50_SCD_AIT (IWL50_SCD_BASE + 0x0c) 565#define IWLAGN_SCD_AIT (IWLAGN_SCD_BASE + 0x0c)
566#define IWL50_SCD_TXFACT (IWL50_SCD_BASE + 0x10) 566#define IWLAGN_SCD_TXFACT (IWLAGN_SCD_BASE + 0x10)
567#define IWL50_SCD_ACTIVE (IWL50_SCD_BASE + 0x14) 567#define IWLAGN_SCD_ACTIVE (IWLAGN_SCD_BASE + 0x14)
568#define IWL50_SCD_QUEUE_WRPTR(x) (IWL50_SCD_BASE + 0x18 + (x) * 4) 568#define IWLAGN_SCD_QUEUE_WRPTR(x) (IWLAGN_SCD_BASE + 0x18 + (x) * 4)
569#define IWL50_SCD_QUEUE_RDPTR(x) (IWL50_SCD_BASE + 0x68 + (x) * 4) 569#define IWLAGN_SCD_QUEUE_RDPTR(x) (IWLAGN_SCD_BASE + 0x68 + (x) * 4)
570#define IWL50_SCD_QUEUECHAIN_SEL (IWL50_SCD_BASE + 0xe8) 570#define IWLAGN_SCD_QUEUECHAIN_SEL (IWLAGN_SCD_BASE + 0xe8)
571#define IWL50_SCD_AGGR_SEL (IWL50_SCD_BASE + 0x248) 571#define IWLAGN_SCD_AGGR_SEL (IWLAGN_SCD_BASE + 0x248)
572#define IWL50_SCD_INTERRUPT_MASK (IWL50_SCD_BASE + 0x108) 572#define IWLAGN_SCD_INTERRUPT_MASK (IWLAGN_SCD_BASE + 0x108)
573#define IWL50_SCD_QUEUE_STATUS_BITS(x) (IWL50_SCD_BASE + 0x10c + (x) * 4) 573#define IWLAGN_SCD_QUEUE_STATUS_BITS(x) (IWLAGN_SCD_BASE + 0x10c + (x) * 4)
574 574
575/*********************** END TX SCHEDULER *************************************/ 575/*********************** END TX SCHEDULER *************************************/
576 576
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index e5eb339107dd..0a5d7cf25196 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -163,197 +163,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
163 spin_unlock_irqrestore(&q->lock, flags); 163 spin_unlock_irqrestore(&q->lock, flags);
164} 164}
165EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr); 165EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
166/**
167 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
168 */
169static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
170 dma_addr_t dma_addr)
171{
172 return cpu_to_le32((u32)(dma_addr >> 8));
173}
174
175/**
176 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
177 *
178 * If there are slots in the RX queue that need to be restocked,
179 * and we have free pre-allocated buffers, fill the ranks as much
180 * as we can, pulling from rx_free.
181 *
182 * This moves the 'write' index forward to catch up with 'processed', and
183 * also updates the memory address in the firmware to reference the new
184 * target buffer.
185 */
186void iwl_rx_queue_restock(struct iwl_priv *priv)
187{
188 struct iwl_rx_queue *rxq = &priv->rxq;
189 struct list_head *element;
190 struct iwl_rx_mem_buffer *rxb;
191 unsigned long flags;
192 int write;
193
194 spin_lock_irqsave(&rxq->lock, flags);
195 write = rxq->write & ~0x7;
196 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
197 /* Get next free Rx buffer, remove from free list */
198 element = rxq->rx_free.next;
199 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
200 list_del(element);
201
202 /* Point to Rx buffer via next RBD in circular buffer */
203 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
204 rxq->queue[rxq->write] = rxb;
205 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
206 rxq->free_count--;
207 }
208 spin_unlock_irqrestore(&rxq->lock, flags);
209 /* If the pre-allocated buffer pool is dropping low, schedule to
210 * refill it */
211 if (rxq->free_count <= RX_LOW_WATERMARK)
212 queue_work(priv->workqueue, &priv->rx_replenish);
213
214
215 /* If we've added more space for the firmware to place data, tell it.
216 * Increment device's write pointer in multiples of 8. */
217 if (rxq->write_actual != (rxq->write & ~0x7)) {
218 spin_lock_irqsave(&rxq->lock, flags);
219 rxq->need_update = 1;
220 spin_unlock_irqrestore(&rxq->lock, flags);
221 iwl_rx_queue_update_write_ptr(priv, rxq);
222 }
223}
224EXPORT_SYMBOL(iwl_rx_queue_restock);
225
226
227/**
228 * iwl_rx_replenish - Move all used packet from rx_used to rx_free
229 *
230 * When moving to rx_free an SKB is allocated for the slot.
231 *
232 * Also restock the Rx queue via iwl_rx_queue_restock.
233 * This is called as a scheduled work item (except for during initialization)
234 */
235void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
236{
237 struct iwl_rx_queue *rxq = &priv->rxq;
238 struct list_head *element;
239 struct iwl_rx_mem_buffer *rxb;
240 struct page *page;
241 unsigned long flags;
242 gfp_t gfp_mask = priority;
243
244 while (1) {
245 spin_lock_irqsave(&rxq->lock, flags);
246 if (list_empty(&rxq->rx_used)) {
247 spin_unlock_irqrestore(&rxq->lock, flags);
248 return;
249 }
250 spin_unlock_irqrestore(&rxq->lock, flags);
251
252 if (rxq->free_count > RX_LOW_WATERMARK)
253 gfp_mask |= __GFP_NOWARN;
254
255 if (priv->hw_params.rx_page_order > 0)
256 gfp_mask |= __GFP_COMP;
257
258 /* Alloc a new receive buffer */
259 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
260 if (!page) {
261 if (net_ratelimit())
262 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
263 "order: %d\n",
264 priv->hw_params.rx_page_order);
265
266 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
267 net_ratelimit())
268 IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
269 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
270 rxq->free_count);
271 /* We don't reschedule replenish work here -- we will
272 * call the restock method and if it still needs
273 * more buffers it will schedule replenish */
274 return;
275 }
276
277 spin_lock_irqsave(&rxq->lock, flags);
278
279 if (list_empty(&rxq->rx_used)) {
280 spin_unlock_irqrestore(&rxq->lock, flags);
281 __free_pages(page, priv->hw_params.rx_page_order);
282 return;
283 }
284 element = rxq->rx_used.next;
285 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
286 list_del(element);
287
288 spin_unlock_irqrestore(&rxq->lock, flags);
289
290 rxb->page = page;
291 /* Get physical address of the RB */
292 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
293 PAGE_SIZE << priv->hw_params.rx_page_order,
294 PCI_DMA_FROMDEVICE);
295 /* dma address must be no more than 36 bits */
296 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
297 /* and also 256 byte aligned! */
298 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
299
300 spin_lock_irqsave(&rxq->lock, flags);
301
302 list_add_tail(&rxb->list, &rxq->rx_free);
303 rxq->free_count++;
304 priv->alloc_rxb_page++;
305
306 spin_unlock_irqrestore(&rxq->lock, flags);
307 }
308}
309
310void iwl_rx_replenish(struct iwl_priv *priv)
311{
312 unsigned long flags;
313
314 iwl_rx_allocate(priv, GFP_KERNEL);
315
316 spin_lock_irqsave(&priv->lock, flags);
317 iwl_rx_queue_restock(priv);
318 spin_unlock_irqrestore(&priv->lock, flags);
319}
320EXPORT_SYMBOL(iwl_rx_replenish);
321
322void iwl_rx_replenish_now(struct iwl_priv *priv)
323{
324 iwl_rx_allocate(priv, GFP_ATOMIC);
325
326 iwl_rx_queue_restock(priv);
327}
328EXPORT_SYMBOL(iwl_rx_replenish_now);
329
330
331/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
332 * If an SKB has been detached, the POOL needs to have its SKB set to NULL
333 * This free routine walks the list of POOL entries and if SKB is set to
334 * non NULL it is unmapped and freed
335 */
336void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
337{
338 int i;
339 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
340 if (rxq->pool[i].page != NULL) {
341 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
342 PAGE_SIZE << priv->hw_params.rx_page_order,
343 PCI_DMA_FROMDEVICE);
344 __iwl_free_pages(priv, rxq->pool[i].page);
345 rxq->pool[i].page = NULL;
346 }
347 }
348
349 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
350 rxq->dma_addr);
351 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
352 rxq->rb_stts, rxq->rb_stts_dma);
353 rxq->bd = NULL;
354 rxq->rb_stts = NULL;
355}
356EXPORT_SYMBOL(iwl_rx_queue_free);
357 166
358int iwl_rx_queue_alloc(struct iwl_priv *priv) 167int iwl_rx_queue_alloc(struct iwl_priv *priv)
359{ 168{
@@ -396,98 +205,6 @@ err_bd:
396} 205}
397EXPORT_SYMBOL(iwl_rx_queue_alloc); 206EXPORT_SYMBOL(iwl_rx_queue_alloc);
398 207
399void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
400{
401 unsigned long flags;
402 int i;
403 spin_lock_irqsave(&rxq->lock, flags);
404 INIT_LIST_HEAD(&rxq->rx_free);
405 INIT_LIST_HEAD(&rxq->rx_used);
406 /* Fill the rx_used queue with _all_ of the Rx buffers */
407 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
408 /* In the reset function, these buffers may have been allocated
409 * to an SKB, so we need to unmap and free potential storage */
410 if (rxq->pool[i].page != NULL) {
411 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
412 PAGE_SIZE << priv->hw_params.rx_page_order,
413 PCI_DMA_FROMDEVICE);
414 __iwl_free_pages(priv, rxq->pool[i].page);
415 rxq->pool[i].page = NULL;
416 }
417 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
418 }
419
420 /* Set us so that we have processed and used all buffers, but have
421 * not restocked the Rx queue with fresh buffers */
422 rxq->read = rxq->write = 0;
423 rxq->write_actual = 0;
424 rxq->free_count = 0;
425 spin_unlock_irqrestore(&rxq->lock, flags);
426}
427
428int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
429{
430 u32 rb_size;
431 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
432 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
433
434 if (!priv->cfg->use_isr_legacy)
435 rb_timeout = RX_RB_TIMEOUT;
436
437 if (priv->cfg->mod_params->amsdu_size_8K)
438 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
439 else
440 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
441
442 /* Stop Rx DMA */
443 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
444
445 /* Reset driver's Rx queue write index */
446 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
447
448 /* Tell device where to find RBD circular buffer in DRAM */
449 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
450 (u32)(rxq->dma_addr >> 8));
451
452 /* Tell device where in DRAM to update its Rx status */
453 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
454 rxq->rb_stts_dma >> 4);
455
456 /* Enable Rx DMA
457 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
458 * the credit mechanism in 5000 HW RX FIFO
459 * Direct rx interrupts to hosts
460 * Rx buffer size 4 or 8k
461 * RB timeout 0x10
462 * 256 RBDs
463 */
464 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
465 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
466 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
467 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
468 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
469 rb_size|
470 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
471 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
472
473 /* Set interrupt coalescing timer to default (2048 usecs) */
474 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
475
476 return 0;
477}
478
479int iwl_rxq_stop(struct iwl_priv *priv)
480{
481
482 /* stop Rx DMA */
483 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
484 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
485 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
486
487 return 0;
488}
489EXPORT_SYMBOL(iwl_rxq_stop);
490
491void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, 208void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
492 struct iwl_rx_mem_buffer *rxb) 209 struct iwl_rx_mem_buffer *rxb)
493 210
@@ -543,6 +260,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
543 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; 260 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
544 int bcn_silence_c = 261 int bcn_silence_c =
545 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; 262 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
263 int last_rx_noise;
546 264
547 if (bcn_silence_a) { 265 if (bcn_silence_a) {
548 total_silence += bcn_silence_a; 266 total_silence += bcn_silence_a;
@@ -559,13 +277,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
559 277
560 /* Average among active antennas */ 278 /* Average among active antennas */
561 if (num_active_rx) 279 if (num_active_rx)
562 priv->last_rx_noise = (total_silence / num_active_rx) - 107; 280 last_rx_noise = (total_silence / num_active_rx) - 107;
563 else 281 else
564 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; 282 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
565 283
566 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", 284 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
567 bcn_silence_a, bcn_silence_b, bcn_silence_c, 285 bcn_silence_a, bcn_silence_b, bcn_silence_c,
568 priv->last_rx_noise); 286 last_rx_noise);
569} 287}
570 288
571#ifdef CONFIG_IWLWIFI_DEBUG 289#ifdef CONFIG_IWLWIFI_DEBUG
@@ -617,29 +335,20 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
617 335
618#define REG_RECALIB_PERIOD (60) 336#define REG_RECALIB_PERIOD (60)
619 337
620#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" 338/**
621void iwl_rx_statistics(struct iwl_priv *priv, 339 * iwl_good_plcp_health - checks for plcp error.
622 struct iwl_rx_mem_buffer *rxb) 340 *
341 * When the plcp error is exceeding the thresholds, reset the radio
342 * to improve the throughput.
343 */
344bool iwl_good_plcp_health(struct iwl_priv *priv,
345 struct iwl_rx_packet *pkt)
623{ 346{
624 int change; 347 bool rc = true;
625 struct iwl_rx_packet *pkt = rxb_addr(rxb);
626 int combined_plcp_delta; 348 int combined_plcp_delta;
627 unsigned int plcp_msec; 349 unsigned int plcp_msec;
628 unsigned long plcp_received_jiffies; 350 unsigned long plcp_received_jiffies;
629 351
630 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
631 (int)sizeof(priv->statistics),
632 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
633
634 change = ((priv->statistics.general.temperature !=
635 pkt->u.stats.general.temperature) ||
636 ((priv->statistics.flag &
637 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
638 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
639
640#ifdef CONFIG_IWLWIFI_DEBUG
641 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
642#endif
643 /* 352 /*
644 * check for plcp_err and trigger radio reset if it exceeds 353 * check for plcp_err and trigger radio reset if it exceeds
645 * the plcp error threshold plcp_delta. 354 * the plcp error threshold plcp_delta.
@@ -660,11 +369,11 @@ void iwl_rx_statistics(struct iwl_priv *priv,
660 le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err)); 369 le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
661 370
662 if ((combined_plcp_delta > 0) && 371 if ((combined_plcp_delta > 0) &&
663 ((combined_plcp_delta * 100) / plcp_msec) > 372 ((combined_plcp_delta * 100) / plcp_msec) >
664 priv->cfg->plcp_delta_threshold) { 373 priv->cfg->plcp_delta_threshold) {
665 /* 374 /*
666 * if plcp_err exceed the threshold, the following 375 * if plcp_err exceed the threshold,
667 * data is printed in csv format: 376 * the following data is printed in csv format:
668 * Text: plcp_err exceeded %d, 377 * Text: plcp_err exceeded %d,
669 * Received ofdm.plcp_err, 378 * Received ofdm.plcp_err,
670 * Current ofdm.plcp_err, 379 * Current ofdm.plcp_err,
@@ -673,22 +382,76 @@ void iwl_rx_statistics(struct iwl_priv *priv,
673 * combined_plcp_delta, 382 * combined_plcp_delta,
674 * plcp_msec 383 * plcp_msec
675 */ 384 */
676 IWL_DEBUG_RADIO(priv, PLCP_MSG, 385 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
386 "%u, %u, %u, %u, %d, %u mSecs\n",
677 priv->cfg->plcp_delta_threshold, 387 priv->cfg->plcp_delta_threshold,
678 le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err), 388 le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
679 le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), 389 le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
680 le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err), 390 le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
681 le32_to_cpu( 391 le32_to_cpu(
682 priv->statistics.rx.ofdm_ht.plcp_err), 392 priv->statistics.rx.ofdm_ht.plcp_err),
683 combined_plcp_delta, plcp_msec); 393 combined_plcp_delta, plcp_msec);
394 rc = false;
395 }
396 }
397 return rc;
398}
399EXPORT_SYMBOL(iwl_good_plcp_health);
684 400
685 /* 401void iwl_recover_from_statistics(struct iwl_priv *priv,
686 * Reset the RF radio due to the high plcp 402 struct iwl_rx_packet *pkt)
687 * error rate 403{
688 */ 404 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
689 iwl_force_reset(priv, IWL_RF_RESET); 405 return;
406 if (iwl_is_associated(priv)) {
407 if (priv->cfg->ops->lib->check_ack_health) {
408 if (!priv->cfg->ops->lib->check_ack_health(
409 priv, pkt)) {
410 /*
411 * low ack count detected
412 * restart Firmware
413 */
414 IWL_ERR(priv, "low ack count detected, "
415 "restart firmware\n");
416 if (!iwl_force_reset(priv, IWL_FW_RESET))
417 return;
418 }
419 }
420 if (priv->cfg->ops->lib->check_plcp_health) {
421 if (!priv->cfg->ops->lib->check_plcp_health(
422 priv, pkt)) {
423 /*
424 * high plcp error detected
425 * reset Radio
426 */
427 iwl_force_reset(priv, IWL_RF_RESET);
428 }
690 } 429 }
691 } 430 }
431}
432EXPORT_SYMBOL(iwl_recover_from_statistics);
433
434void iwl_rx_statistics(struct iwl_priv *priv,
435 struct iwl_rx_mem_buffer *rxb)
436{
437 int change;
438 struct iwl_rx_packet *pkt = rxb_addr(rxb);
439
440
441 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
442 (int)sizeof(priv->statistics),
443 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
444
445 change = ((priv->statistics.general.temperature !=
446 pkt->u.stats.general.temperature) ||
447 ((priv->statistics.flag &
448 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
449 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
450
451#ifdef CONFIG_IWLWIFI_DEBUG
452 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
453#endif
454 iwl_recover_from_statistics(priv, pkt);
692 455
693 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); 456 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
694 457
@@ -731,139 +494,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
731} 494}
732EXPORT_SYMBOL(iwl_reply_statistics); 495EXPORT_SYMBOL(iwl_reply_statistics);
733 496
734/* Calc max signal level (dBm) among 3 possible receivers */
735static inline int iwl_calc_rssi(struct iwl_priv *priv,
736 struct iwl_rx_phy_res *rx_resp)
737{
738 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
739}
740
741#ifdef CONFIG_IWLWIFI_DEBUG
742/**
743 * iwl_dbg_report_frame - dump frame to syslog during debug sessions
744 *
745 * You may hack this function to show different aspects of received frames,
746 * including selective frame dumps.
747 * group100 parameter selects whether to show 1 out of 100 good data frames.
748 * All beacon and probe response frames are printed.
749 */
750static void iwl_dbg_report_frame(struct iwl_priv *priv,
751 struct iwl_rx_phy_res *phy_res, u16 length,
752 struct ieee80211_hdr *header, int group100)
753{
754 u32 to_us;
755 u32 print_summary = 0;
756 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
757 u32 hundred = 0;
758 u32 dataframe = 0;
759 __le16 fc;
760 u16 seq_ctl;
761 u16 channel;
762 u16 phy_flags;
763 u32 rate_n_flags;
764 u32 tsf_low;
765 int rssi;
766
767 if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
768 return;
769
770 /* MAC header */
771 fc = header->frame_control;
772 seq_ctl = le16_to_cpu(header->seq_ctrl);
773
774 /* metadata */
775 channel = le16_to_cpu(phy_res->channel);
776 phy_flags = le16_to_cpu(phy_res->phy_flags);
777 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
778
779 /* signal statistics */
780 rssi = iwl_calc_rssi(priv, phy_res);
781 tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
782
783 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
784
785 /* if data frame is to us and all is good,
786 * (optionally) print summary for only 1 out of every 100 */
787 if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
788 cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
789 dataframe = 1;
790 if (!group100)
791 print_summary = 1; /* print each frame */
792 else if (priv->framecnt_to_us < 100) {
793 priv->framecnt_to_us++;
794 print_summary = 0;
795 } else {
796 priv->framecnt_to_us = 0;
797 print_summary = 1;
798 hundred = 1;
799 }
800 } else {
801 /* print summary for all other frames */
802 print_summary = 1;
803 }
804
805 if (print_summary) {
806 char *title;
807 int rate_idx;
808 u32 bitrate;
809
810 if (hundred)
811 title = "100Frames";
812 else if (ieee80211_has_retry(fc))
813 title = "Retry";
814 else if (ieee80211_is_assoc_resp(fc))
815 title = "AscRsp";
816 else if (ieee80211_is_reassoc_resp(fc))
817 title = "RasRsp";
818 else if (ieee80211_is_probe_resp(fc)) {
819 title = "PrbRsp";
820 print_dump = 1; /* dump frame contents */
821 } else if (ieee80211_is_beacon(fc)) {
822 title = "Beacon";
823 print_dump = 1; /* dump frame contents */
824 } else if (ieee80211_is_atim(fc))
825 title = "ATIM";
826 else if (ieee80211_is_auth(fc))
827 title = "Auth";
828 else if (ieee80211_is_deauth(fc))
829 title = "DeAuth";
830 else if (ieee80211_is_disassoc(fc))
831 title = "DisAssoc";
832 else
833 title = "Frame";
834
835 rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
836 if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
837 bitrate = 0;
838 WARN_ON_ONCE(1);
839 } else {
840 bitrate = iwl_rates[rate_idx].ieee / 2;
841 }
842
843 /* print frame summary.
844 * MAC addresses show just the last byte (for brevity),
845 * but you can hack it to show more, if you'd like to. */
846 if (dataframe)
847 IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
848 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
849 title, le16_to_cpu(fc), header->addr1[5],
850 length, rssi, channel, bitrate);
851 else {
852 /* src/dst addresses assume managed mode */
853 IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
854 "len=%u, rssi=%d, tim=%lu usec, "
855 "phy=0x%02x, chnl=%d\n",
856 title, le16_to_cpu(fc), header->addr1[5],
857 header->addr3[5], length, rssi,
858 tsf_low - priv->scan_start_tsf,
859 phy_flags, channel);
860 }
861 }
862 if (print_dump)
863 iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
864}
865#endif
866
867/* 497/*
868 * returns non-zero if packet should be dropped 498 * returns non-zero if packet should be dropped
869 */ 499 */
@@ -911,305 +541,3 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
911 return 0; 541 return 0;
912} 542}
913EXPORT_SYMBOL(iwl_set_decrypted_flag); 543EXPORT_SYMBOL(iwl_set_decrypted_flag);
914
915static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
916{
917 u32 decrypt_out = 0;
918
919 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
920 RX_RES_STATUS_STATION_FOUND)
921 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
922 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
923
924 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
925
926 /* packet was not encrypted */
927 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
928 RX_RES_STATUS_SEC_TYPE_NONE)
929 return decrypt_out;
930
931 /* packet was encrypted with unknown alg */
932 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
933 RX_RES_STATUS_SEC_TYPE_ERR)
934 return decrypt_out;
935
936 /* decryption was not done in HW */
937 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
938 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
939 return decrypt_out;
940
941 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
942
943 case RX_RES_STATUS_SEC_TYPE_CCMP:
944 /* alg is CCM: check MIC only */
945 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
946 /* Bad MIC */
947 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
948 else
949 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
950
951 break;
952
953 case RX_RES_STATUS_SEC_TYPE_TKIP:
954 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
955 /* Bad TTAK */
956 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
957 break;
958 }
959 /* fall through if TTAK OK */
960 default:
961 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
962 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
963 else
964 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
965 break;
966 };
967
968 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
969 decrypt_in, decrypt_out);
970
971 return decrypt_out;
972}
973
974static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
975 struct ieee80211_hdr *hdr,
976 u16 len,
977 u32 ampdu_status,
978 struct iwl_rx_mem_buffer *rxb,
979 struct ieee80211_rx_status *stats)
980{
981 struct sk_buff *skb;
982 int ret = 0;
983 __le16 fc = hdr->frame_control;
984
985 /* We only process data packets if the interface is open */
986 if (unlikely(!priv->is_open)) {
987 IWL_DEBUG_DROP_LIMIT(priv,
988 "Dropping packet while interface is not open.\n");
989 return;
990 }
991
992 /* In case of HW accelerated crypto and bad decryption, drop */
993 if (!priv->cfg->mod_params->sw_crypto &&
994 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
995 return;
996
997 skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
998 if (!skb) {
999 IWL_ERR(priv, "alloc_skb failed\n");
1000 return;
1001 }
1002
1003 skb_reserve(skb, IWL_LINK_HDR_MAX);
1004 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
1005
1006 /* mac80211 currently doesn't support paged SKB. Convert it to
1007 * linear SKB for management frame and data frame requires
1008 * software decryption or software defragementation. */
1009 if (ieee80211_is_mgmt(fc) ||
1010 ieee80211_has_protected(fc) ||
1011 ieee80211_has_morefrags(fc) ||
1012 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
1013 (ieee80211_is_data_qos(fc) &&
1014 *ieee80211_get_qos_ctl(hdr) &
1015 IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
1016 ret = skb_linearize(skb);
1017 else
1018 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
1019 0 : -ENOMEM;
1020
1021 if (ret) {
1022 kfree_skb(skb);
1023 goto out;
1024 }
1025
1026 /*
1027 * XXX: We cannot touch the page and its virtual memory (hdr) after
1028 * here. It might have already been freed by the above skb change.
1029 */
1030
1031 iwl_update_stats(priv, false, fc, len);
1032 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
1033
1034 ieee80211_rx(priv->hw, skb);
1035 out:
1036 priv->alloc_rxb_page--;
1037 rxb->page = NULL;
1038}
1039
1040/* This is necessary only for a number of statistics, see the caller. */
1041static int iwl_is_network_packet(struct iwl_priv *priv,
1042 struct ieee80211_hdr *header)
1043{
1044 /* Filter incoming packets to determine if they are targeted toward
1045 * this network, discarding packets coming from ourselves */
1046 switch (priv->iw_mode) {
1047 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
1048 /* packets to our IBSS update information */
1049 return !compare_ether_addr(header->addr3, priv->bssid);
1050 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
1051 /* packets to our IBSS update information */
1052 return !compare_ether_addr(header->addr2, priv->bssid);
1053 default:
1054 return 1;
1055 }
1056}
1057
1058/* Called for REPLY_RX (legacy ABG frames), or
1059 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
1060void iwl_rx_reply_rx(struct iwl_priv *priv,
1061 struct iwl_rx_mem_buffer *rxb)
1062{
1063 struct ieee80211_hdr *header;
1064 struct ieee80211_rx_status rx_status;
1065 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1066 struct iwl_rx_phy_res *phy_res;
1067 __le32 rx_pkt_status;
1068 struct iwl4965_rx_mpdu_res_start *amsdu;
1069 u32 len;
1070 u32 ampdu_status;
1071 u32 rate_n_flags;
1072
1073 /**
1074 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
1075 * REPLY_RX: physical layer info is in this buffer
1076 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
1077 * command and cached in priv->last_phy_res
1078 *
1079 * Here we set up local variables depending on which command is
1080 * received.
1081 */
1082 if (pkt->hdr.cmd == REPLY_RX) {
1083 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
1084 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
1085 + phy_res->cfg_phy_cnt);
1086
1087 len = le16_to_cpu(phy_res->byte_count);
1088 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
1089 phy_res->cfg_phy_cnt + len);
1090 ampdu_status = le32_to_cpu(rx_pkt_status);
1091 } else {
1092 if (!priv->last_phy_res[0]) {
1093 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
1094 return;
1095 }
1096 phy_res = (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
1097 amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
1098 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
1099 len = le16_to_cpu(amsdu->byte_count);
1100 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
1101 ampdu_status = iwl_translate_rx_status(priv,
1102 le32_to_cpu(rx_pkt_status));
1103 }
1104
1105 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
1106 IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d/n",
1107 phy_res->cfg_phy_cnt);
1108 return;
1109 }
1110
1111 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
1112 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1113 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
1114 le32_to_cpu(rx_pkt_status));
1115 return;
1116 }
1117
1118 /* This will be used in several places later */
1119 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
1120
1121 /* rx_status carries information about the packet to mac80211 */
1122 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
1123 rx_status.freq =
1124 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
1125 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
1126 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1127 rx_status.rate_idx =
1128 iwl_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
1129 rx_status.flag = 0;
1130
1131 /* TSF isn't reliable. In order to allow smooth user experience,
1132 * this W/A doesn't propagate it to the mac80211 */
1133 /*rx_status.flag |= RX_FLAG_TSFT;*/
1134
1135 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
1136
1137 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1138 rx_status.signal = iwl_calc_rssi(priv, phy_res);
1139
1140 /* Meaningful noise values are available only from beacon statistics,
1141 * which are gathered only when associated, and indicate noise
1142 * only for the associated network channel ...
1143 * Ignore these noise values while scanning (other channels) */
1144 if (iwl_is_associated(priv) &&
1145 !test_bit(STATUS_SCANNING, &priv->status)) {
1146 rx_status.noise = priv->last_rx_noise;
1147 } else {
1148 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
1149 }
1150
1151 /* Reset beacon noise level if not associated. */
1152 if (!iwl_is_associated(priv))
1153 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
1154
1155#ifdef CONFIG_IWLWIFI_DEBUG
1156 /* Set "1" to report good data frames in groups of 100 */
1157 if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
1158 iwl_dbg_report_frame(priv, phy_res, len, header, 1);
1159#endif
1160 iwl_dbg_log_rx_data_frame(priv, len, header);
1161 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
1162 rx_status.signal, rx_status.noise,
1163 (unsigned long long)rx_status.mactime);
1164
1165 /*
1166 * "antenna number"
1167 *
1168 * It seems that the antenna field in the phy flags value
1169 * is actually a bit field. This is undefined by radiotap,
1170 * it wants an actual antenna number but I always get "7"
1171 * for most legacy frames I receive indicating that the
1172 * same frame was received on all three RX chains.
1173 *
1174 * I think this field should be removed in favor of a
1175 * new 802.11n radiotap field "RX chains" that is defined
1176 * as a bitmask.
1177 */
1178 rx_status.antenna =
1179 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
1180 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
1181
1182 /* set the preamble flag if appropriate */
1183 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1184 rx_status.flag |= RX_FLAG_SHORTPRE;
1185
1186 /* Set up the HT phy flags */
1187 if (rate_n_flags & RATE_MCS_HT_MSK)
1188 rx_status.flag |= RX_FLAG_HT;
1189 if (rate_n_flags & RATE_MCS_HT40_MSK)
1190 rx_status.flag |= RX_FLAG_40MHZ;
1191 if (rate_n_flags & RATE_MCS_SGI_MSK)
1192 rx_status.flag |= RX_FLAG_SHORT_GI;
1193
1194 if (iwl_is_network_packet(priv, header)) {
1195 priv->last_rx_rssi = rx_status.signal;
1196 priv->last_beacon_time = priv->ucode_beacon_time;
1197 priv->last_tsf = le64_to_cpu(phy_res->timestamp);
1198 }
1199
1200 iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1201 rxb, &rx_status);
1202}
1203EXPORT_SYMBOL(iwl_rx_reply_rx);
1204
1205/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
1206 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
1207void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
1208 struct iwl_rx_mem_buffer *rxb)
1209{
1210 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1211 priv->last_phy_res[0] = 1;
1212 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
1213 sizeof(struct iwl_rx_phy_res));
1214}
1215EXPORT_SYMBOL(iwl_rx_reply_rx_phy);
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 9ab0e412bf10..107e173112f6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -69,9 +69,8 @@ int iwl_scan_cancel(struct iwl_priv *priv)
69 } 69 }
70 70
71 if (test_bit(STATUS_SCANNING, &priv->status)) { 71 if (test_bit(STATUS_SCANNING, &priv->status)) {
72 if (!test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 72 if (!test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
73 IWL_DEBUG_SCAN(priv, "Queuing scan abort.\n"); 73 IWL_DEBUG_SCAN(priv, "Queuing scan abort.\n");
74 set_bit(STATUS_SCAN_ABORTING, &priv->status);
75 queue_work(priv->workqueue, &priv->abort_scan); 74 queue_work(priv->workqueue, &priv->abort_scan);
76 75
77 } else 76 } else
@@ -201,9 +200,6 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
201 le32_to_cpu(notif->statistics[0]), 200 le32_to_cpu(notif->statistics[0]),
202 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf); 201 le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
203#endif 202#endif
204
205 if (!priv->is_internal_short_scan)
206 priv->next_scan_jiffies = 0;
207} 203}
208 204
209/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */ 205/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
@@ -223,49 +219,24 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
223 /* The HW is no longer scanning */ 219 /* The HW is no longer scanning */
224 clear_bit(STATUS_SCAN_HW, &priv->status); 220 clear_bit(STATUS_SCAN_HW, &priv->status);
225 221
226 IWL_DEBUG_INFO(priv, "Scan pass on %sGHz took %dms\n", 222 IWL_DEBUG_INFO(priv, "Scan on %sGHz took %dms\n",
227 (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) ? 223 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
228 "2.4" : "5.2",
229 jiffies_to_msecs(elapsed_jiffies 224 jiffies_to_msecs(elapsed_jiffies
230 (priv->scan_pass_start, jiffies))); 225 (priv->scan_start, jiffies)));
231 226
232 /* Remove this scanned band from the list of pending 227 /*
233 * bands to scan, band G precedes A in order of scanning 228 * If a request to abort was given, or the scan did not succeed
234 * as seen in iwl_bg_request_scan */
235 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ))
236 priv->scan_bands &= ~BIT(IEEE80211_BAND_2GHZ);
237 else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ))
238 priv->scan_bands &= ~BIT(IEEE80211_BAND_5GHZ);
239
240 /* If a request to abort was given, or the scan did not succeed
241 * then we reset the scan state machine and terminate, 229 * then we reset the scan state machine and terminate,
242 * re-queuing another scan if one has been requested */ 230 * re-queuing another scan if one has been requested
243 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 231 */
232 if (test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status))
244 IWL_DEBUG_INFO(priv, "Aborted scan completed.\n"); 233 IWL_DEBUG_INFO(priv, "Aborted scan completed.\n");
245 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
246 } else {
247 /* If there are more bands on this scan pass reschedule */
248 if (priv->scan_bands)
249 goto reschedule;
250 }
251
252 if (!priv->is_internal_short_scan)
253 priv->next_scan_jiffies = 0;
254 234
255 IWL_DEBUG_INFO(priv, "Setting scan to off\n"); 235 IWL_DEBUG_INFO(priv, "Setting scan to off\n");
256 236
257 clear_bit(STATUS_SCANNING, &priv->status); 237 clear_bit(STATUS_SCANNING, &priv->status);
258 238
259 IWL_DEBUG_INFO(priv, "Scan took %dms\n",
260 jiffies_to_msecs(elapsed_jiffies(priv->scan_start, jiffies)));
261
262 queue_work(priv->workqueue, &priv->scan_completed); 239 queue_work(priv->workqueue, &priv->scan_completed);
263
264 return;
265
266reschedule:
267 priv->scan_pass_start = jiffies;
268 queue_work(priv->workqueue, &priv->request_scan);
269} 240}
270 241
271void iwl_setup_rx_scan_handlers(struct iwl_priv *priv) 242void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
@@ -294,7 +265,8 @@ inline u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
294EXPORT_SYMBOL(iwl_get_active_dwell_time); 265EXPORT_SYMBOL(iwl_get_active_dwell_time);
295 266
296u16 iwl_get_passive_dwell_time(struct iwl_priv *priv, 267u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
297 enum ieee80211_band band) 268 enum ieee80211_band band,
269 struct ieee80211_vif *vif)
298{ 270{
299 u16 passive = (band == IEEE80211_BAND_2GHZ) ? 271 u16 passive = (band == IEEE80211_BAND_2GHZ) ?
300 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 : 272 IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
@@ -304,7 +276,7 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
304 /* If we're associated, we clamp the maximum passive 276 /* If we're associated, we clamp the maximum passive
305 * dwell time to be 98% of the beacon interval (minus 277 * dwell time to be 98% of the beacon interval (minus
306 * 2 * channel tune time) */ 278 * 2 * channel tune time) */
307 passive = priv->beacon_int; 279 passive = vif ? vif->bss_conf.beacon_int : 0;
308 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive) 280 if ((passive > IWL_PASSIVE_DWELL_BASE) || !passive)
309 passive = IWL_PASSIVE_DWELL_BASE; 281 passive = IWL_PASSIVE_DWELL_BASE;
310 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2; 282 passive = (passive * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
@@ -314,150 +286,6 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
314} 286}
315EXPORT_SYMBOL(iwl_get_passive_dwell_time); 287EXPORT_SYMBOL(iwl_get_passive_dwell_time);
316 288
317static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
318 enum ieee80211_band band,
319 struct iwl_scan_channel *scan_ch)
320{
321 const struct ieee80211_supported_band *sband;
322 const struct iwl_channel_info *ch_info;
323 u16 passive_dwell = 0;
324 u16 active_dwell = 0;
325 int i, added = 0;
326 u16 channel = 0;
327
328 sband = iwl_get_hw_mode(priv, band);
329 if (!sband) {
330 IWL_ERR(priv, "invalid band\n");
331 return added;
332 }
333
334 active_dwell = iwl_get_active_dwell_time(priv, band, 0);
335 passive_dwell = iwl_get_passive_dwell_time(priv, band);
336
337 if (passive_dwell <= active_dwell)
338 passive_dwell = active_dwell + 1;
339
340 /* only scan single channel, good enough to reset the RF */
341 /* pick the first valid not in-use channel */
342 if (band == IEEE80211_BAND_5GHZ) {
343 for (i = 14; i < priv->channel_count; i++) {
344 if (priv->channel_info[i].channel !=
345 le16_to_cpu(priv->staging_rxon.channel)) {
346 channel = priv->channel_info[i].channel;
347 ch_info = iwl_get_channel_info(priv,
348 band, channel);
349 if (is_channel_valid(ch_info))
350 break;
351 }
352 }
353 } else {
354 for (i = 0; i < 14; i++) {
355 if (priv->channel_info[i].channel !=
356 le16_to_cpu(priv->staging_rxon.channel)) {
357 channel =
358 priv->channel_info[i].channel;
359 ch_info = iwl_get_channel_info(priv,
360 band, channel);
361 if (is_channel_valid(ch_info))
362 break;
363 }
364 }
365 }
366 if (channel) {
367 scan_ch->channel = cpu_to_le16(channel);
368 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
369 scan_ch->active_dwell = cpu_to_le16(active_dwell);
370 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
371 /* Set txpower levels to defaults */
372 scan_ch->dsp_atten = 110;
373 if (band == IEEE80211_BAND_5GHZ)
374 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
375 else
376 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
377 added++;
378 } else
379 IWL_ERR(priv, "no valid channel found\n");
380 return added;
381}
382
383static int iwl_get_channels_for_scan(struct iwl_priv *priv,
384 enum ieee80211_band band,
385 u8 is_active, u8 n_probes,
386 struct iwl_scan_channel *scan_ch)
387{
388 struct ieee80211_channel *chan;
389 const struct ieee80211_supported_band *sband;
390 const struct iwl_channel_info *ch_info;
391 u16 passive_dwell = 0;
392 u16 active_dwell = 0;
393 int added, i;
394 u16 channel;
395
396 sband = iwl_get_hw_mode(priv, band);
397 if (!sband)
398 return 0;
399
400 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
401 passive_dwell = iwl_get_passive_dwell_time(priv, band);
402
403 if (passive_dwell <= active_dwell)
404 passive_dwell = active_dwell + 1;
405
406 for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
407 chan = priv->scan_request->channels[i];
408
409 if (chan->band != band)
410 continue;
411
412 channel = ieee80211_frequency_to_channel(chan->center_freq);
413 scan_ch->channel = cpu_to_le16(channel);
414
415 ch_info = iwl_get_channel_info(priv, band, channel);
416 if (!is_channel_valid(ch_info)) {
417 IWL_DEBUG_SCAN(priv, "Channel %d is INVALID for this band.\n",
418 channel);
419 continue;
420 }
421
422 if (!is_active || is_channel_passive(ch_info) ||
423 (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
424 scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
425 else
426 scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
427
428 if (n_probes)
429 scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
430
431 scan_ch->active_dwell = cpu_to_le16(active_dwell);
432 scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
433
434 /* Set txpower levels to defaults */
435 scan_ch->dsp_atten = 110;
436
437 /* NOTE: if we were doing 6Mb OFDM for scans we'd use
438 * power level:
439 * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
440 */
441 if (band == IEEE80211_BAND_5GHZ)
442 scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
443 else
444 scan_ch->tx_gain = ((1 << 5) | (5 << 3));
445
446 IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
447 channel, le32_to_cpu(scan_ch->type),
448 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
449 "ACTIVE" : "PASSIVE",
450 (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
451 active_dwell : passive_dwell);
452
453 scan_ch++;
454 added++;
455 }
456
457 IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added);
458 return added;
459}
460
461void iwl_init_scan_params(struct iwl_priv *priv) 289void iwl_init_scan_params(struct iwl_priv *priv)
462{ 290{
463 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1; 291 u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
@@ -468,32 +296,36 @@ void iwl_init_scan_params(struct iwl_priv *priv)
468} 296}
469EXPORT_SYMBOL(iwl_init_scan_params); 297EXPORT_SYMBOL(iwl_init_scan_params);
470 298
471static int iwl_scan_initiate(struct iwl_priv *priv) 299static int iwl_scan_initiate(struct iwl_priv *priv, struct ieee80211_vif *vif)
472{ 300{
301 WARN_ON(!mutex_is_locked(&priv->mutex));
302
473 IWL_DEBUG_INFO(priv, "Starting scan...\n"); 303 IWL_DEBUG_INFO(priv, "Starting scan...\n");
474 set_bit(STATUS_SCANNING, &priv->status); 304 set_bit(STATUS_SCANNING, &priv->status);
475 priv->is_internal_short_scan = false; 305 priv->is_internal_short_scan = false;
476 priv->scan_start = jiffies; 306 priv->scan_start = jiffies;
477 priv->scan_pass_start = priv->scan_start;
478 307
479 queue_work(priv->workqueue, &priv->request_scan); 308 if (WARN_ON(!priv->cfg->ops->utils->request_scan))
309 return -EOPNOTSUPP;
310
311 priv->cfg->ops->utils->request_scan(priv, vif);
480 312
481 return 0; 313 return 0;
482} 314}
483 315
484#define IWL_DELAY_NEXT_SCAN (HZ*2)
485
486int iwl_mac_hw_scan(struct ieee80211_hw *hw, 316int iwl_mac_hw_scan(struct ieee80211_hw *hw,
487 struct cfg80211_scan_request *req) 317 struct ieee80211_vif *vif,
318 struct cfg80211_scan_request *req)
488{ 319{
489 unsigned long flags;
490 struct iwl_priv *priv = hw->priv; 320 struct iwl_priv *priv = hw->priv;
491 int ret, i; 321 int ret;
492 322
493 IWL_DEBUG_MAC80211(priv, "enter\n"); 323 IWL_DEBUG_MAC80211(priv, "enter\n");
494 324
325 if (req->n_channels == 0)
326 return -EINVAL;
327
495 mutex_lock(&priv->mutex); 328 mutex_lock(&priv->mutex);
496 spin_lock_irqsave(&priv->lock, flags);
497 329
498 if (!iwl_is_ready_rf(priv)) { 330 if (!iwl_is_ready_rf(priv)) {
499 ret = -EIO; 331 ret = -EIO;
@@ -513,30 +345,15 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
513 goto out_unlock; 345 goto out_unlock;
514 } 346 }
515 347
516 /* We don't schedule scan within next_scan_jiffies period. 348 /* mac80211 will only ask for one band at a time */
517 * Avoid scanning during possible EAPOL exchange, return 349 priv->scan_band = req->channels[0]->band;
518 * success immediately.
519 */
520 if (priv->next_scan_jiffies &&
521 time_after(priv->next_scan_jiffies, jiffies)) {
522 IWL_DEBUG_SCAN(priv, "scan rejected: within next scan period\n");
523 queue_work(priv->workqueue, &priv->scan_completed);
524 ret = 0;
525 goto out_unlock;
526 }
527
528 priv->scan_bands = 0;
529 for (i = 0; i < req->n_channels; i++)
530 priv->scan_bands |= BIT(req->channels[i]->band);
531
532 priv->scan_request = req; 350 priv->scan_request = req;
533 351
534 ret = iwl_scan_initiate(priv); 352 ret = iwl_scan_initiate(priv, vif);
535 353
536 IWL_DEBUG_MAC80211(priv, "leave\n"); 354 IWL_DEBUG_MAC80211(priv, "leave\n");
537 355
538out_unlock: 356out_unlock:
539 spin_unlock_irqrestore(&priv->lock, flags);
540 mutex_unlock(&priv->mutex); 357 mutex_unlock(&priv->mutex);
541 358
542 return ret; 359 return ret;
@@ -547,43 +364,47 @@ EXPORT_SYMBOL(iwl_mac_hw_scan);
547 * internal short scan, this function should only been called while associated. 364 * internal short scan, this function should only been called while associated.
548 * It will reset and tune the radio to prevent possible RF related problem 365 * It will reset and tune the radio to prevent possible RF related problem
549 */ 366 */
550int iwl_internal_short_hw_scan(struct iwl_priv *priv) 367void iwl_internal_short_hw_scan(struct iwl_priv *priv)
551{ 368{
552 int ret = 0; 369 queue_work(priv->workqueue, &priv->start_internal_scan);
370}
371
372void iwl_bg_start_internal_scan(struct work_struct *work)
373{
374 struct iwl_priv *priv =
375 container_of(work, struct iwl_priv, start_internal_scan);
376
377 mutex_lock(&priv->mutex);
553 378
554 if (!iwl_is_ready_rf(priv)) { 379 if (!iwl_is_ready_rf(priv)) {
555 ret = -EIO;
556 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); 380 IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
557 goto out; 381 goto unlock;
558 } 382 }
383
559 if (test_bit(STATUS_SCANNING, &priv->status)) { 384 if (test_bit(STATUS_SCANNING, &priv->status)) {
560 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); 385 IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
561 ret = -EAGAIN; 386 goto unlock;
562 goto out;
563 } 387 }
388
564 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) { 389 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
565 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n"); 390 IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
566 ret = -EAGAIN; 391 goto unlock;
567 goto out;
568 } 392 }
569 393
570 priv->scan_bands = 0; 394 priv->scan_band = priv->band;
571 if (priv->band == IEEE80211_BAND_5GHZ)
572 priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
573 else
574 priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
575 395
576 IWL_DEBUG_SCAN(priv, "Start internal short scan...\n"); 396 IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
577 set_bit(STATUS_SCANNING, &priv->status); 397 set_bit(STATUS_SCANNING, &priv->status);
578 priv->is_internal_short_scan = true; 398 priv->is_internal_short_scan = true;
579 queue_work(priv->workqueue, &priv->request_scan);
580 399
581out: 400 if (WARN_ON(!priv->cfg->ops->utils->request_scan))
582 return ret; 401 goto unlock;
583}
584EXPORT_SYMBOL(iwl_internal_short_hw_scan);
585 402
586#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 403 priv->cfg->ops->utils->request_scan(priv, NULL);
404 unlock:
405 mutex_unlock(&priv->mutex);
406}
407EXPORT_SYMBOL(iwl_bg_start_internal_scan);
587 408
588void iwl_bg_scan_check(struct work_struct *data) 409void iwl_bg_scan_check(struct work_struct *data)
589{ 410{
@@ -646,276 +467,15 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
646 if (WARN_ON(left < ie_len)) 467 if (WARN_ON(left < ie_len))
647 return len; 468 return len;
648 469
649 if (ies) 470 if (ies && ie_len) {
650 memcpy(pos, ies, ie_len); 471 memcpy(pos, ies, ie_len);
651 len += ie_len; 472 len += ie_len;
652 left -= ie_len; 473 }
653 474
654 return (u16)len; 475 return (u16)len;
655} 476}
656EXPORT_SYMBOL(iwl_fill_probe_req); 477EXPORT_SYMBOL(iwl_fill_probe_req);
657 478
658static void iwl_bg_request_scan(struct work_struct *data)
659{
660 struct iwl_priv *priv =
661 container_of(data, struct iwl_priv, request_scan);
662 struct iwl_host_cmd cmd = {
663 .id = REPLY_SCAN_CMD,
664 .len = sizeof(struct iwl_scan_cmd),
665 .flags = CMD_SIZE_HUGE,
666 };
667 struct iwl_scan_cmd *scan;
668 struct ieee80211_conf *conf = NULL;
669 int ret = 0;
670 u32 rate_flags = 0;
671 u16 cmd_len;
672 u16 rx_chain = 0;
673 enum ieee80211_band band;
674 u8 n_probes = 0;
675 u8 rx_ant = priv->hw_params.valid_rx_ant;
676 u8 rate;
677 bool is_active = false;
678 int chan_mod;
679 u8 active_chains;
680
681 conf = ieee80211_get_hw_conf(priv->hw);
682
683 mutex_lock(&priv->mutex);
684
685 cancel_delayed_work(&priv->scan_check);
686
687 if (!iwl_is_ready(priv)) {
688 IWL_WARN(priv, "request scan called when driver not ready.\n");
689 goto done;
690 }
691
692 /* Make sure the scan wasn't canceled before this queued work
693 * was given the chance to run... */
694 if (!test_bit(STATUS_SCANNING, &priv->status))
695 goto done;
696
697 /* This should never be called or scheduled if there is currently
698 * a scan active in the hardware. */
699 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
700 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
701 "Ignoring second request.\n");
702 ret = -EIO;
703 goto done;
704 }
705
706 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
707 IWL_DEBUG_SCAN(priv, "Aborting scan due to device shutdown\n");
708 goto done;
709 }
710
711 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
712 IWL_DEBUG_HC(priv, "Scan request while abort pending. Queuing.\n");
713 goto done;
714 }
715
716 if (iwl_is_rfkill(priv)) {
717 IWL_DEBUG_HC(priv, "Aborting scan due to RF Kill activation\n");
718 goto done;
719 }
720
721 if (!test_bit(STATUS_READY, &priv->status)) {
722 IWL_DEBUG_HC(priv, "Scan request while uninitialized. Queuing.\n");
723 goto done;
724 }
725
726 if (!priv->scan_bands) {
727 IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n");
728 goto done;
729 }
730
731 if (!priv->scan) {
732 priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) +
733 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
734 if (!priv->scan) {
735 ret = -ENOMEM;
736 goto done;
737 }
738 }
739 scan = priv->scan;
740 memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
741
742 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
743 scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
744
745 if (iwl_is_associated(priv)) {
746 u16 interval = 0;
747 u32 extra;
748 u32 suspend_time = 100;
749 u32 scan_suspend_time = 100;
750 unsigned long flags;
751
752 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
753 spin_lock_irqsave(&priv->lock, flags);
754 interval = priv->beacon_int;
755 spin_unlock_irqrestore(&priv->lock, flags);
756
757 scan->suspend_time = 0;
758 scan->max_out_time = cpu_to_le32(200 * 1024);
759 if (!interval)
760 interval = suspend_time;
761
762 extra = (suspend_time / interval) << 22;
763 scan_suspend_time = (extra |
764 ((suspend_time % interval) * 1024));
765 scan->suspend_time = cpu_to_le32(scan_suspend_time);
766 IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
767 scan_suspend_time, interval);
768 }
769
770 if (priv->is_internal_short_scan) {
771 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
772 } else if (priv->scan_request->n_ssids) {
773 int i, p = 0;
774 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
775 for (i = 0; i < priv->scan_request->n_ssids; i++) {
776 /* always does wildcard anyway */
777 if (!priv->scan_request->ssids[i].ssid_len)
778 continue;
779 scan->direct_scan[p].id = WLAN_EID_SSID;
780 scan->direct_scan[p].len =
781 priv->scan_request->ssids[i].ssid_len;
782 memcpy(scan->direct_scan[p].ssid,
783 priv->scan_request->ssids[i].ssid,
784 priv->scan_request->ssids[i].ssid_len);
785 n_probes++;
786 p++;
787 }
788 is_active = true;
789 } else
790 IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
791
792 scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
793 scan->tx_cmd.sta_id = priv->hw_params.bcast_sta_id;
794 scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
795
796
797 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) {
798 band = IEEE80211_BAND_2GHZ;
799 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
800 chan_mod = le32_to_cpu(priv->active_rxon.flags & RXON_FLG_CHANNEL_MODE_MSK)
801 >> RXON_FLG_CHANNEL_MODE_POS;
802 if (chan_mod == CHANNEL_MODE_PURE_40) {
803 rate = IWL_RATE_6M_PLCP;
804 } else {
805 rate = IWL_RATE_1M_PLCP;
806 rate_flags = RATE_MCS_CCK_MSK;
807 }
808 scan->good_CRC_th = 0;
809 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) {
810 band = IEEE80211_BAND_5GHZ;
811 rate = IWL_RATE_6M_PLCP;
812 /*
813 * If active scaning is requested but a certain channel
814 * is marked passive, we can do active scanning if we
815 * detect transmissions.
816 */
817 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0;
818
819 /* Force use of chains B and C (0x6) for scan Rx for 4965
820 * Avoid A (0x1) because of its off-channel reception on A-band.
821 */
822 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
823 rx_ant = ANT_BC;
824 } else {
825 IWL_WARN(priv, "Invalid scan band count\n");
826 goto done;
827 }
828
829 priv->scan_tx_ant[band] =
830 iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band]);
831 rate_flags |= iwl_ant_idx_to_flags(priv->scan_tx_ant[band]);
832 scan->tx_cmd.rate_n_flags = iwl_hw_set_rate_n_flags(rate, rate_flags);
833
834 /* In power save mode use one chain, otherwise use all chains */
835 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
836 /* rx_ant has been set to all valid chains previously */
837 active_chains = rx_ant &
838 ((u8)(priv->chain_noise_data.active_chains));
839 if (!active_chains)
840 active_chains = rx_ant;
841
842 IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
843 priv->chain_noise_data.active_chains);
844
845 rx_ant = first_antenna(active_chains);
846 }
847 /* MIMO is not used here, but value is required */
848 rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
849 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
850 rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
851 rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
852 scan->rx_chain = cpu_to_le16(rx_chain);
853 if (!priv->is_internal_short_scan) {
854 cmd_len = iwl_fill_probe_req(priv,
855 (struct ieee80211_mgmt *)scan->data,
856 priv->scan_request->ie,
857 priv->scan_request->ie_len,
858 IWL_MAX_SCAN_SIZE - sizeof(*scan));
859 } else {
860 cmd_len = iwl_fill_probe_req(priv,
861 (struct ieee80211_mgmt *)scan->data,
862 NULL, 0,
863 IWL_MAX_SCAN_SIZE - sizeof(*scan));
864
865 }
866 scan->tx_cmd.len = cpu_to_le16(cmd_len);
867 if (iwl_is_monitor_mode(priv))
868 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
869
870 scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
871 RXON_FILTER_BCON_AWARE_MSK);
872
873 if (priv->is_internal_short_scan) {
874 scan->channel_count =
875 iwl_get_single_channel_for_scan(priv, band,
876 (void *)&scan->data[le16_to_cpu(
877 scan->tx_cmd.len)]);
878 } else {
879 scan->channel_count =
880 iwl_get_channels_for_scan(priv, band,
881 is_active, n_probes,
882 (void *)&scan->data[le16_to_cpu(
883 scan->tx_cmd.len)]);
884 }
885 if (scan->channel_count == 0) {
886 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
887 goto done;
888 }
889
890 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
891 scan->channel_count * sizeof(struct iwl_scan_channel);
892 cmd.data = scan;
893 scan->len = cpu_to_le16(cmd.len);
894
895 set_bit(STATUS_SCAN_HW, &priv->status);
896 ret = iwl_send_cmd_sync(priv, &cmd);
897 if (ret)
898 goto done;
899
900 queue_delayed_work(priv->workqueue, &priv->scan_check,
901 IWL_SCAN_CHECK_WATCHDOG);
902
903 mutex_unlock(&priv->mutex);
904 return;
905
906 done:
907 /* Cannot perform scan. Make sure we clear scanning
908 * bits from status so next scan request can be performed.
909 * If we don't clear scanning status bit here all next scan
910 * will fail
911 */
912 clear_bit(STATUS_SCAN_HW, &priv->status);
913 clear_bit(STATUS_SCANNING, &priv->status);
914 /* inform mac80211 scan aborted */
915 queue_work(priv->workqueue, &priv->scan_completed);
916 mutex_unlock(&priv->mutex);
917}
918
919void iwl_bg_abort_scan(struct work_struct *work) 479void iwl_bg_abort_scan(struct work_struct *work)
920{ 480{
921 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan); 481 struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
@@ -963,8 +523,8 @@ EXPORT_SYMBOL(iwl_bg_scan_completed);
963void iwl_setup_scan_deferred_work(struct iwl_priv *priv) 523void iwl_setup_scan_deferred_work(struct iwl_priv *priv)
964{ 524{
965 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); 525 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
966 INIT_WORK(&priv->request_scan, iwl_bg_request_scan);
967 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); 526 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
527 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
968 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); 528 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
969} 529}
970EXPORT_SYMBOL(iwl_setup_scan_deferred_work); 530EXPORT_SYMBOL(iwl_setup_scan_deferred_work);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 4a6686fa6b36..85ed235ac901 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -29,57 +29,12 @@
29 29
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/sched.h>
32 33
33#include "iwl-dev.h" 34#include "iwl-dev.h"
34#include "iwl-core.h" 35#include "iwl-core.h"
35#include "iwl-sta.h" 36#include "iwl-sta.h"
36 37
37#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
38#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
39
40u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
41{
42 int i;
43 int start = 0;
44 int ret = IWL_INVALID_STATION;
45 unsigned long flags;
46
47 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) ||
48 (priv->iw_mode == NL80211_IFTYPE_AP))
49 start = IWL_STA_ID;
50
51 if (is_broadcast_ether_addr(addr))
52 return priv->hw_params.bcast_sta_id;
53
54 spin_lock_irqsave(&priv->sta_lock, flags);
55 for (i = start; i < priv->hw_params.max_stations; i++)
56 if (priv->stations[i].used &&
57 (!compare_ether_addr(priv->stations[i].sta.sta.addr,
58 addr))) {
59 ret = i;
60 goto out;
61 }
62
63 IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
64 addr, priv->num_stations);
65
66 out:
67 spin_unlock_irqrestore(&priv->sta_lock, flags);
68 return ret;
69}
70EXPORT_SYMBOL(iwl_find_station);
71
72int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
73{
74 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
75 return IWL_AP_ID;
76 } else {
77 u8 *da = ieee80211_get_DA(hdr);
78 return iwl_find_station(priv, da);
79 }
80}
81EXPORT_SYMBOL(iwl_get_ra_sta_id);
82
83/* priv->sta_lock must be held */ 38/* priv->sta_lock must be held */
84static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id) 39static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
85{ 40{
@@ -132,7 +87,7 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
132 sta_id); 87 sta_id);
133 break; 88 break;
134 case ADD_STA_MODIFY_NON_EXIST_STA: 89 case ADD_STA_MODIFY_NON_EXIST_STA:
135 IWL_ERR(priv, "Attempting to modify non-existing station %d \n", 90 IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
136 sta_id); 91 sta_id);
137 break; 92 break;
138 default: 93 default:
@@ -158,13 +113,6 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
158 priv->stations[sta_id].sta.mode == 113 priv->stations[sta_id].sta.mode ==
159 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", 114 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
160 addsta->sta.addr); 115 addsta->sta.addr);
161
162 /*
163 * Determine if we wanted to modify or add a station,
164 * if adding a station succeeded we have some more initialization
165 * to do when using station notification. TODO
166 */
167
168 spin_unlock_irqrestore(&priv->sta_lock, flags); 116 spin_unlock_irqrestore(&priv->sta_lock, flags);
169} 117}
170 118
@@ -190,6 +138,10 @@ int iwl_send_add_sta(struct iwl_priv *priv,
190 .flags = flags, 138 .flags = flags,
191 .data = data, 139 .data = data,
192 }; 140 };
141 u8 sta_id __maybe_unused = sta->sta.sta_id;
142
143 IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
144 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
193 145
194 if (flags & CMD_ASYNC) 146 if (flags & CMD_ASYNC)
195 cmd.callback = iwl_add_sta_callback; 147 cmd.callback = iwl_add_sta_callback;
@@ -263,18 +215,19 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
263} 215}
264 216
265/** 217/**
266 * iwl_add_station - Add station to tables in driver and device 218 * iwl_prep_station - Prepare station information for addition
219 *
220 * should be called with sta_lock held
267 */ 221 */
268u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags, 222static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
269 struct ieee80211_sta_ht_cap *ht_info) 223 bool is_ap,
224 struct ieee80211_sta_ht_cap *ht_info)
270{ 225{
271 struct iwl_station_entry *station; 226 struct iwl_station_entry *station;
272 unsigned long flags_spin;
273 int i; 227 int i;
274 int sta_id = IWL_INVALID_STATION; 228 u8 sta_id = IWL_INVALID_STATION;
275 u16 rate; 229 u16 rate;
276 230
277 spin_lock_irqsave(&priv->sta_lock, flags_spin);
278 if (is_ap) 231 if (is_ap)
279 sta_id = IWL_AP_ID; 232 sta_id = IWL_AP_ID;
280 else if (is_broadcast_ether_addr(addr)) 233 else if (is_broadcast_ether_addr(addr))
@@ -292,20 +245,32 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
292 sta_id = i; 245 sta_id = i;
293 } 246 }
294 247
295 /* These two conditions have the same outcome, but keep them separate 248 /*
296 since they have different meanings */ 249 * These two conditions have the same outcome, but keep them
297 if (unlikely(sta_id == IWL_INVALID_STATION)) { 250 * separate
298 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 251 */
252 if (unlikely(sta_id == IWL_INVALID_STATION))
253 return sta_id;
254
255 /*
256 * uCode is not able to deal with multiple requests to add a
257 * station. Keep track if one is in progress so that we do not send
258 * another.
259 */
260 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
261 IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
262 sta_id);
299 return sta_id; 263 return sta_id;
300 } 264 }
301 265
302 if (priv->stations[sta_id].used && 266 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
267 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
303 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) { 268 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
304 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 269 IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
270 sta_id, addr);
305 return sta_id; 271 return sta_id;
306 } 272 }
307 273
308
309 station = &priv->stations[sta_id]; 274 station = &priv->stations[sta_id];
310 station->used = IWL_STA_DRIVER_ACTIVE; 275 station->used = IWL_STA_DRIVER_ACTIVE;
311 IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n", 276 IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
@@ -319,10 +284,12 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
319 station->sta.sta.sta_id = sta_id; 284 station->sta.sta.sta_id = sta_id;
320 station->sta.station_flags = 0; 285 station->sta.station_flags = 0;
321 286
322 /* BCAST station and IBSS stations do not work in HT mode */ 287 /*
323 if (sta_id != priv->hw_params.bcast_sta_id && 288 * OK to call unconditionally, since local stations (IBSS BSSID
324 priv->iw_mode != NL80211_IFTYPE_ADHOC) 289 * STA and broadcast STA) pass in a NULL ht_info, and mac80211
325 iwl_set_ht_add_station(priv, sta_id, ht_info); 290 * doesn't allow HT IBSS.
291 */
292 iwl_set_ht_add_station(priv, sta_id, ht_info);
326 293
327 /* 3945 only */ 294 /* 3945 only */
328 rate = (priv->band == IEEE80211_BAND_5GHZ) ? 295 rate = (priv->band == IEEE80211_BAND_5GHZ) ?
@@ -330,86 +297,221 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
330 /* Turn on both antennas for the station... */ 297 /* Turn on both antennas for the station... */
331 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); 298 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
332 299
300 return sta_id;
301
302}
303
304#define STA_WAIT_TIMEOUT (HZ/2)
305
306/**
307 * iwl_add_station_common -
308 */
309int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
310 bool is_ap,
311 struct ieee80211_sta_ht_cap *ht_info,
312 u8 *sta_id_r)
313{
314 struct iwl_station_entry *station;
315 unsigned long flags_spin;
316 int ret = 0;
317 u8 sta_id;
318
319 *sta_id_r = 0;
320 spin_lock_irqsave(&priv->sta_lock, flags_spin);
321 sta_id = iwl_prep_station(priv, addr, is_ap, ht_info);
322 if (sta_id == IWL_INVALID_STATION) {
323 IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
324 addr);
325 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
326 return -EINVAL;
327 }
328
329 /*
330 * uCode is not able to deal with multiple requests to add a
331 * station. Keep track if one is in progress so that we do not send
332 * another.
333 */
334 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
335 IWL_DEBUG_INFO(priv, "STA %d already in process of being added.\n",
336 sta_id);
337 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
338 return -EEXIST;
339 }
340
341 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
342 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
343 IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
344 sta_id, addr);
345 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
346 return -EEXIST;
347 }
348
349 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
350 station = &priv->stations[sta_id];
333 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 351 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
334 352
335 /* Add station to device's station table */ 353 /* Add station to device's station table */
336 iwl_send_add_sta(priv, &station->sta, flags); 354 ret = iwl_send_add_sta(priv, &station->sta, CMD_SYNC);
337 return sta_id; 355 if (ret) {
356 IWL_ERR(priv, "Adding station %pM failed.\n", station->sta.sta.addr);
357 spin_lock_irqsave(&priv->sta_lock, flags_spin);
358 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
359 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
360 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
361 }
362 *sta_id_r = sta_id;
363 return ret;
364}
365EXPORT_SYMBOL(iwl_add_station_common);
366
367static struct iwl_link_quality_cmd *iwl_sta_alloc_lq(struct iwl_priv *priv,
368 u8 sta_id)
369{
370 int i, r;
371 struct iwl_link_quality_cmd *link_cmd;
372 u32 rate_flags;
373
374 link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
375 if (!link_cmd) {
376 IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
377 return NULL;
378 }
379 /* Set up the rate scaling to start at selected rate, fall back
380 * all the way down to 1M in IEEE order, and then spin on 1M */
381 if (priv->band == IEEE80211_BAND_5GHZ)
382 r = IWL_RATE_6M_INDEX;
383 else
384 r = IWL_RATE_1M_INDEX;
338 385
386 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
387 rate_flags = 0;
388 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
389 rate_flags |= RATE_MCS_CCK_MSK;
390
391 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
392 RATE_MCS_ANT_POS;
393
394 link_cmd->rs_table[i].rate_n_flags =
395 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
396 r = iwl_get_prev_ieee_rate(r);
397 }
398
399 link_cmd->general_params.single_stream_ant_msk =
400 first_antenna(priv->hw_params.valid_tx_ant);
401
402 link_cmd->general_params.dual_stream_ant_msk =
403 priv->hw_params.valid_tx_ant &
404 ~first_antenna(priv->hw_params.valid_tx_ant);
405 if (!link_cmd->general_params.dual_stream_ant_msk) {
406 link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
407 } else if (num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
408 link_cmd->general_params.dual_stream_ant_msk =
409 priv->hw_params.valid_tx_ant;
410 }
411
412 link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
413 link_cmd->agg_params.agg_time_limit =
414 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
415
416 link_cmd->sta_id = sta_id;
417
418 return link_cmd;
339} 419}
340EXPORT_SYMBOL(iwl_add_station);
341 420
342static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const u8 *addr) 421/*
422 * iwl_add_bssid_station - Add the special IBSS BSSID station
423 *
424 * Function sleeps.
425 */
426int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
427 u8 *sta_id_r)
343{ 428{
429 int ret;
430 u8 sta_id;
431 struct iwl_link_quality_cmd *link_cmd;
344 unsigned long flags; 432 unsigned long flags;
345 u8 sta_id = iwl_find_station(priv, addr);
346 433
347 BUG_ON(sta_id == IWL_INVALID_STATION); 434 if (*sta_id_r)
435 *sta_id_r = IWL_INVALID_STATION;
348 436
349 IWL_DEBUG_ASSOC(priv, "Removed STA from Ucode: %pM\n", addr); 437 ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id);
438 if (ret) {
439 IWL_ERR(priv, "Unable to add station %pM\n", addr);
440 return ret;
441 }
442
443 if (sta_id_r)
444 *sta_id_r = sta_id;
350 445
351 spin_lock_irqsave(&priv->sta_lock, flags); 446 spin_lock_irqsave(&priv->sta_lock, flags);
447 priv->stations[sta_id].used |= IWL_STA_LOCAL;
448 spin_unlock_irqrestore(&priv->sta_lock, flags);
352 449
353 /* Ucode must be active and driver must be non active */ 450 if (init_rs) {
354 if (priv->stations[sta_id].used != IWL_STA_UCODE_ACTIVE) 451 /* Set up default rate scaling table in device's station table */
355 IWL_ERR(priv, "removed non active STA %d\n", sta_id); 452 link_cmd = iwl_sta_alloc_lq(priv, sta_id);
453 if (!link_cmd) {
454 IWL_ERR(priv, "Unable to initialize rate scaling for station %pM.\n",
455 addr);
456 return -ENOMEM;
457 }
356 458
357 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; 459 ret = iwl_send_lq_cmd(priv, link_cmd, CMD_SYNC, true);
460 if (ret)
461 IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
358 462
359 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry)); 463 spin_lock_irqsave(&priv->sta_lock, flags);
360 spin_unlock_irqrestore(&priv->sta_lock, flags); 464 priv->stations[sta_id].lq = link_cmd;
465 spin_unlock_irqrestore(&priv->sta_lock, flags);
466 }
467
468 return 0;
361} 469}
470EXPORT_SYMBOL(iwl_add_bssid_station);
362 471
363static void iwl_remove_sta_callback(struct iwl_priv *priv, 472/**
364 struct iwl_device_cmd *cmd, 473 * iwl_sta_ucode_deactivate - deactivate ucode status for a station
365 struct iwl_rx_packet *pkt) 474 *
475 * priv->sta_lock must be held
476 */
477static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
366{ 478{
367 struct iwl_rem_sta_cmd *rm_sta = 479 /* Ucode must be active and driver must be non active */
368 (struct iwl_rem_sta_cmd *)cmd->cmd.payload; 480 if ((priv->stations[sta_id].used &
369 const u8 *addr = rm_sta->addr; 481 (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) != IWL_STA_UCODE_ACTIVE)
482 IWL_ERR(priv, "removed non active STA %u\n", sta_id);
370 483
371 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 484 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
372 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
373 pkt->hdr.flags);
374 return;
375 }
376 485
377 switch (pkt->u.rem_sta.status) { 486 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
378 case REM_STA_SUCCESS_MSK: 487 IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
379 iwl_sta_ucode_deactivate(priv, addr);
380 break;
381 default:
382 IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
383 break;
384 }
385} 488}
386 489
387static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr, 490static int iwl_send_remove_station(struct iwl_priv *priv,
388 u8 flags) 491 struct iwl_station_entry *station)
389{ 492{
390 struct iwl_rx_packet *pkt; 493 struct iwl_rx_packet *pkt;
391 int ret; 494 int ret;
392 495
496 unsigned long flags_spin;
393 struct iwl_rem_sta_cmd rm_sta_cmd; 497 struct iwl_rem_sta_cmd rm_sta_cmd;
394 498
395 struct iwl_host_cmd cmd = { 499 struct iwl_host_cmd cmd = {
396 .id = REPLY_REMOVE_STA, 500 .id = REPLY_REMOVE_STA,
397 .len = sizeof(struct iwl_rem_sta_cmd), 501 .len = sizeof(struct iwl_rem_sta_cmd),
398 .flags = flags, 502 .flags = CMD_SYNC,
399 .data = &rm_sta_cmd, 503 .data = &rm_sta_cmd,
400 }; 504 };
401 505
402 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); 506 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
403 rm_sta_cmd.num_sta = 1; 507 rm_sta_cmd.num_sta = 1;
404 memcpy(&rm_sta_cmd.addr, addr , ETH_ALEN); 508 memcpy(&rm_sta_cmd.addr, &station->sta.sta.addr , ETH_ALEN);
509
510 cmd.flags |= CMD_WANT_SKB;
405 511
406 if (flags & CMD_ASYNC)
407 cmd.callback = iwl_remove_sta_callback;
408 else
409 cmd.flags |= CMD_WANT_SKB;
410 ret = iwl_send_cmd(priv, &cmd); 512 ret = iwl_send_cmd(priv, &cmd);
411 513
412 if (ret || (flags & CMD_ASYNC)) 514 if (ret)
413 return ret; 515 return ret;
414 516
415 pkt = (struct iwl_rx_packet *)cmd.reply_page; 517 pkt = (struct iwl_rx_packet *)cmd.reply_page;
@@ -422,7 +524,9 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
422 if (!ret) { 524 if (!ret) {
423 switch (pkt->u.rem_sta.status) { 525 switch (pkt->u.rem_sta.status) {
424 case REM_STA_SUCCESS_MSK: 526 case REM_STA_SUCCESS_MSK:
425 iwl_sta_ucode_deactivate(priv, addr); 527 spin_lock_irqsave(&priv->sta_lock, flags_spin);
528 iwl_sta_ucode_deactivate(priv, station->sta.sta.sta_id);
529 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
426 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); 530 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
427 break; 531 break;
428 default: 532 default:
@@ -439,45 +543,48 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
439/** 543/**
440 * iwl_remove_station - Remove driver's knowledge of station. 544 * iwl_remove_station - Remove driver's knowledge of station.
441 */ 545 */
442int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap) 546int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
547 const u8 *addr)
443{ 548{
444 int sta_id = IWL_INVALID_STATION; 549 struct iwl_station_entry *station;
445 int i, ret = -EINVAL;
446 unsigned long flags; 550 unsigned long flags;
447 551
448 spin_lock_irqsave(&priv->sta_lock, flags); 552 if (!iwl_is_ready(priv)) {
553 IWL_DEBUG_INFO(priv,
554 "Unable to remove station %pM, device not ready.\n",
555 addr);
556 /*
557 * It is typical for stations to be removed when we are
558 * going down. Return success since device will be down
559 * soon anyway
560 */
561 return 0;
562 }
449 563
450 if (is_ap) 564 IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
451 sta_id = IWL_AP_ID; 565 sta_id, addr);
452 else if (is_broadcast_ether_addr(addr))
453 sta_id = priv->hw_params.bcast_sta_id;
454 else
455 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
456 if (priv->stations[i].used &&
457 !compare_ether_addr(priv->stations[i].sta.sta.addr,
458 addr)) {
459 sta_id = i;
460 break;
461 }
462 566
463 if (unlikely(sta_id == IWL_INVALID_STATION)) 567 if (WARN_ON(sta_id == IWL_INVALID_STATION))
464 goto out; 568 return -EINVAL;
465 569
466 IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n", 570 spin_lock_irqsave(&priv->sta_lock, flags);
467 sta_id, addr);
468 571
469 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 572 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
470 IWL_ERR(priv, "Removing %pM but non DRIVER active\n", 573 IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
471 addr); 574 addr);
472 goto out; 575 goto out_err;
473 } 576 }
474 577
475 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { 578 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
476 IWL_ERR(priv, "Removing %pM but non UCODE active\n", 579 IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
477 addr); 580 addr);
478 goto out; 581 goto out_err;
479 } 582 }
480 583
584 if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
585 kfree(priv->stations[sta_id].lq);
586 priv->stations[sta_id].lq = NULL;
587 }
481 588
482 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE; 589 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
483 590
@@ -485,47 +592,112 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
485 592
486 BUG_ON(priv->num_stations < 0); 593 BUG_ON(priv->num_stations < 0);
487 594
595 station = &priv->stations[sta_id];
488 spin_unlock_irqrestore(&priv->sta_lock, flags); 596 spin_unlock_irqrestore(&priv->sta_lock, flags);
489 597
490 ret = iwl_send_remove_station(priv, addr, CMD_ASYNC); 598 return iwl_send_remove_station(priv, station);
491 return ret; 599out_err:
492out:
493 spin_unlock_irqrestore(&priv->sta_lock, flags); 600 spin_unlock_irqrestore(&priv->sta_lock, flags);
494 return ret; 601 return -EINVAL;
495} 602}
603EXPORT_SYMBOL_GPL(iwl_remove_station);
496 604
497/** 605/**
498 * iwl_clear_stations_table - Clear the driver's station table 606 * iwl_clear_ucode_stations - clear ucode station table bits
499 * 607 *
500 * NOTE: This does not clear or otherwise alter the device's station table. 608 * This function clears all the bits in the driver indicating
609 * which stations are active in the ucode. Call when something
610 * other than explicit station management would cause this in
611 * the ucode, e.g. unassociated RXON.
501 */ 612 */
502void iwl_clear_stations_table(struct iwl_priv *priv) 613void iwl_clear_ucode_stations(struct iwl_priv *priv)
503{ 614{
504 unsigned long flags;
505 int i; 615 int i;
616 unsigned long flags_spin;
617 bool cleared = false;
506 618
507 spin_lock_irqsave(&priv->sta_lock, flags); 619 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
508 620
509 if (iwl_is_alive(priv) && 621 spin_lock_irqsave(&priv->sta_lock, flags_spin);
510 !test_bit(STATUS_EXIT_PENDING, &priv->status) && 622 for (i = 0; i < priv->hw_params.max_stations; i++) {
511 iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL)) 623 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
512 IWL_ERR(priv, "Couldn't clear the station table\n"); 624 IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i);
625 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
626 cleared = true;
627 }
628 }
629 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
630
631 if (!cleared)
632 IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
633}
634EXPORT_SYMBOL(iwl_clear_ucode_stations);
635
636/**
637 * iwl_restore_stations() - Restore driver known stations to device
638 *
639 * All stations considered active by driver, but not present in ucode, is
640 * restored.
641 *
642 * Function sleeps.
643 */
644void iwl_restore_stations(struct iwl_priv *priv)
645{
646 struct iwl_station_entry *station;
647 unsigned long flags_spin;
648 int i;
649 bool found = false;
650 int ret;
513 651
514 priv->num_stations = 0; 652 if (!iwl_is_ready(priv)) {
515 memset(priv->stations, 0, sizeof(priv->stations)); 653 IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
654 return;
655 }
516 656
517 /* clean ucode key table bit map */ 657 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
518 priv->ucode_key_table = 0; 658 spin_lock_irqsave(&priv->sta_lock, flags_spin);
659 for (i = 0; i < priv->hw_params.max_stations; i++) {
660 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
661 !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
662 IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
663 priv->stations[i].sta.sta.addr);
664 priv->stations[i].sta.mode = 0;
665 priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
666 found = true;
667 }
668 }
519 669
520 /* keep track of static keys */ 670 for (i = 0; i < priv->hw_params.max_stations; i++) {
521 for (i = 0; i < WEP_KEYS_MAX ; i++) { 671 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
522 if (priv->wep_keys[i].key_size) 672 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
523 set_bit(i, &priv->ucode_key_table); 673 station = &priv->stations[i];
674 ret = iwl_send_add_sta(priv, &priv->stations[i].sta, CMD_SYNC);
675 if (ret) {
676 IWL_ERR(priv, "Adding station %pM failed.\n",
677 station->sta.sta.addr);
678 spin_lock_irqsave(&priv->sta_lock, flags_spin);
679 priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
680 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
681 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
682 }
683 /*
684 * Rate scaling has already been initialized, send
685 * current LQ command
686 */
687 if (station->lq)
688 iwl_send_lq_cmd(priv, station->lq, CMD_SYNC, true);
689 spin_lock_irqsave(&priv->sta_lock, flags_spin);
690 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
691 }
524 } 692 }
525 693
526 spin_unlock_irqrestore(&priv->sta_lock, flags); 694 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
695 if (!found)
696 IWL_DEBUG_INFO(priv, "Restoring all known stations .... no stations to be restored.\n");
697 else
698 IWL_DEBUG_INFO(priv, "Restoring all known stations .... complete.\n");
527} 699}
528EXPORT_SYMBOL(iwl_clear_stations_table); 700EXPORT_SYMBOL(iwl_restore_stations);
529 701
530int iwl_get_free_ucode_key_index(struct iwl_priv *priv) 702int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
531{ 703{
@@ -539,7 +711,7 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
539} 711}
540EXPORT_SYMBOL(iwl_get_free_ucode_key_index); 712EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
541 713
542int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty) 714static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
543{ 715{
544 int i, not_empty = 0; 716 int i, not_empty = 0;
545 u8 buff[sizeof(struct iwl_wep_cmd) + 717 u8 buff[sizeof(struct iwl_wep_cmd) +
@@ -549,9 +721,11 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
549 struct iwl_host_cmd cmd = { 721 struct iwl_host_cmd cmd = {
550 .id = REPLY_WEPKEY, 722 .id = REPLY_WEPKEY,
551 .data = wep_cmd, 723 .data = wep_cmd,
552 .flags = CMD_ASYNC, 724 .flags = CMD_SYNC,
553 }; 725 };
554 726
727 might_sleep();
728
555 memset(wep_cmd, 0, cmd_size + 729 memset(wep_cmd, 0, cmd_size +
556 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX)); 730 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
557 731
@@ -581,33 +755,34 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
581 else 755 else
582 return 0; 756 return 0;
583} 757}
584EXPORT_SYMBOL(iwl_send_static_wepkey_cmd); 758
759int iwl_restore_default_wep_keys(struct iwl_priv *priv)
760{
761 WARN_ON(!mutex_is_locked(&priv->mutex));
762
763 return iwl_send_static_wepkey_cmd(priv, 0);
764}
765EXPORT_SYMBOL(iwl_restore_default_wep_keys);
585 766
586int iwl_remove_default_wep_key(struct iwl_priv *priv, 767int iwl_remove_default_wep_key(struct iwl_priv *priv,
587 struct ieee80211_key_conf *keyconf) 768 struct ieee80211_key_conf *keyconf)
588{ 769{
589 int ret; 770 int ret;
590 unsigned long flags;
591 771
592 spin_lock_irqsave(&priv->sta_lock, flags); 772 WARN_ON(!mutex_is_locked(&priv->mutex));
773
593 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", 774 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
594 keyconf->keyidx); 775 keyconf->keyidx);
595 776
596 if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
597 IWL_ERR(priv, "index %d not used in uCode key table.\n",
598 keyconf->keyidx);
599
600 priv->default_wep_key--;
601 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); 777 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
602 if (iwl_is_rfkill(priv)) { 778 if (iwl_is_rfkill(priv)) {
603 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); 779 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
604 spin_unlock_irqrestore(&priv->sta_lock, flags); 780 /* but keys in device are clear anyway so return success */
605 return 0; 781 return 0;
606 } 782 }
607 ret = iwl_send_static_wepkey_cmd(priv, 1); 783 ret = iwl_send_static_wepkey_cmd(priv, 1);
608 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", 784 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
609 keyconf->keyidx, ret); 785 keyconf->keyidx, ret);
610 spin_unlock_irqrestore(&priv->sta_lock, flags);
611 786
612 return ret; 787 return ret;
613} 788}
@@ -617,7 +792,8 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
617 struct ieee80211_key_conf *keyconf) 792 struct ieee80211_key_conf *keyconf)
618{ 793{
619 int ret; 794 int ret;
620 unsigned long flags; 795
796 WARN_ON(!mutex_is_locked(&priv->mutex));
621 797
622 if (keyconf->keylen != WEP_KEY_LEN_128 && 798 if (keyconf->keylen != WEP_KEY_LEN_128 &&
623 keyconf->keylen != WEP_KEY_LEN_64) { 799 keyconf->keylen != WEP_KEY_LEN_64) {
@@ -629,13 +805,6 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
629 keyconf->hw_key_idx = HW_KEY_DEFAULT; 805 keyconf->hw_key_idx = HW_KEY_DEFAULT;
630 priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP; 806 priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP;
631 807
632 spin_lock_irqsave(&priv->sta_lock, flags);
633 priv->default_wep_key++;
634
635 if (test_and_set_bit(keyconf->keyidx, &priv->ucode_key_table))
636 IWL_ERR(priv, "index %d already used in uCode key table.\n",
637 keyconf->keyidx);
638
639 priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; 808 priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
640 memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key, 809 memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key,
641 keyconf->keylen); 810 keyconf->keylen);
@@ -643,7 +812,6 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
643 ret = iwl_send_static_wepkey_cmd(priv, 0); 812 ret = iwl_send_static_wepkey_cmd(priv, 0);
644 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n", 813 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
645 keyconf->keylen, keyconf->keyidx, ret); 814 keyconf->keylen, keyconf->keyidx, ret);
646 spin_unlock_irqrestore(&priv->sta_lock, flags);
647 815
648 return ret; 816 return ret;
649} 817}
@@ -798,18 +966,23 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
798 966
799void iwl_update_tkip_key(struct iwl_priv *priv, 967void iwl_update_tkip_key(struct iwl_priv *priv,
800 struct ieee80211_key_conf *keyconf, 968 struct ieee80211_key_conf *keyconf,
801 const u8 *addr, u32 iv32, u16 *phase1key) 969 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
802{ 970{
803 u8 sta_id = IWL_INVALID_STATION; 971 u8 sta_id;
804 unsigned long flags; 972 unsigned long flags;
805 int i; 973 int i;
806 974
807 sta_id = iwl_find_station(priv, addr); 975 if (sta) {
808 if (sta_id == IWL_INVALID_STATION) { 976 sta_id = iwl_sta_id(sta);
809 IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n", 977
810 addr); 978 if (sta_id == IWL_INVALID_STATION) {
811 return; 979 IWL_DEBUG_MAC80211(priv, "leave - %pM not initialised.\n",
812 } 980 sta->addr);
981 return;
982 }
983 } else
984 sta_id = priv->hw_params.bcast_sta_id;
985
813 986
814 if (iwl_scan_cancel(priv)) { 987 if (iwl_scan_cancel(priv)) {
815 /* cancel scan failed, just live w/ bad key and rely 988 /* cancel scan failed, just live w/ bad key and rely
@@ -885,7 +1058,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
885 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 1058 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
886 1059
887 if (iwl_is_rfkill(priv)) { 1060 if (iwl_is_rfkill(priv)) {
888 IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n"); 1061 IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
889 spin_unlock_irqrestore(&priv->sta_lock, flags); 1062 spin_unlock_irqrestore(&priv->sta_lock, flags);
890 return 0; 1063 return 0;
891 } 1064 }
@@ -948,253 +1121,149 @@ static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
948} 1121}
949#endif 1122#endif
950 1123
951int iwl_send_lq_cmd(struct iwl_priv *priv,
952 struct iwl_link_quality_cmd *lq, u8 flags)
953{
954 struct iwl_host_cmd cmd = {
955 .id = REPLY_TX_LINK_QUALITY_CMD,
956 .len = sizeof(struct iwl_link_quality_cmd),
957 .flags = flags,
958 .data = lq,
959 };
960
961 if ((lq->sta_id == 0xFF) &&
962 (priv->iw_mode == NL80211_IFTYPE_ADHOC))
963 return -EINVAL;
964
965 if (lq->sta_id == 0xFF)
966 lq->sta_id = IWL_AP_ID;
967
968 iwl_dump_lq_cmd(priv, lq);
969
970 if (iwl_is_associated(priv) && priv->assoc_station_added)
971 return iwl_send_cmd(priv, &cmd);
972
973 return 0;
974}
975EXPORT_SYMBOL(iwl_send_lq_cmd);
976
977/** 1124/**
978 * iwl_sta_init_lq - Initialize a station's hardware rate table 1125 * is_lq_table_valid() - Test one aspect of LQ cmd for validity
979 *
980 * The uCode's station table contains a table of fallback rates
981 * for automatic fallback during transmission.
982 *
983 * NOTE: This sets up a default set of values. These will be replaced later
984 * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
985 * rc80211_simple.
986 * 1126 *
987 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before 1127 * It sometimes happens when a HT rate has been in use and we
988 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, 1128 * loose connectivity with AP then mac80211 will first tell us that the
989 * which requires station table entry to exist). 1129 * current channel is not HT anymore before removing the station. In such a
1130 * scenario the RXON flags will be updated to indicate we are not
1131 * communicating HT anymore, but the LQ command may still contain HT rates.
1132 * Test for this to prevent driver from sending LQ command between the time
1133 * RXON flags are updated and when LQ command is updated.
990 */ 1134 */
991static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap) 1135static bool is_lq_table_valid(struct iwl_priv *priv,
1136 struct iwl_link_quality_cmd *lq)
992{ 1137{
993 int i, r; 1138 int i;
994 struct iwl_link_quality_cmd link_cmd = { 1139 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
995 .reserved1 = 0,
996 };
997 u32 rate_flags;
998 1140
999 /* Set up the rate scaling to start at selected rate, fall back 1141 if (ht_conf->is_ht)
1000 * all the way down to 1M in IEEE order, and then spin on 1M */ 1142 return true;
1001 if (is_ap)
1002 r = IWL_RATE_54M_INDEX;
1003 else if (priv->band == IEEE80211_BAND_5GHZ)
1004 r = IWL_RATE_6M_INDEX;
1005 else
1006 r = IWL_RATE_1M_INDEX;
1007 1143
1144 IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
1145 priv->active_rxon.channel);
1008 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) { 1146 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
1009 rate_flags = 0; 1147 if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
1010 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE) 1148 IWL_DEBUG_INFO(priv,
1011 rate_flags |= RATE_MCS_CCK_MSK; 1149 "index %d of LQ expects HT channel\n",
1012 1150 i);
1013 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) << 1151 return false;
1014 RATE_MCS_ANT_POS; 1152 }
1015
1016 link_cmd.rs_table[i].rate_n_flags =
1017 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
1018 r = iwl_get_prev_ieee_rate(r);
1019 } 1153 }
1020 1154 return true;
1021 link_cmd.general_params.single_stream_ant_msk =
1022 first_antenna(priv->hw_params.valid_tx_ant);
1023 link_cmd.general_params.dual_stream_ant_msk = 3;
1024 link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
1025 link_cmd.agg_params.agg_time_limit =
1026 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
1027
1028 /* Update the rate scaling for control frame Tx to AP */
1029 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
1030
1031 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
1032 sizeof(link_cmd), &link_cmd, NULL);
1033} 1155}
1034 1156
1035/** 1157/**
1036 * iwl_rxon_add_station - add station into station table. 1158 * iwl_send_lq_cmd() - Send link quality command
1159 * @init: This command is sent as part of station initialization right
1160 * after station has been added.
1037 * 1161 *
1038 * there is only one AP station with id= IWL_AP_ID 1162 * The link quality command is sent as the last step of station creation.
1039 * NOTE: mutex must be held before calling this function 1163 * This is the special case in which init is set and we call a callback in
1164 * this case to clear the state indicating that station creation is in
1165 * progress.
1040 */ 1166 */
1041int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap) 1167int iwl_send_lq_cmd(struct iwl_priv *priv,
1168 struct iwl_link_quality_cmd *lq, u8 flags, bool init)
1042{ 1169{
1043 struct ieee80211_sta *sta; 1170 int ret = 0;
1044 struct ieee80211_sta_ht_cap ht_config; 1171 unsigned long flags_spin;
1045 struct ieee80211_sta_ht_cap *cur_ht_config = NULL;
1046 u8 sta_id;
1047 1172
1048 /* 1173 struct iwl_host_cmd cmd = {
1049 * Set HT capabilities. It is ok to set this struct even if not using 1174 .id = REPLY_TX_LINK_QUALITY_CMD,
1050 * HT config: the priv->current_ht_config.is_ht flag will just be false 1175 .len = sizeof(struct iwl_link_quality_cmd),
1051 */ 1176 .flags = flags,
1052 rcu_read_lock(); 1177 .data = lq,
1053 sta = ieee80211_find_sta(priv->vif, addr); 1178 };
1054 if (sta) {
1055 memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
1056 cur_ht_config = &ht_config;
1057 }
1058 rcu_read_unlock();
1059 1179
1060 /* Add station to device's station table */ 1180 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
1061 sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config); 1181 return -EINVAL;
1062 1182
1063 /* Set up default rate scaling table in device's station table */ 1183 iwl_dump_lq_cmd(priv, lq);
1064 iwl_sta_init_lq(priv, addr, is_ap); 1184 BUG_ON(init && (cmd.flags & CMD_ASYNC));
1065 1185
1066 return sta_id; 1186 if (is_lq_table_valid(priv, lq))
1187 ret = iwl_send_cmd(priv, &cmd);
1188 else
1189 ret = -EINVAL;
1190
1191 if (cmd.flags & CMD_ASYNC)
1192 return ret;
1193
1194 if (init) {
1195 IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n",
1196 lq->sta_id);
1197 spin_lock_irqsave(&priv->sta_lock, flags_spin);
1198 priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
1199 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
1200 }
1201 return ret;
1067} 1202}
1068EXPORT_SYMBOL(iwl_rxon_add_station); 1203EXPORT_SYMBOL(iwl_send_lq_cmd);
1069 1204
1070/** 1205/**
1071 * iwl_sta_init_bcast_lq - Initialize a bcast station's hardware rate table 1206 * iwl_alloc_bcast_station - add broadcast station into driver's station table.
1072 * 1207 *
1073 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before 1208 * This adds the broadcast station into the driver's station table
1074 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD, 1209 * and marks it driver active, so that it will be restored to the
1075 * which requires station table entry to exist). 1210 * device at the next best time.
1076 */ 1211 */
1077static void iwl_sta_init_bcast_lq(struct iwl_priv *priv) 1212int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq)
1078{ 1213{
1079 int i, r; 1214 struct iwl_link_quality_cmd *link_cmd;
1080 struct iwl_link_quality_cmd link_cmd = { 1215 unsigned long flags;
1081 .reserved1 = 0, 1216 u8 sta_id;
1082 };
1083 u32 rate_flags;
1084
1085 /* Set up the rate scaling to start at selected rate, fall back
1086 * all the way down to 1M in IEEE order, and then spin on 1M */
1087 if (priv->band == IEEE80211_BAND_5GHZ)
1088 r = IWL_RATE_6M_INDEX;
1089 else
1090 r = IWL_RATE_1M_INDEX;
1091
1092 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
1093 rate_flags = 0;
1094 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
1095 rate_flags |= RATE_MCS_CCK_MSK;
1096 1217
1097 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) << 1218 spin_lock_irqsave(&priv->sta_lock, flags);
1098 RATE_MCS_ANT_POS; 1219 sta_id = iwl_prep_station(priv, iwl_bcast_addr, false, NULL);
1220 if (sta_id == IWL_INVALID_STATION) {
1221 IWL_ERR(priv, "Unable to prepare broadcast station\n");
1222 spin_unlock_irqrestore(&priv->sta_lock, flags);
1099 1223
1100 link_cmd.rs_table[i].rate_n_flags = 1224 return -EINVAL;
1101 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
1102 r = iwl_get_prev_ieee_rate(r);
1103 } 1225 }
1104 1226
1105 link_cmd.general_params.single_stream_ant_msk = 1227 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
1106 first_antenna(priv->hw_params.valid_tx_ant); 1228 priv->stations[sta_id].used |= IWL_STA_BCAST;
1107 link_cmd.general_params.dual_stream_ant_msk = 3; 1229 spin_unlock_irqrestore(&priv->sta_lock, flags);
1108 link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
1109 link_cmd.agg_params.agg_time_limit =
1110 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
1111
1112 /* Update the rate scaling for control frame Tx to AP */
1113 link_cmd.sta_id = priv->hw_params.bcast_sta_id;
1114
1115 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
1116 sizeof(link_cmd), &link_cmd, NULL);
1117}
1118
1119 1230
1120/** 1231 if (init_lq) {
1121 * iwl_add_bcast_station - add broadcast station into station table. 1232 link_cmd = iwl_sta_alloc_lq(priv, sta_id);
1122 */ 1233 if (!link_cmd) {
1123void iwl_add_bcast_station(struct iwl_priv *priv) 1234 IWL_ERR(priv,
1124{ 1235 "Unable to initialize rate scaling for bcast station.\n");
1125 IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n"); 1236 return -ENOMEM;
1126 iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL); 1237 }
1127 1238
1128 /* Set up default rate scaling table in device's station table */ 1239 spin_lock_irqsave(&priv->sta_lock, flags);
1129 iwl_sta_init_bcast_lq(priv); 1240 priv->stations[sta_id].lq = link_cmd;
1130} 1241 spin_unlock_irqrestore(&priv->sta_lock, flags);
1131EXPORT_SYMBOL(iwl_add_bcast_station); 1242 }
1132 1243
1133/** 1244 return 0;
1134 * iwl3945_add_bcast_station - add broadcast station into station table.
1135 */
1136void iwl3945_add_bcast_station(struct iwl_priv *priv)
1137{
1138 IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
1139 iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
1140} 1245}
1141EXPORT_SYMBOL(iwl3945_add_bcast_station); 1246EXPORT_SYMBOL_GPL(iwl_alloc_bcast_station);
1142 1247
1143/** 1248void iwl_dealloc_bcast_station(struct iwl_priv *priv)
1144 * iwl_get_sta_id - Find station's index within station table
1145 *
1146 * If new IBSS station, create new entry in station table
1147 */
1148int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1149{ 1249{
1150 int sta_id; 1250 unsigned long flags;
1151 __le16 fc = hdr->frame_control; 1251 int i;
1152
1153 /* If this frame is broadcast or management, use broadcast station id */
1154 if (!ieee80211_is_data(fc) || is_multicast_ether_addr(hdr->addr1))
1155 return priv->hw_params.bcast_sta_id;
1156
1157 switch (priv->iw_mode) {
1158
1159 /* If we are a client station in a BSS network, use the special
1160 * AP station entry (that's the only station we communicate with) */
1161 case NL80211_IFTYPE_STATION:
1162 return IWL_AP_ID;
1163
1164 /* If we are an AP, then find the station, or use BCAST */
1165 case NL80211_IFTYPE_AP:
1166 sta_id = iwl_find_station(priv, hdr->addr1);
1167 if (sta_id != IWL_INVALID_STATION)
1168 return sta_id;
1169 return priv->hw_params.bcast_sta_id;
1170
1171 /* If this frame is going out to an IBSS network, find the station,
1172 * or create a new station table entry */
1173 case NL80211_IFTYPE_ADHOC:
1174 sta_id = iwl_find_station(priv, hdr->addr1);
1175 if (sta_id != IWL_INVALID_STATION)
1176 return sta_id;
1177
1178 /* Create new station table entry */
1179 sta_id = iwl_add_station(priv, hdr->addr1, false,
1180 CMD_ASYNC, NULL);
1181
1182 if (sta_id != IWL_INVALID_STATION)
1183 return sta_id;
1184
1185 IWL_DEBUG_DROP(priv, "Station %pM not in station map. "
1186 "Defaulting to broadcast...\n",
1187 hdr->addr1);
1188 iwl_print_hex_dump(priv, IWL_DL_DROP, (u8 *) hdr, sizeof(*hdr));
1189 return priv->hw_params.bcast_sta_id;
1190 1252
1191 default: 1253 spin_lock_irqsave(&priv->sta_lock, flags);
1192 IWL_WARN(priv, "Unknown mode of operation: %d\n", 1254 for (i = 0; i < priv->hw_params.max_stations; i++) {
1193 priv->iw_mode); 1255 if (!(priv->stations[i].used & IWL_STA_BCAST))
1194 return priv->hw_params.bcast_sta_id; 1256 continue;
1257
1258 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
1259 priv->num_stations--;
1260 BUG_ON(priv->num_stations < 0);
1261 kfree(priv->stations[i].lq);
1262 priv->stations[i].lq = NULL;
1195 } 1263 }
1264 spin_unlock_irqrestore(&priv->sta_lock, flags);
1196} 1265}
1197EXPORT_SYMBOL(iwl_get_sta_id); 1266EXPORT_SYMBOL_GPL(iwl_dealloc_bcast_station);
1198 1267
1199/** 1268/**
1200 * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table 1269 * iwl_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
@@ -1214,13 +1283,13 @@ void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
1214} 1283}
1215EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid); 1284EXPORT_SYMBOL(iwl_sta_tx_modify_enable_tid);
1216 1285
1217int iwl_sta_rx_agg_start(struct iwl_priv *priv, 1286int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
1218 const u8 *addr, int tid, u16 ssn) 1287 int tid, u16 ssn)
1219{ 1288{
1220 unsigned long flags; 1289 unsigned long flags;
1221 int sta_id; 1290 int sta_id;
1222 1291
1223 sta_id = iwl_find_station(priv, addr); 1292 sta_id = iwl_sta_id(sta);
1224 if (sta_id == IWL_INVALID_STATION) 1293 if (sta_id == IWL_INVALID_STATION)
1225 return -ENXIO; 1294 return -ENXIO;
1226 1295
@@ -1233,16 +1302,17 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv,
1233 spin_unlock_irqrestore(&priv->sta_lock, flags); 1302 spin_unlock_irqrestore(&priv->sta_lock, flags);
1234 1303
1235 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, 1304 return iwl_send_add_sta(priv, &priv->stations[sta_id].sta,
1236 CMD_ASYNC); 1305 CMD_ASYNC);
1237} 1306}
1238EXPORT_SYMBOL(iwl_sta_rx_agg_start); 1307EXPORT_SYMBOL(iwl_sta_rx_agg_start);
1239 1308
1240int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid) 1309int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
1310 int tid)
1241{ 1311{
1242 unsigned long flags; 1312 unsigned long flags;
1243 int sta_id; 1313 int sta_id;
1244 1314
1245 sta_id = iwl_find_station(priv, addr); 1315 sta_id = iwl_sta_id(sta);
1246 if (sta_id == IWL_INVALID_STATION) { 1316 if (sta_id == IWL_INVALID_STATION) {
1247 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid); 1317 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1248 return -ENXIO; 1318 return -ENXIO;
@@ -1291,3 +1361,22 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
1291 1361
1292 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 1362 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1293} 1363}
1364EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count);
1365
1366int iwl_mac_sta_remove(struct ieee80211_hw *hw,
1367 struct ieee80211_vif *vif,
1368 struct ieee80211_sta *sta)
1369{
1370 struct iwl_priv *priv = hw->priv;
1371 struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
1372 int ret;
1373
1374 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
1375 sta->addr);
1376 ret = iwl_remove_station(priv, sta_common->sta_id, sta->addr);
1377 if (ret)
1378 IWL_ERR(priv, "Error removing station %pM\n",
1379 sta->addr);
1380 return ret;
1381}
1382EXPORT_SYMBOL(iwl_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 2dc35fe28f56..c2a453a1a991 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -29,44 +29,82 @@
29#ifndef __iwl_sta_h__ 29#ifndef __iwl_sta_h__
30#define __iwl_sta_h__ 30#define __iwl_sta_h__
31 31
32#include "iwl-dev.h"
33
32#define HW_KEY_DYNAMIC 0 34#define HW_KEY_DYNAMIC 0
33#define HW_KEY_DEFAULT 1 35#define HW_KEY_DEFAULT 1
34 36
35/** 37#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
36 * iwl_find_station - Find station id for a given BSSID 38#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
37 * @bssid: MAC address of station ID to find 39#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
38 */ 40 being activated */
39u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid); 41#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
42 (this is for the IBSS BSSID stations) */
43#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
44
40 45
41int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty);
42int iwl_remove_default_wep_key(struct iwl_priv *priv, 46int iwl_remove_default_wep_key(struct iwl_priv *priv,
43 struct ieee80211_key_conf *key); 47 struct ieee80211_key_conf *key);
44int iwl_set_default_wep_key(struct iwl_priv *priv, 48int iwl_set_default_wep_key(struct iwl_priv *priv,
45 struct ieee80211_key_conf *key); 49 struct ieee80211_key_conf *key);
50int iwl_restore_default_wep_keys(struct iwl_priv *priv);
46int iwl_set_dynamic_key(struct iwl_priv *priv, 51int iwl_set_dynamic_key(struct iwl_priv *priv,
47 struct ieee80211_key_conf *key, u8 sta_id); 52 struct ieee80211_key_conf *key, u8 sta_id);
48int iwl_remove_dynamic_key(struct iwl_priv *priv, 53int iwl_remove_dynamic_key(struct iwl_priv *priv,
49 struct ieee80211_key_conf *key, u8 sta_id); 54 struct ieee80211_key_conf *key, u8 sta_id);
50void iwl_update_tkip_key(struct iwl_priv *priv, 55void iwl_update_tkip_key(struct iwl_priv *priv,
51 struct ieee80211_key_conf *keyconf, 56 struct ieee80211_key_conf *keyconf,
52 const u8 *addr, u32 iv32, u16 *phase1key); 57 struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
53 58
54int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap); 59void iwl_restore_stations(struct iwl_priv *priv);
55void iwl_add_bcast_station(struct iwl_priv *priv); 60void iwl_clear_ucode_stations(struct iwl_priv *priv);
56void iwl3945_add_bcast_station(struct iwl_priv *priv); 61int iwl_alloc_bcast_station(struct iwl_priv *priv, bool init_lq);
57int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap); 62void iwl_dealloc_bcast_station(struct iwl_priv *priv);
58void iwl_clear_stations_table(struct iwl_priv *priv);
59int iwl_get_free_ucode_key_index(struct iwl_priv *priv); 63int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
60int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
61int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
62int iwl_send_add_sta(struct iwl_priv *priv, 64int iwl_send_add_sta(struct iwl_priv *priv,
63 struct iwl_addsta_cmd *sta, u8 flags); 65 struct iwl_addsta_cmd *sta, u8 flags);
64u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags, 66int iwl_add_bssid_station(struct iwl_priv *priv, const u8 *addr, bool init_rs,
65 struct ieee80211_sta_ht_cap *ht_info); 67 u8 *sta_id_r);
68int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
69 bool is_ap,
70 struct ieee80211_sta_ht_cap *ht_info,
71 u8 *sta_id_r);
72int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
73 const u8 *addr);
74int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
75 struct ieee80211_sta *sta);
66void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid); 76void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
67int iwl_sta_rx_agg_start(struct iwl_priv *priv, 77int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
68 const u8 *addr, int tid, u16 ssn); 78 int tid, u16 ssn);
69int iwl_sta_rx_agg_stop(struct iwl_priv *priv, const u8 *addr, int tid); 79int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
80 int tid);
70void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id); 81void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id);
71void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt); 82void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
83
84/**
85 * iwl_clear_driver_stations - clear knowledge of all stations from driver
86 * @priv: iwl priv struct
87 *
88 * This is called during iwl_down() to make sure that in the case
89 * we're coming there from a hardware restart mac80211 will be
90 * able to reconfigure stations -- if we're getting there in the
91 * normal down flow then the stations will already be cleared.
92 */
93static inline void iwl_clear_driver_stations(struct iwl_priv *priv)
94{
95 unsigned long flags;
96
97 spin_lock_irqsave(&priv->sta_lock, flags);
98 memset(priv->stations, 0, sizeof(priv->stations));
99 priv->num_stations = 0;
100 spin_unlock_irqrestore(&priv->sta_lock, flags);
101}
102
103static inline int iwl_sta_id(struct ieee80211_sta *sta)
104{
105 if (WARN_ON(!sta))
106 return IWL_INVALID_STATION;
107
108 return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
109}
72#endif /* __iwl_sta_h__ */ 110#endif /* __iwl_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 8dd0c036d547..1ece2ea09773 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -38,47 +38,6 @@
38#include "iwl-io.h" 38#include "iwl-io.h"
39#include "iwl-helpers.h" 39#include "iwl-helpers.h"
40 40
41static const u16 default_tid_to_tx_fifo[] = {
42 IWL_TX_FIFO_AC1,
43 IWL_TX_FIFO_AC0,
44 IWL_TX_FIFO_AC0,
45 IWL_TX_FIFO_AC1,
46 IWL_TX_FIFO_AC2,
47 IWL_TX_FIFO_AC2,
48 IWL_TX_FIFO_AC3,
49 IWL_TX_FIFO_AC3,
50 IWL_TX_FIFO_NONE,
51 IWL_TX_FIFO_NONE,
52 IWL_TX_FIFO_NONE,
53 IWL_TX_FIFO_NONE,
54 IWL_TX_FIFO_NONE,
55 IWL_TX_FIFO_NONE,
56 IWL_TX_FIFO_NONE,
57 IWL_TX_FIFO_NONE,
58 IWL_TX_FIFO_AC3
59};
60
61static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
62 struct iwl_dma_ptr *ptr, size_t size)
63{
64 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
65 GFP_KERNEL);
66 if (!ptr->addr)
67 return -ENOMEM;
68 ptr->size = size;
69 return 0;
70}
71
72static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
73 struct iwl_dma_ptr *ptr)
74{
75 if (unlikely(!ptr->addr))
76 return;
77
78 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
79 memset(ptr, 0, sizeof(*ptr));
80}
81
82/** 41/**
83 * iwl_txq_update_write_ptr - Send new write index to hardware 42 * iwl_txq_update_write_ptr - Send new write index to hardware
84 */ 43 */
@@ -310,6 +269,8 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
310 q->high_mark = 2; 269 q->high_mark = 2;
311 270
312 q->write_ptr = q->read_ptr = 0; 271 q->write_ptr = q->read_ptr = 0;
272 q->last_read_ptr = 0;
273 q->repeat_same_read_ptr = 0;
313 274
314 return 0; 275 return 0;
315} 276}
@@ -454,611 +415,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
454} 415}
455EXPORT_SYMBOL(iwl_tx_queue_reset); 416EXPORT_SYMBOL(iwl_tx_queue_reset);
456 417
457/**
458 * iwl_hw_txq_ctx_free - Free TXQ Context
459 *
460 * Destroy all TX DMA queues and structures
461 */
462void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
463{
464 int txq_id;
465
466 /* Tx queues */
467 if (priv->txq) {
468 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
469 if (txq_id == IWL_CMD_QUEUE_NUM)
470 iwl_cmd_queue_free(priv);
471 else
472 iwl_tx_queue_free(priv, txq_id);
473 }
474 iwl_free_dma_ptr(priv, &priv->kw);
475
476 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
477
478 /* free tx queue structure */
479 iwl_free_txq_mem(priv);
480}
481EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
482
483/**
484 * iwl_txq_ctx_alloc - allocate TX queue context
485 * Allocate all Tx DMA structures and initialize them
486 *
487 * @param priv
488 * @return error code
489 */
490int iwl_txq_ctx_alloc(struct iwl_priv *priv)
491{
492 int ret;
493 int txq_id, slots_num;
494 unsigned long flags;
495
496 /* Free all tx/cmd queues and keep-warm buffer */
497 iwl_hw_txq_ctx_free(priv);
498
499 ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
500 priv->hw_params.scd_bc_tbls_size);
501 if (ret) {
502 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
503 goto error_bc_tbls;
504 }
505 /* Alloc keep-warm buffer */
506 ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
507 if (ret) {
508 IWL_ERR(priv, "Keep Warm allocation failed\n");
509 goto error_kw;
510 }
511
512 /* allocate tx queue structure */
513 ret = iwl_alloc_txq_mem(priv);
514 if (ret)
515 goto error;
516
517 spin_lock_irqsave(&priv->lock, flags);
518
519 /* Turn off all Tx DMA fifos */
520 priv->cfg->ops->lib->txq_set_sched(priv, 0);
521
522 /* Tell NIC where to find the "keep warm" buffer */
523 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
524
525 spin_unlock_irqrestore(&priv->lock, flags);
526
527 /* Alloc and init all Tx queues, including the command queue (#4) */
528 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
529 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
530 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
531 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
532 txq_id);
533 if (ret) {
534 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
535 goto error;
536 }
537 }
538
539 return ret;
540
541 error:
542 iwl_hw_txq_ctx_free(priv);
543 iwl_free_dma_ptr(priv, &priv->kw);
544 error_kw:
545 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
546 error_bc_tbls:
547 return ret;
548}
549
550void iwl_txq_ctx_reset(struct iwl_priv *priv)
551{
552 int txq_id, slots_num;
553 unsigned long flags;
554
555 spin_lock_irqsave(&priv->lock, flags);
556
557 /* Turn off all Tx DMA fifos */
558 priv->cfg->ops->lib->txq_set_sched(priv, 0);
559
560 /* Tell NIC where to find the "keep warm" buffer */
561 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
562
563 spin_unlock_irqrestore(&priv->lock, flags);
564
565 /* Alloc and init all Tx queues, including the command queue (#4) */
566 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
567 slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
568 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
569 iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
570 }
571}
572
573/**
574 * iwl_txq_ctx_stop - Stop all Tx DMA channels
575 */
576void iwl_txq_ctx_stop(struct iwl_priv *priv)
577{
578 int ch;
579 unsigned long flags;
580
581 /* Turn off all Tx DMA fifos */
582 spin_lock_irqsave(&priv->lock, flags);
583
584 priv->cfg->ops->lib->txq_set_sched(priv, 0);
585
586 /* Stop each Tx DMA channel, and wait for it to be idle */
587 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
588 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
589 iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
590 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
591 1000);
592 }
593 spin_unlock_irqrestore(&priv->lock, flags);
594}
595EXPORT_SYMBOL(iwl_txq_ctx_stop);
596
597/*
598 * handle build REPLY_TX command notification.
599 */
600static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
601 struct iwl_tx_cmd *tx_cmd,
602 struct ieee80211_tx_info *info,
603 struct ieee80211_hdr *hdr,
604 u8 std_id)
605{
606 __le16 fc = hdr->frame_control;
607 __le32 tx_flags = tx_cmd->tx_flags;
608
609 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
610 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
611 tx_flags |= TX_CMD_FLG_ACK_MSK;
612 if (ieee80211_is_mgmt(fc))
613 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
614 if (ieee80211_is_probe_resp(fc) &&
615 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
616 tx_flags |= TX_CMD_FLG_TSF_MSK;
617 } else {
618 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
619 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
620 }
621
622 if (ieee80211_is_back_req(fc))
623 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
624
625
626 tx_cmd->sta_id = std_id;
627 if (ieee80211_has_morefrags(fc))
628 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
629
630 if (ieee80211_is_data_qos(fc)) {
631 u8 *qc = ieee80211_get_qos_ctl(hdr);
632 tx_cmd->tid_tspec = qc[0] & 0xf;
633 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
634 } else {
635 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
636 }
637
638 priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
639
640 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
641 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
642
643 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
644 if (ieee80211_is_mgmt(fc)) {
645 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
646 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
647 else
648 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
649 } else {
650 tx_cmd->timeout.pm_frame_timeout = 0;
651 }
652
653 tx_cmd->driver_txop = 0;
654 tx_cmd->tx_flags = tx_flags;
655 tx_cmd->next_frame_len = 0;
656}
657
658#define RTS_HCCA_RETRY_LIMIT 3
659#define RTS_DFAULT_RETRY_LIMIT 60
660
661static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
662 struct iwl_tx_cmd *tx_cmd,
663 struct ieee80211_tx_info *info,
664 __le16 fc, int is_hcca)
665{
666 u32 rate_flags;
667 int rate_idx;
668 u8 rts_retry_limit;
669 u8 data_retry_limit;
670 u8 rate_plcp;
671
672 /* Set retry limit on DATA packets and Probe Responses*/
673 if (ieee80211_is_probe_resp(fc))
674 data_retry_limit = 3;
675 else
676 data_retry_limit = IWL_DEFAULT_TX_RETRY;
677 tx_cmd->data_retry_limit = data_retry_limit;
678
679 /* Set retry limit on RTS packets */
680 rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
681 RTS_DFAULT_RETRY_LIMIT;
682 if (data_retry_limit < rts_retry_limit)
683 rts_retry_limit = data_retry_limit;
684 tx_cmd->rts_retry_limit = rts_retry_limit;
685
686 /* DATA packets will use the uCode station table for rate/antenna
687 * selection */
688 if (ieee80211_is_data(fc)) {
689 tx_cmd->initial_rate_index = 0;
690 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
691 return;
692 }
693
694 /**
695 * If the current TX rate stored in mac80211 has the MCS bit set, it's
696 * not really a TX rate. Thus, we use the lowest supported rate for
697 * this band. Also use the lowest supported rate if the stored rate
698 * index is invalid.
699 */
700 rate_idx = info->control.rates[0].idx;
701 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
702 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
703 rate_idx = rate_lowest_index(&priv->bands[info->band],
704 info->control.sta);
705 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
706 if (info->band == IEEE80211_BAND_5GHZ)
707 rate_idx += IWL_FIRST_OFDM_RATE;
708 /* Get PLCP rate for tx_cmd->rate_n_flags */
709 rate_plcp = iwl_rates[rate_idx].plcp;
710 /* Zero out flags for this packet */
711 rate_flags = 0;
712
713 /* Set CCK flag as needed */
714 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
715 rate_flags |= RATE_MCS_CCK_MSK;
716
717 /* Set up RTS and CTS flags for certain packets */
718 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
719 case cpu_to_le16(IEEE80211_STYPE_AUTH):
720 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
721 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
722 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
723 if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
724 tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
725 tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
726 }
727 break;
728 default:
729 break;
730 }
731
732 /* Set up antennas */
733 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
734 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
735
736 /* Set the rate in the TX cmd */
737 tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
738}
739
740static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
741 struct ieee80211_tx_info *info,
742 struct iwl_tx_cmd *tx_cmd,
743 struct sk_buff *skb_frag,
744 int sta_id)
745{
746 struct ieee80211_key_conf *keyconf = info->control.hw_key;
747
748 switch (keyconf->alg) {
749 case ALG_CCMP:
750 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
751 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
752 if (info->flags & IEEE80211_TX_CTL_AMPDU)
753 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
754 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
755 break;
756
757 case ALG_TKIP:
758 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
759 ieee80211_get_tkip_key(keyconf, skb_frag,
760 IEEE80211_TKIP_P2_KEY, tx_cmd->key);
761 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
762 break;
763
764 case ALG_WEP:
765 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
766 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
767
768 if (keyconf->keylen == WEP_KEY_LEN_128)
769 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
770
771 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
772
773 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
774 "with key %d\n", keyconf->keyidx);
775 break;
776
777 default:
778 IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
779 break;
780 }
781}
782
783/*
784 * start REPLY_TX command process
785 */
786int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
787{
788 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
789 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
790 struct ieee80211_sta *sta = info->control.sta;
791 struct iwl_station_priv *sta_priv = NULL;
792 struct iwl_tx_queue *txq;
793 struct iwl_queue *q;
794 struct iwl_device_cmd *out_cmd;
795 struct iwl_cmd_meta *out_meta;
796 struct iwl_tx_cmd *tx_cmd;
797 int swq_id, txq_id;
798 dma_addr_t phys_addr;
799 dma_addr_t txcmd_phys;
800 dma_addr_t scratch_phys;
801 u16 len, len_org, firstlen, secondlen;
802 u16 seq_number = 0;
803 __le16 fc;
804 u8 hdr_len;
805 u8 sta_id;
806 u8 wait_write_ptr = 0;
807 u8 tid = 0;
808 u8 *qc = NULL;
809 unsigned long flags;
810
811 spin_lock_irqsave(&priv->lock, flags);
812 if (iwl_is_rfkill(priv)) {
813 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
814 goto drop_unlock;
815 }
816
817 fc = hdr->frame_control;
818
819#ifdef CONFIG_IWLWIFI_DEBUG
820 if (ieee80211_is_auth(fc))
821 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
822 else if (ieee80211_is_assoc_req(fc))
823 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
824 else if (ieee80211_is_reassoc_req(fc))
825 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
826#endif
827
828 /* drop all non-injected data frame if we are not associated */
829 if (ieee80211_is_data(fc) &&
830 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
831 (!iwl_is_associated(priv) ||
832 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
833 !priv->assoc_station_added)) {
834 IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
835 goto drop_unlock;
836 }
837
838 hdr_len = ieee80211_hdrlen(fc);
839
840 /* Find (or create) index into station table for destination station */
841 if (info->flags & IEEE80211_TX_CTL_INJECTED)
842 sta_id = priv->hw_params.bcast_sta_id;
843 else
844 sta_id = iwl_get_sta_id(priv, hdr);
845 if (sta_id == IWL_INVALID_STATION) {
846 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
847 hdr->addr1);
848 goto drop_unlock;
849 }
850
851 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
852
853 if (sta)
854 sta_priv = (void *)sta->drv_priv;
855
856 if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
857 sta_priv->asleep) {
858 WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
859 /*
860 * This sends an asynchronous command to the device,
861 * but we can rely on it being processed before the
862 * next frame is processed -- and the next frame to
863 * this station is the one that will consume this
864 * counter.
865 * For now set the counter to just 1 since we do not
866 * support uAPSD yet.
867 */
868 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
869 }
870
871 txq_id = skb_get_queue_mapping(skb);
872 if (ieee80211_is_data_qos(fc)) {
873 qc = ieee80211_get_qos_ctl(hdr);
874 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
875 if (unlikely(tid >= MAX_TID_COUNT))
876 goto drop_unlock;
877 seq_number = priv->stations[sta_id].tid[tid].seq_number;
878 seq_number &= IEEE80211_SCTL_SEQ;
879 hdr->seq_ctrl = hdr->seq_ctrl &
880 cpu_to_le16(IEEE80211_SCTL_FRAG);
881 hdr->seq_ctrl |= cpu_to_le16(seq_number);
882 seq_number += 0x10;
883 /* aggregation is on for this <sta,tid> */
884 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
885 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
886 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
887 }
888 }
889
890 txq = &priv->txq[txq_id];
891 swq_id = txq->swq_id;
892 q = &txq->q;
893
894 if (unlikely(iwl_queue_space(q) < q->high_mark))
895 goto drop_unlock;
896
897 if (ieee80211_is_data_qos(fc))
898 priv->stations[sta_id].tid[tid].tfds_in_queue++;
899
900 /* Set up driver data for this TFD */
901 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
902 txq->txb[q->write_ptr].skb[0] = skb;
903
904 /* Set up first empty entry in queue's array of Tx/cmd buffers */
905 out_cmd = txq->cmd[q->write_ptr];
906 out_meta = &txq->meta[q->write_ptr];
907 tx_cmd = &out_cmd->cmd.tx;
908 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
909 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
910
911 /*
912 * Set up the Tx-command (not MAC!) header.
913 * Store the chosen Tx queue and TFD index within the sequence field;
914 * after Tx, uCode's Tx response will return this value so driver can
915 * locate the frame within the tx queue and do post-tx processing.
916 */
917 out_cmd->hdr.cmd = REPLY_TX;
918 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
919 INDEX_TO_SEQ(q->write_ptr)));
920
921 /* Copy MAC header from skb into command buffer */
922 memcpy(tx_cmd->hdr, hdr, hdr_len);
923
924
925 /* Total # bytes to be transmitted */
926 len = (u16)skb->len;
927 tx_cmd->len = cpu_to_le16(len);
928
929 if (info->control.hw_key)
930 iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
931
932 /* TODO need this for burst mode later on */
933 iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
934 iwl_dbg_log_tx_data_frame(priv, len, hdr);
935
936 /* set is_hcca to 0; it probably will never be implemented */
937 iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);
938
939 iwl_update_stats(priv, true, fc, len);
940 /*
941 * Use the first empty entry in this queue's command buffer array
942 * to contain the Tx command and MAC header concatenated together
943 * (payload data will be in another buffer).
944 * Size of this varies, due to varying MAC header length.
945 * If end is not dword aligned, we'll have 2 extra bytes at the end
946 * of the MAC header (device reads on dword boundaries).
947 * We'll tell device about this padding later.
948 */
949 len = sizeof(struct iwl_tx_cmd) +
950 sizeof(struct iwl_cmd_header) + hdr_len;
951
952 len_org = len;
953 firstlen = len = (len + 3) & ~3;
954
955 if (len_org != len)
956 len_org = 1;
957 else
958 len_org = 0;
959
960 /* Tell NIC about any 2-byte padding after MAC header */
961 if (len_org)
962 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
963
964 /* Physical address of this Tx command's header (not MAC header!),
965 * within command buffer array. */
966 txcmd_phys = pci_map_single(priv->pci_dev,
967 &out_cmd->hdr, len,
968 PCI_DMA_BIDIRECTIONAL);
969 pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
970 pci_unmap_len_set(out_meta, len, len);
971 /* Add buffer containing Tx command and MAC(!) header to TFD's
972 * first entry */
973 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
974 txcmd_phys, len, 1, 0);
975
976 if (!ieee80211_has_morefrags(hdr->frame_control)) {
977 txq->need_update = 1;
978 if (qc)
979 priv->stations[sta_id].tid[tid].seq_number = seq_number;
980 } else {
981 wait_write_ptr = 1;
982 txq->need_update = 0;
983 }
984
985 /* Set up TFD's 2nd entry to point directly to remainder of skb,
986 * if any (802.11 null frames have no payload). */
987 secondlen = len = skb->len - hdr_len;
988 if (len) {
989 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
990 len, PCI_DMA_TODEVICE);
991 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
992 phys_addr, len,
993 0, 0);
994 }
995
996 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
997 offsetof(struct iwl_tx_cmd, scratch);
998
999 len = sizeof(struct iwl_tx_cmd) +
1000 sizeof(struct iwl_cmd_header) + hdr_len;
1001 /* take back ownership of DMA buffer to enable update */
1002 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
1003 len, PCI_DMA_BIDIRECTIONAL);
1004 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1005 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1006
1007 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
1008 le16_to_cpu(out_cmd->hdr.sequence));
1009 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
1010 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
1011 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
1012
1013 /* Set up entry for this TFD in Tx byte-count array */
1014 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1015 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
1016 le16_to_cpu(tx_cmd->len));
1017
1018 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
1019 len, PCI_DMA_BIDIRECTIONAL);
1020
1021 trace_iwlwifi_dev_tx(priv,
1022 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
1023 sizeof(struct iwl_tfd),
1024 &out_cmd->hdr, firstlen,
1025 skb->data + hdr_len, secondlen);
1026
1027 /* Tell device the write index *just past* this latest filled TFD */
1028 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1029 iwl_txq_update_write_ptr(priv, txq);
1030 spin_unlock_irqrestore(&priv->lock, flags);
1031
1032 /*
1033 * At this point the frame is "transmitted" successfully
1034 * and we will get a TX status notification eventually,
1035 * regardless of the value of ret. "ret" only indicates
1036 * whether or not we should update the write pointer.
1037 */
1038
1039 /* avoid atomic ops if it isn't an associated client */
1040 if (sta_priv && sta_priv->client)
1041 atomic_inc(&sta_priv->pending_frames);
1042
1043 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
1044 if (wait_write_ptr) {
1045 spin_lock_irqsave(&priv->lock, flags);
1046 txq->need_update = 1;
1047 iwl_txq_update_write_ptr(priv, txq);
1048 spin_unlock_irqrestore(&priv->lock, flags);
1049 } else {
1050 iwl_stop_queue(priv, txq->swq_id);
1051 }
1052 }
1053
1054 return 0;
1055
1056drop_unlock:
1057 spin_unlock_irqrestore(&priv->lock, flags);
1058 return -1;
1059}
1060EXPORT_SYMBOL(iwl_tx_skb);
1061
1062/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 418/*************** HOST COMMAND QUEUE FUNCTIONS *****/
1063 419
1064/** 420/**
@@ -1192,61 +548,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1192 return idx; 548 return idx;
1193} 549}
1194 550
1195static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
1196{
1197 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1198 struct ieee80211_sta *sta;
1199 struct iwl_station_priv *sta_priv;
1200
1201 sta = ieee80211_find_sta(priv->vif, hdr->addr1);
1202 if (sta) {
1203 sta_priv = (void *)sta->drv_priv;
1204 /* avoid atomic ops if this isn't a client */
1205 if (sta_priv->client &&
1206 atomic_dec_return(&sta_priv->pending_frames) == 0)
1207 ieee80211_sta_block_awake(priv->hw, sta, false);
1208 }
1209
1210 ieee80211_tx_status_irqsafe(priv->hw, skb);
1211}
1212
1213int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1214{
1215 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1216 struct iwl_queue *q = &txq->q;
1217 struct iwl_tx_info *tx_info;
1218 int nfreed = 0;
1219 struct ieee80211_hdr *hdr;
1220
1221 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1222 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
1223 "is out of range [0-%d] %d %d.\n", txq_id,
1224 index, q->n_bd, q->write_ptr, q->read_ptr);
1225 return 0;
1226 }
1227
1228 for (index = iwl_queue_inc_wrap(index, q->n_bd);
1229 q->read_ptr != index;
1230 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1231
1232 tx_info = &txq->txb[txq->q.read_ptr];
1233 iwl_tx_status(priv, tx_info->skb[0]);
1234
1235 hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
1236 if (hdr && ieee80211_is_data_qos(hdr->frame_control))
1237 nfreed++;
1238 tx_info->skb[0] = NULL;
1239
1240 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
1241 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
1242
1243 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1244 }
1245 return nfreed;
1246}
1247EXPORT_SYMBOL(iwl_tx_queue_reclaim);
1248
1249
1250/** 551/**
1251 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd 552 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
1252 * 553 *
@@ -1340,7 +641,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1340 641
1341 if (!(meta->flags & CMD_ASYNC)) { 642 if (!(meta->flags & CMD_ASYNC)) {
1342 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 643 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1343 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n", 644 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
1344 get_cmd_string(cmd->hdr.cmd)); 645 get_cmd_string(cmd->hdr.cmd));
1345 wake_up_interruptible(&priv->wait_command_queue); 646 wake_up_interruptible(&priv->wait_command_queue);
1346 } 647 }
@@ -1348,358 +649,37 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1348} 649}
1349EXPORT_SYMBOL(iwl_tx_cmd_complete); 650EXPORT_SYMBOL(iwl_tx_cmd_complete);
1350 651
1351/*
1352 * Find first available (lowest unused) Tx Queue, mark it "active".
1353 * Called only when finding queue for aggregation.
1354 * Should never return anything < 7, because they should already
1355 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
1356 */
1357static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
1358{
1359 int txq_id;
1360
1361 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
1362 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
1363 return txq_id;
1364 return -1;
1365}
1366
1367int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1368{
1369 int sta_id;
1370 int tx_fifo;
1371 int txq_id;
1372 int ret;
1373 unsigned long flags;
1374 struct iwl_tid_data *tid_data;
1375
1376 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
1377 tx_fifo = default_tid_to_tx_fifo[tid];
1378 else
1379 return -EINVAL;
1380
1381 IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
1382 __func__, ra, tid);
1383
1384 sta_id = iwl_find_station(priv, ra);
1385 if (sta_id == IWL_INVALID_STATION) {
1386 IWL_ERR(priv, "Start AGG on invalid station\n");
1387 return -ENXIO;
1388 }
1389 if (unlikely(tid >= MAX_TID_COUNT))
1390 return -EINVAL;
1391
1392 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
1393 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
1394 return -ENXIO;
1395 }
1396
1397 txq_id = iwl_txq_ctx_activate_free(priv);
1398 if (txq_id == -1) {
1399 IWL_ERR(priv, "No free aggregation queue available\n");
1400 return -ENXIO;
1401 }
1402
1403 spin_lock_irqsave(&priv->sta_lock, flags);
1404 tid_data = &priv->stations[sta_id].tid[tid];
1405 *ssn = SEQ_TO_SN(tid_data->seq_number);
1406 tid_data->agg.txq_id = txq_id;
1407 priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
1408 spin_unlock_irqrestore(&priv->sta_lock, flags);
1409
1410 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
1411 sta_id, tid, *ssn);
1412 if (ret)
1413 return ret;
1414
1415 if (tid_data->tfds_in_queue == 0) {
1416 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1417 tid_data->agg.state = IWL_AGG_ON;
1418 ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1419 } else {
1420 IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
1421 tid_data->tfds_in_queue);
1422 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
1423 }
1424 return ret;
1425}
1426EXPORT_SYMBOL(iwl_tx_agg_start);
1427
1428int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1429{
1430 int tx_fifo_id, txq_id, sta_id, ssn = -1;
1431 struct iwl_tid_data *tid_data;
1432 int write_ptr, read_ptr;
1433 unsigned long flags;
1434
1435 if (!ra) {
1436 IWL_ERR(priv, "ra = NULL\n");
1437 return -EINVAL;
1438 }
1439
1440 if (unlikely(tid >= MAX_TID_COUNT))
1441 return -EINVAL;
1442
1443 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
1444 tx_fifo_id = default_tid_to_tx_fifo[tid];
1445 else
1446 return -EINVAL;
1447
1448 sta_id = iwl_find_station(priv, ra);
1449
1450 if (sta_id == IWL_INVALID_STATION) {
1451 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1452 return -ENXIO;
1453 }
1454
1455 if (priv->stations[sta_id].tid[tid].agg.state ==
1456 IWL_EMPTYING_HW_QUEUE_ADDBA) {
1457 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
1458 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1459 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1460 return 0;
1461 }
1462
1463 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
1464 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
1465
1466 tid_data = &priv->stations[sta_id].tid[tid];
1467 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
1468 txq_id = tid_data->agg.txq_id;
1469 write_ptr = priv->txq[txq_id].q.write_ptr;
1470 read_ptr = priv->txq[txq_id].q.read_ptr;
1471
1472 /* The queue is not empty */
1473 if (write_ptr != read_ptr) {
1474 IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
1475 priv->stations[sta_id].tid[tid].agg.state =
1476 IWL_EMPTYING_HW_QUEUE_DELBA;
1477 return 0;
1478 }
1479
1480 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1481 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1482
1483 spin_lock_irqsave(&priv->lock, flags);
1484 /*
1485 * the only reason this call can fail is queue number out of range,
1486 * which can happen if uCode is reloaded and all the station
1487 * information are lost. if it is outside the range, there is no need
1488 * to deactivate the uCode queue, just return "success" to allow
1489 * mac80211 to clean up it own data.
1490 */
1491 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
1492 tx_fifo_id);
1493 spin_unlock_irqrestore(&priv->lock, flags);
1494
1495 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1496
1497 return 0;
1498}
1499EXPORT_SYMBOL(iwl_tx_agg_stop);
1500
1501int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
1502{
1503 struct iwl_queue *q = &priv->txq[txq_id].q;
1504 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1505 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1506
1507 switch (priv->stations[sta_id].tid[tid].agg.state) {
1508 case IWL_EMPTYING_HW_QUEUE_DELBA:
1509 /* We are reclaiming the last packet of the */
1510 /* aggregated HW queue */
1511 if ((txq_id == tid_data->agg.txq_id) &&
1512 (q->read_ptr == q->write_ptr)) {
1513 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1514 int tx_fifo = default_tid_to_tx_fifo[tid];
1515 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
1516 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
1517 ssn, tx_fifo);
1518 tid_data->agg.state = IWL_AGG_OFF;
1519 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
1520 }
1521 break;
1522 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1523 /* We are reclaiming the last packet of the queue */
1524 if (tid_data->tfds_in_queue == 0) {
1525 IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
1526 tid_data->agg.state = IWL_AGG_ON;
1527 ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
1528 }
1529 break;
1530 }
1531 return 0;
1532}
1533EXPORT_SYMBOL(iwl_txq_check_empty);
1534
1535/**
1536 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
1537 *
1538 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
1539 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
1540 */
1541static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1542 struct iwl_ht_agg *agg,
1543 struct iwl_compressed_ba_resp *ba_resp)
1544
1545{
1546 int i, sh, ack;
1547 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1548 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1549 u64 bitmap;
1550 int successes = 0;
1551 struct ieee80211_tx_info *info;
1552
1553 if (unlikely(!agg->wait_for_ba)) {
1554 IWL_ERR(priv, "Received BA when not expected\n");
1555 return -EINVAL;
1556 }
1557
1558 /* Mark that the expected block-ack response arrived */
1559 agg->wait_for_ba = 0;
1560 IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
1561
1562 /* Calculate shift to align block-ack bits with our Tx window bits */
1563 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
1564 if (sh < 0) /* tbw something is wrong with indices */
1565 sh += 0x100;
1566
1567 /* don't use 64-bit values for now */
1568 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1569
1570 if (agg->frame_count > (64 - sh)) {
1571 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
1572 return -1;
1573 }
1574
1575 /* check for success or failure according to the
1576 * transmitted bitmap and block-ack bitmap */
1577 bitmap &= agg->bitmap;
1578
1579 /* For each frame attempted in aggregation,
1580 * update driver's record of tx frame's status. */
1581 for (i = 0; i < agg->frame_count ; i++) {
1582 ack = bitmap & (1ULL << i);
1583 successes += !!ack;
1584 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1585 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
1586 agg->start_idx + i);
1587 }
1588
1589 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
1590 memset(&info->status, 0, sizeof(info->status));
1591 info->flags |= IEEE80211_TX_STAT_ACK;
1592 info->flags |= IEEE80211_TX_STAT_AMPDU;
1593 info->status.ampdu_ack_map = successes;
1594 info->status.ampdu_ack_len = agg->frame_count;
1595 iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1596
1597 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
1598
1599 return 0;
1600}
1601
1602/**
1603 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
1604 *
1605 * Handles block-acknowledge notification from device, which reports success
1606 * of frames sent via aggregation.
1607 */
1608void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1609 struct iwl_rx_mem_buffer *rxb)
1610{
1611 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1612 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1613 struct iwl_tx_queue *txq = NULL;
1614 struct iwl_ht_agg *agg;
1615 int index;
1616 int sta_id;
1617 int tid;
1618
1619 /* "flow" corresponds to Tx queue */
1620 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1621
1622 /* "ssn" is start of block-ack Tx window, corresponds to index
1623 * (in Tx queue's circular buffer) of first TFD/frame in window */
1624 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1625
1626 if (scd_flow >= priv->hw_params.max_txq_num) {
1627 IWL_ERR(priv,
1628 "BUG_ON scd_flow is bigger than number of queues\n");
1629 return;
1630 }
1631
1632 txq = &priv->txq[scd_flow];
1633 sta_id = ba_resp->sta_id;
1634 tid = ba_resp->tid;
1635 agg = &priv->stations[sta_id].tid[tid].agg;
1636
1637 /* Find index just before block-ack window */
1638 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1639
1640 /* TODO: Need to get this copy more safely - now good for debug */
1641
1642 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1643 "sta_id = %d\n",
1644 agg->wait_for_ba,
1645 (u8 *) &ba_resp->sta_addr_lo32,
1646 ba_resp->sta_id);
1647 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
1648 "%d, scd_ssn = %d\n",
1649 ba_resp->tid,
1650 ba_resp->seq_ctl,
1651 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1652 ba_resp->scd_flow,
1653 ba_resp->scd_ssn);
1654 IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx \n",
1655 agg->start_idx,
1656 (unsigned long long)agg->bitmap);
1657
1658 /* Update driver's record of ACK vs. not for each frame in window */
1659 iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);
1660
1661 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1662 * block-ack window (we assume that they've been successfully
1663 * transmitted ... if not, it's too late anyway). */
1664 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1665 /* calculate mac80211 ampdu sw queue to wake */
1666 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
1667 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1668
1669 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1670 priv->mac80211_registered &&
1671 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1672 iwl_wake_queue(priv, txq->swq_id);
1673
1674 iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
1675 }
1676}
1677EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
1678
1679#ifdef CONFIG_IWLWIFI_DEBUG 652#ifdef CONFIG_IWLWIFI_DEBUG
1680#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x 653#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
654#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
1681 655
1682const char *iwl_get_tx_fail_reason(u32 status) 656const char *iwl_get_tx_fail_reason(u32 status)
1683{ 657{
1684 switch (status & TX_STATUS_MSK) { 658 switch (status & TX_STATUS_MSK) {
1685 case TX_STATUS_SUCCESS: 659 case TX_STATUS_SUCCESS:
1686 return "SUCCESS"; 660 return "SUCCESS";
1687 TX_STATUS_ENTRY(SHORT_LIMIT); 661 TX_STATUS_POSTPONE(DELAY);
1688 TX_STATUS_ENTRY(LONG_LIMIT); 662 TX_STATUS_POSTPONE(FEW_BYTES);
1689 TX_STATUS_ENTRY(FIFO_UNDERRUN); 663 TX_STATUS_POSTPONE(BT_PRIO);
1690 TX_STATUS_ENTRY(MGMNT_ABORT); 664 TX_STATUS_POSTPONE(QUIET_PERIOD);
1691 TX_STATUS_ENTRY(NEXT_FRAG); 665 TX_STATUS_POSTPONE(CALC_TTAK);
1692 TX_STATUS_ENTRY(LIFE_EXPIRE); 666 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
1693 TX_STATUS_ENTRY(DEST_PS); 667 TX_STATUS_FAIL(SHORT_LIMIT);
1694 TX_STATUS_ENTRY(ABORTED); 668 TX_STATUS_FAIL(LONG_LIMIT);
1695 TX_STATUS_ENTRY(BT_RETRY); 669 TX_STATUS_FAIL(FIFO_UNDERRUN);
1696 TX_STATUS_ENTRY(STA_INVALID); 670 TX_STATUS_FAIL(DRAIN_FLOW);
1697 TX_STATUS_ENTRY(FRAG_DROPPED); 671 TX_STATUS_FAIL(RFKILL_FLUSH);
1698 TX_STATUS_ENTRY(TID_DISABLE); 672 TX_STATUS_FAIL(LIFE_EXPIRE);
1699 TX_STATUS_ENTRY(FRAME_FLUSHED); 673 TX_STATUS_FAIL(DEST_PS);
1700 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); 674 TX_STATUS_FAIL(HOST_ABORTED);
1701 TX_STATUS_ENTRY(TX_LOCKED); 675 TX_STATUS_FAIL(BT_RETRY);
1702 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); 676 TX_STATUS_FAIL(STA_INVALID);
677 TX_STATUS_FAIL(FRAG_DROPPED);
678 TX_STATUS_FAIL(TID_DISABLE);
679 TX_STATUS_FAIL(FIFO_FLUSHED);
680 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
681 TX_STATUS_FAIL(FW_DROP);
682 TX_STATUS_FAIL(STA_COLOR_MISMATCH_DROP);
1703 } 683 }
1704 684
1705 return "UNKNOWN"; 685 return "UNKNOWN";
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index b55e4f39a9e1..3e5bffb6034f 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -352,11 +352,11 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
352 352
353static void iwl3945_unset_hw_params(struct iwl_priv *priv) 353static void iwl3945_unset_hw_params(struct iwl_priv *priv)
354{ 354{
355 if (priv->shared_virt) 355 if (priv->_3945.shared_virt)
356 dma_free_coherent(&priv->pci_dev->dev, 356 dma_free_coherent(&priv->pci_dev->dev,
357 sizeof(struct iwl3945_shared), 357 sizeof(struct iwl3945_shared),
358 priv->shared_virt, 358 priv->_3945.shared_virt,
359 priv->shared_phys); 359 priv->_3945.shared_phys);
360} 360}
361 361
362static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv, 362static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
@@ -505,24 +505,15 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
505 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); 505 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
506#endif 506#endif
507 507
508 /* drop all non-injected data frame if we are not associated */
509 if (ieee80211_is_data(fc) &&
510 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
511 (!iwl_is_associated(priv) ||
512 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
513 IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
514 goto drop_unlock;
515 }
516
517 spin_unlock_irqrestore(&priv->lock, flags); 508 spin_unlock_irqrestore(&priv->lock, flags);
518 509
519 hdr_len = ieee80211_hdrlen(fc); 510 hdr_len = ieee80211_hdrlen(fc);
520 511
521 /* Find (or create) index into station table for destination station */ 512 /* Find index into station table for destination station */
522 if (info->flags & IEEE80211_TX_CTL_INJECTED) 513 if (!info->control.sta)
523 sta_id = priv->hw_params.bcast_sta_id; 514 sta_id = priv->hw_params.bcast_sta_id;
524 else 515 else
525 sta_id = iwl_get_sta_id(priv, hdr); 516 sta_id = iwl_sta_id(info->control.sta);
526 if (sta_id == IWL_INVALID_STATION) { 517 if (sta_id == IWL_INVALID_STATION) {
527 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n", 518 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
528 hdr->addr1); 519 hdr->addr1);
@@ -607,9 +598,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
607 txq->need_update = 0; 598 txq->need_update = 0;
608 } 599 }
609 600
610 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", 601 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
611 le16_to_cpu(out_cmd->hdr.sequence)); 602 le16_to_cpu(out_cmd->hdr.sequence));
612 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags)); 603 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
613 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd)); 604 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
614 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, 605 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
615 ieee80211_hdrlen(fc)); 606 ieee80211_hdrlen(fc));
@@ -754,7 +745,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
754 if (iwl_is_associated(priv)) 745 if (iwl_is_associated(priv))
755 add_time = 746 add_time =
756 iwl3945_usecs_to_beacons( 747 iwl3945_usecs_to_beacons(
757 le64_to_cpu(params->start_time) - priv->last_tsf, 748 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
758 le16_to_cpu(priv->rxon_timing.beacon_interval)); 749 le16_to_cpu(priv->rxon_timing.beacon_interval));
759 750
760 memset(&spectrum, 0, sizeof(spectrum)); 751 memset(&spectrum, 0, sizeof(spectrum));
@@ -768,7 +759,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
768 759
769 if (iwl_is_associated(priv)) 760 if (iwl_is_associated(priv))
770 spectrum.start_time = 761 spectrum.start_time =
771 iwl3945_add_beacon_time(priv->last_beacon_time, 762 iwl3945_add_beacon_time(priv->_3945.last_beacon_time,
772 add_time, 763 add_time,
773 le16_to_cpu(priv->rxon_timing.beacon_interval)); 764 le16_to_cpu(priv->rxon_timing.beacon_interval));
774 else 765 else
@@ -857,7 +848,6 @@ static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
857#endif 848#endif
858 849
859 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); 850 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
860 return;
861} 851}
862 852
863static void iwl3945_bg_beacon_update(struct work_struct *work) 853static void iwl3945_bg_beacon_update(struct work_struct *work)
@@ -966,7 +956,7 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
966 * statistics request from the host as well as for the periodic 956 * statistics request from the host as well as for the periodic
967 * statistics notifications (after received beacons) from the uCode. 957 * statistics notifications (after received beacons) from the uCode.
968 */ 958 */
969 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics; 959 priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
970 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics; 960 priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
971 961
972 iwl_setup_rx_scan_handlers(priv); 962 iwl_setup_rx_scan_handlers(priv);
@@ -1613,9 +1603,6 @@ static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1613 return pos; 1603 return pos;
1614} 1604}
1615 1605
1616/* For sanity check only. Actual size is determined by uCode, typ. 512 */
1617#define IWL3945_MAX_EVENT_LOG_SIZE (512)
1618
1619#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20) 1606#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
1620 1607
1621int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, 1608int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
@@ -1642,16 +1629,16 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1642 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 1629 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1643 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 1630 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1644 1631
1645 if (capacity > IWL3945_MAX_EVENT_LOG_SIZE) { 1632 if (capacity > priv->cfg->max_event_log_size) {
1646 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", 1633 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
1647 capacity, IWL3945_MAX_EVENT_LOG_SIZE); 1634 capacity, priv->cfg->max_event_log_size);
1648 capacity = IWL3945_MAX_EVENT_LOG_SIZE; 1635 capacity = priv->cfg->max_event_log_size;
1649 } 1636 }
1650 1637
1651 if (next_entry > IWL3945_MAX_EVENT_LOG_SIZE) { 1638 if (next_entry > priv->cfg->max_event_log_size) {
1652 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", 1639 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1653 next_entry, IWL3945_MAX_EVENT_LOG_SIZE); 1640 next_entry, priv->cfg->max_event_log_size);
1654 next_entry = IWL3945_MAX_EVENT_LOG_SIZE; 1641 next_entry = priv->cfg->max_event_log_size;
1655 } 1642 }
1656 1643
1657 size = num_wraps ? capacity : next_entry; 1644 size = num_wraps ? capacity : next_entry;
@@ -1860,7 +1847,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1860static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, 1847static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1861 enum ieee80211_band band, 1848 enum ieee80211_band band,
1862 u8 is_active, u8 n_probes, 1849 u8 is_active, u8 n_probes,
1863 struct iwl3945_scan_channel *scan_ch) 1850 struct iwl3945_scan_channel *scan_ch,
1851 struct ieee80211_vif *vif)
1864{ 1852{
1865 struct ieee80211_channel *chan; 1853 struct ieee80211_channel *chan;
1866 const struct ieee80211_supported_band *sband; 1854 const struct ieee80211_supported_band *sband;
@@ -1874,7 +1862,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1874 return 0; 1862 return 0;
1875 1863
1876 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes); 1864 active_dwell = iwl_get_active_dwell_time(priv, band, n_probes);
1877 passive_dwell = iwl_get_passive_dwell_time(priv, band); 1865 passive_dwell = iwl_get_passive_dwell_time(priv, band, vif);
1878 1866
1879 if (passive_dwell <= active_dwell) 1867 if (passive_dwell <= active_dwell)
1880 passive_dwell = active_dwell + 1; 1868 passive_dwell = active_dwell + 1;
@@ -1947,7 +1935,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1947 added++; 1935 added++;
1948 } 1936 }
1949 1937
1950 IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added); 1938 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
1951 return added; 1939 return added;
1952} 1940}
1953 1941
@@ -2122,6 +2110,28 @@ static void iwl3945_nic_start(struct iwl_priv *priv)
2122 iwl_write32(priv, CSR_RESET, 0); 2110 iwl_write32(priv, CSR_RESET, 0);
2123} 2111}
2124 2112
2113#define IWL3945_UCODE_GET(item) \
2114static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
2115{ \
2116 return le32_to_cpu(ucode->u.v1.item); \
2117}
2118
2119static u32 iwl3945_ucode_get_header_size(u32 api_ver)
2120{
2121 return 24;
2122}
2123
2124static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
2125{
2126 return (u8 *) ucode->u.v1.data;
2127}
2128
2129IWL3945_UCODE_GET(inst_size);
2130IWL3945_UCODE_GET(data_size);
2131IWL3945_UCODE_GET(init_size);
2132IWL3945_UCODE_GET(init_data_size);
2133IWL3945_UCODE_GET(boot_size);
2134
2125/** 2135/**
2126 * iwl3945_read_ucode - Read uCode images from disk file. 2136 * iwl3945_read_ucode - Read uCode images from disk file.
2127 * 2137 *
@@ -2170,7 +2180,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2170 goto error; 2180 goto error;
2171 2181
2172 /* Make sure that we got at least our header! */ 2182 /* Make sure that we got at least our header! */
2173 if (ucode_raw->size < priv->cfg->ops->ucode->get_header_size(1)) { 2183 if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
2174 IWL_ERR(priv, "File size way too small!\n"); 2184 IWL_ERR(priv, "File size way too small!\n");
2175 ret = -EINVAL; 2185 ret = -EINVAL;
2176 goto err_release; 2186 goto err_release;
@@ -2181,13 +2191,12 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2181 2191
2182 priv->ucode_ver = le32_to_cpu(ucode->ver); 2192 priv->ucode_ver = le32_to_cpu(ucode->ver);
2183 api_ver = IWL_UCODE_API(priv->ucode_ver); 2193 api_ver = IWL_UCODE_API(priv->ucode_ver);
2184 inst_size = priv->cfg->ops->ucode->get_inst_size(ucode, api_ver); 2194 inst_size = iwl3945_ucode_get_inst_size(ucode);
2185 data_size = priv->cfg->ops->ucode->get_data_size(ucode, api_ver); 2195 data_size = iwl3945_ucode_get_data_size(ucode);
2186 init_size = priv->cfg->ops->ucode->get_init_size(ucode, api_ver); 2196 init_size = iwl3945_ucode_get_init_size(ucode);
2187 init_data_size = 2197 init_data_size = iwl3945_ucode_get_init_data_size(ucode);
2188 priv->cfg->ops->ucode->get_init_data_size(ucode, api_ver); 2198 boot_size = iwl3945_ucode_get_boot_size(ucode);
2189 boot_size = priv->cfg->ops->ucode->get_boot_size(ucode, api_ver); 2199 src = iwl3945_ucode_get_data(ucode);
2190 src = priv->cfg->ops->ucode->get_data(ucode, api_ver);
2191 2200
2192 /* api_ver should match the api version forming part of the 2201 /* api_ver should match the api version forming part of the
2193 * firmware filename ... but we don't check for that and only rely 2202 * firmware filename ... but we don't check for that and only rely
@@ -2236,7 +2245,7 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2236 2245
2237 2246
2238 /* Verify size of file vs. image size info in file's header */ 2247 /* Verify size of file vs. image size info in file's header */
2239 if (ucode_raw->size != priv->cfg->ops->ucode->get_header_size(api_ver) + 2248 if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
2240 inst_size + data_size + init_size + 2249 inst_size + data_size + init_size +
2241 init_data_size + boot_size) { 2250 init_data_size + boot_size) {
2242 2251
@@ -2490,8 +2499,6 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2490 goto restart; 2499 goto restart;
2491 } 2500 }
2492 2501
2493 iwl_clear_stations_table(priv);
2494
2495 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG); 2502 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
2496 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); 2503 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
2497 2504
@@ -2513,13 +2520,19 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2513 /* After the ALIVE response, we can send commands to 3945 uCode */ 2520 /* After the ALIVE response, we can send commands to 3945 uCode */
2514 set_bit(STATUS_ALIVE, &priv->status); 2521 set_bit(STATUS_ALIVE, &priv->status);
2515 2522
2523 if (priv->cfg->ops->lib->recover_from_tx_stall) {
2524 /* Enable timer to monitor the driver queues */
2525 mod_timer(&priv->monitor_recover,
2526 jiffies +
2527 msecs_to_jiffies(priv->cfg->monitor_recover_period));
2528 }
2529
2516 if (iwl_is_rfkill(priv)) 2530 if (iwl_is_rfkill(priv))
2517 return; 2531 return;
2518 2532
2519 ieee80211_wake_queues(priv->hw); 2533 ieee80211_wake_queues(priv->hw);
2520 2534
2521 priv->active_rate = priv->rates_mask; 2535 priv->active_rate = IWL_RATES_MASK;
2522 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
2523 2536
2524 iwl_power_update_mode(priv, true); 2537 iwl_power_update_mode(priv, true);
2525 2538
@@ -2531,11 +2544,11 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2531 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2544 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2532 } else { 2545 } else {
2533 /* Initialize our rx_config data */ 2546 /* Initialize our rx_config data */
2534 iwl_connection_init_rx_config(priv, priv->iw_mode); 2547 iwl_connection_init_rx_config(priv, NULL);
2535 } 2548 }
2536 2549
2537 /* Configure Bluetooth device coexistence support */ 2550 /* Configure Bluetooth device coexistence support */
2538 iwl_send_bt_config(priv); 2551 priv->cfg->ops->hcmd->send_bt_config(priv);
2539 2552
2540 /* Configure the adapter for unassociated operation */ 2553 /* Configure the adapter for unassociated operation */
2541 iwlcore_commit_rxon(priv); 2554 iwlcore_commit_rxon(priv);
@@ -2548,17 +2561,6 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2548 set_bit(STATUS_READY, &priv->status); 2561 set_bit(STATUS_READY, &priv->status);
2549 wake_up_interruptible(&priv->wait_command_queue); 2562 wake_up_interruptible(&priv->wait_command_queue);
2550 2563
2551 /* reassociate for ADHOC mode */
2552 if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
2553 struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
2554 priv->vif);
2555 if (beacon)
2556 iwl_mac_beacon_update(priv->hw, beacon);
2557 }
2558
2559 if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
2560 iwl_set_mode(priv, priv->iw_mode);
2561
2562 return; 2564 return;
2563 2565
2564 restart: 2566 restart:
@@ -2580,7 +2582,10 @@ static void __iwl3945_down(struct iwl_priv *priv)
2580 if (!exit_pending) 2582 if (!exit_pending)
2581 set_bit(STATUS_EXIT_PENDING, &priv->status); 2583 set_bit(STATUS_EXIT_PENDING, &priv->status);
2582 2584
2583 iwl_clear_stations_table(priv); 2585 /* Station information will now be cleared in device */
2586 iwl_clear_ucode_stations(priv);
2587 iwl_dealloc_bcast_station(priv);
2588 iwl_clear_driver_stations(priv);
2584 2589
2585 /* Unblock any waiting calls */ 2590 /* Unblock any waiting calls */
2586 wake_up_interruptible_all(&priv->wait_command_queue); 2591 wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2661,6 +2666,10 @@ static int __iwl3945_up(struct iwl_priv *priv)
2661{ 2666{
2662 int rc, i; 2667 int rc, i;
2663 2668
2669 rc = iwl_alloc_bcast_station(priv, false);
2670 if (rc)
2671 return rc;
2672
2664 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) { 2673 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
2665 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n"); 2674 IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
2666 return -EIO; 2675 return -EIO;
@@ -2714,12 +2723,10 @@ static int __iwl3945_up(struct iwl_priv *priv)
2714 2723
2715 for (i = 0; i < MAX_HW_RESTARTS; i++) { 2724 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2716 2725
2717 iwl_clear_stations_table(priv);
2718
2719 /* load bootstrap state machine, 2726 /* load bootstrap state machine,
2720 * load bootstrap program into processor's memory, 2727 * load bootstrap program into processor's memory,
2721 * prepare to load the "initialize" uCode */ 2728 * prepare to load the "initialize" uCode */
2722 priv->cfg->ops->lib->load_ucode(priv); 2729 rc = priv->cfg->ops->lib->load_ucode(priv);
2723 2730
2724 if (rc) { 2731 if (rc) {
2725 IWL_ERR(priv, 2732 IWL_ERR(priv,
@@ -2787,7 +2794,7 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
2787static void iwl3945_rfkill_poll(struct work_struct *data) 2794static void iwl3945_rfkill_poll(struct work_struct *data)
2788{ 2795{
2789 struct iwl_priv *priv = 2796 struct iwl_priv *priv =
2790 container_of(data, struct iwl_priv, rfkill_poll.work); 2797 container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
2791 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status); 2798 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2792 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL) 2799 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2793 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); 2800 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
@@ -2806,22 +2813,18 @@ static void iwl3945_rfkill_poll(struct work_struct *data)
2806 2813
2807 /* Keep this running, even if radio now enabled. This will be 2814 /* Keep this running, even if radio now enabled. This will be
2808 * cancelled in mac_start() if system decides to start again */ 2815 * cancelled in mac_start() if system decides to start again */
2809 queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 2816 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2810 round_jiffies_relative(2 * HZ)); 2817 round_jiffies_relative(2 * HZ));
2811 2818
2812} 2819}
2813 2820
2814#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 2821void iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
2815static void iwl3945_bg_request_scan(struct work_struct *data)
2816{ 2822{
2817 struct iwl_priv *priv =
2818 container_of(data, struct iwl_priv, request_scan);
2819 struct iwl_host_cmd cmd = { 2823 struct iwl_host_cmd cmd = {
2820 .id = REPLY_SCAN_CMD, 2824 .id = REPLY_SCAN_CMD,
2821 .len = sizeof(struct iwl3945_scan_cmd), 2825 .len = sizeof(struct iwl3945_scan_cmd),
2822 .flags = CMD_SIZE_HUGE, 2826 .flags = CMD_SIZE_HUGE,
2823 }; 2827 };
2824 int rc = 0;
2825 struct iwl3945_scan_cmd *scan; 2828 struct iwl3945_scan_cmd *scan;
2826 struct ieee80211_conf *conf = NULL; 2829 struct ieee80211_conf *conf = NULL;
2827 u8 n_probes = 0; 2830 u8 n_probes = 0;
@@ -2830,8 +2833,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2830 2833
2831 conf = ieee80211_get_hw_conf(priv->hw); 2834 conf = ieee80211_get_hw_conf(priv->hw);
2832 2835
2833 mutex_lock(&priv->mutex);
2834
2835 cancel_delayed_work(&priv->scan_check); 2836 cancel_delayed_work(&priv->scan_check);
2836 2837
2837 if (!iwl_is_ready(priv)) { 2838 if (!iwl_is_ready(priv)) {
@@ -2849,7 +2850,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2849 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 2850 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
2850 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests " 2851 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests "
2851 "Ignoring second request.\n"); 2852 "Ignoring second request.\n");
2852 rc = -EIO;
2853 goto done; 2853 goto done;
2854 } 2854 }
2855 2855
@@ -2875,20 +2875,15 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2875 goto done; 2875 goto done;
2876 } 2876 }
2877 2877
2878 if (!priv->scan_bands) { 2878 if (!priv->scan_cmd) {
2879 IWL_DEBUG_HC(priv, "Aborting scan due to no requested bands\n"); 2879 priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
2880 goto done; 2880 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
2881 } 2881 if (!priv->scan_cmd) {
2882 2882 IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
2883 if (!priv->scan) {
2884 priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) +
2885 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
2886 if (!priv->scan) {
2887 rc = -ENOMEM;
2888 goto done; 2883 goto done;
2889 } 2884 }
2890 } 2885 }
2891 scan = priv->scan; 2886 scan = priv->scan_cmd;
2892 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE); 2887 memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
2893 2888
2894 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH; 2889 scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
@@ -2904,7 +2899,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2904 IWL_DEBUG_INFO(priv, "Scanning while associated...\n"); 2899 IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
2905 2900
2906 spin_lock_irqsave(&priv->lock, flags); 2901 spin_lock_irqsave(&priv->lock, flags);
2907 interval = priv->beacon_int; 2902 interval = vif ? vif->bss_conf.beacon_int : 0;
2908 spin_unlock_irqrestore(&priv->lock, flags); 2903 spin_unlock_irqrestore(&priv->lock, flags);
2909 2904
2910 scan->suspend_time = 0; 2905 scan->suspend_time = 0;
@@ -2927,7 +2922,9 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2927 scan_suspend_time, interval); 2922 scan_suspend_time, interval);
2928 } 2923 }
2929 2924
2930 if (priv->scan_request->n_ssids) { 2925 if (priv->is_internal_short_scan) {
2926 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
2927 } else if (priv->scan_request->n_ssids) {
2931 int i, p = 0; 2928 int i, p = 0;
2932 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); 2929 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
2933 for (i = 0; i < priv->scan_request->n_ssids; i++) { 2930 for (i = 0; i < priv->scan_request->n_ssids; i++) {
@@ -2955,41 +2952,49 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2955 2952
2956 /* flags + rate selection */ 2953 /* flags + rate selection */
2957 2954
2958 if (priv->scan_bands & BIT(IEEE80211_BAND_2GHZ)) { 2955 switch (priv->scan_band) {
2956 case IEEE80211_BAND_2GHZ:
2959 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK; 2957 scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
2960 scan->tx_cmd.rate = IWL_RATE_1M_PLCP; 2958 scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
2961 scan->good_CRC_th = 0; 2959 scan->good_CRC_th = 0;
2962 band = IEEE80211_BAND_2GHZ; 2960 band = IEEE80211_BAND_2GHZ;
2963 } else if (priv->scan_bands & BIT(IEEE80211_BAND_5GHZ)) { 2961 break;
2962 case IEEE80211_BAND_5GHZ:
2964 scan->tx_cmd.rate = IWL_RATE_6M_PLCP; 2963 scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
2965 /* 2964 /*
2966 * If active scaning is requested but a certain channel 2965 * If active scaning is requested but a certain channel
2967 * is marked passive, we can do active scanning if we 2966 * is marked passive, we can do active scanning if we
2968 * detect transmissions. 2967 * detect transmissions.
2969 */ 2968 */
2970 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; 2969 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
2970 IWL_GOOD_CRC_TH_DISABLED;
2971 band = IEEE80211_BAND_5GHZ; 2971 band = IEEE80211_BAND_5GHZ;
2972 } else { 2972 break;
2973 IWL_WARN(priv, "Invalid scan band count\n"); 2973 default:
2974 IWL_WARN(priv, "Invalid scan band\n");
2974 goto done; 2975 goto done;
2975 } 2976 }
2976 2977
2977 scan->tx_cmd.len = cpu_to_le16( 2978 if (!priv->is_internal_short_scan) {
2979 scan->tx_cmd.len = cpu_to_le16(
2978 iwl_fill_probe_req(priv, 2980 iwl_fill_probe_req(priv,
2979 (struct ieee80211_mgmt *)scan->data, 2981 (struct ieee80211_mgmt *)scan->data,
2980 priv->scan_request->ie, 2982 priv->scan_request->ie,
2981 priv->scan_request->ie_len, 2983 priv->scan_request->ie_len,
2982 IWL_MAX_SCAN_SIZE - sizeof(*scan))); 2984 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
2983 2985 } else {
2986 scan->tx_cmd.len = cpu_to_le16(
2987 iwl_fill_probe_req(priv,
2988 (struct ieee80211_mgmt *)scan->data,
2989 NULL, 0,
2990 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
2991 }
2984 /* select Rx antennas */ 2992 /* select Rx antennas */
2985 scan->flags |= iwl3945_get_antenna_flags(priv); 2993 scan->flags |= iwl3945_get_antenna_flags(priv);
2986 2994
2987 if (iwl_is_monitor_mode(priv))
2988 scan->filter_flags = RXON_FILTER_PROMISC_MSK;
2989
2990 scan->channel_count = 2995 scan->channel_count =
2991 iwl3945_get_channels_for_scan(priv, band, is_active, n_probes, 2996 iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
2992 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]); 2997 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)], vif);
2993 2998
2994 if (scan->channel_count == 0) { 2999 if (scan->channel_count == 0) {
2995 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count); 3000 IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
@@ -3002,14 +3007,12 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
3002 scan->len = cpu_to_le16(cmd.len); 3007 scan->len = cpu_to_le16(cmd.len);
3003 3008
3004 set_bit(STATUS_SCAN_HW, &priv->status); 3009 set_bit(STATUS_SCAN_HW, &priv->status);
3005 rc = iwl_send_cmd_sync(priv, &cmd); 3010 if (iwl_send_cmd_sync(priv, &cmd))
3006 if (rc)
3007 goto done; 3011 goto done;
3008 3012
3009 queue_delayed_work(priv->workqueue, &priv->scan_check, 3013 queue_delayed_work(priv->workqueue, &priv->scan_check,
3010 IWL_SCAN_CHECK_WATCHDOG); 3014 IWL_SCAN_CHECK_WATCHDOG);
3011 3015
3012 mutex_unlock(&priv->mutex);
3013 return; 3016 return;
3014 3017
3015 done: 3018 done:
@@ -3023,7 +3026,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
3023 3026
3024 /* inform mac80211 scan aborted */ 3027 /* inform mac80211 scan aborted */
3025 queue_work(priv->workqueue, &priv->scan_completed); 3028 queue_work(priv->workqueue, &priv->scan_completed);
3026 mutex_unlock(&priv->mutex);
3027} 3029}
3028 3030
3029static void iwl3945_bg_restart(struct work_struct *data) 3031static void iwl3945_bg_restart(struct work_struct *data)
@@ -3065,28 +3067,25 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
3065 mutex_unlock(&priv->mutex); 3067 mutex_unlock(&priv->mutex);
3066} 3068}
3067 3069
3068#define IWL_DELAY_NEXT_SCAN (HZ*2) 3070void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3069
3070void iwl3945_post_associate(struct iwl_priv *priv)
3071{ 3071{
3072 int rc = 0; 3072 int rc = 0;
3073 struct ieee80211_conf *conf = NULL; 3073 struct ieee80211_conf *conf = NULL;
3074 3074
3075 if (priv->iw_mode == NL80211_IFTYPE_AP) { 3075 if (!vif || !priv->is_open)
3076 return;
3077
3078 if (vif->type == NL80211_IFTYPE_AP) {
3076 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); 3079 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
3077 return; 3080 return;
3078 } 3081 }
3079 3082
3080
3081 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 3083 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3082 priv->assoc_id, priv->active_rxon.bssid_addr); 3084 vif->bss_conf.aid, priv->active_rxon.bssid_addr);
3083 3085
3084 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3086 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3085 return; 3087 return;
3086 3088
3087 if (!priv->vif || !priv->is_open)
3088 return;
3089
3090 iwl_scan_cancel_timeout(priv, 200); 3089 iwl_scan_cancel_timeout(priv, 200);
3091 3090
3092 conf = ieee80211_get_hw_conf(priv->hw); 3091 conf = ieee80211_get_hw_conf(priv->hw);
@@ -3095,7 +3094,7 @@ void iwl3945_post_associate(struct iwl_priv *priv)
3095 iwlcore_commit_rxon(priv); 3094 iwlcore_commit_rxon(priv);
3096 3095
3097 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); 3096 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
3098 iwl_setup_rxon_timing(priv); 3097 iwl_setup_rxon_timing(priv, vif);
3099 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, 3098 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
3100 sizeof(priv->rxon_timing), &priv->rxon_timing); 3099 sizeof(priv->rxon_timing), &priv->rxon_timing);
3101 if (rc) 3100 if (rc)
@@ -3104,57 +3103,40 @@ void iwl3945_post_associate(struct iwl_priv *priv)
3104 3103
3105 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3104 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
3106 3105
3107 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 3106 priv->staging_rxon.assoc_id = cpu_to_le16(vif->bss_conf.aid);
3108 3107
3109 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", 3108 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
3110 priv->assoc_id, priv->beacon_int); 3109 vif->bss_conf.aid, vif->bss_conf.beacon_int);
3111 3110
3112 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) 3111 if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
3113 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 3112 priv->staging_rxon.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3114 else 3113 else
3115 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 3114 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3116 3115
3117 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3116 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
3118 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME) 3117 if (vif->bss_conf.assoc_capability & WLAN_CAPABILITY_SHORT_SLOT_TIME)
3119 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK; 3118 priv->staging_rxon.flags |= RXON_FLG_SHORT_SLOT_MSK;
3120 else 3119 else
3121 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 3120 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3122 3121
3123 if (priv->iw_mode == NL80211_IFTYPE_ADHOC) 3122 if (vif->type == NL80211_IFTYPE_ADHOC)
3124 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 3123 priv->staging_rxon.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3125
3126 } 3124 }
3127 3125
3128 iwlcore_commit_rxon(priv); 3126 iwlcore_commit_rxon(priv);
3129 3127
3130 switch (priv->iw_mode) { 3128 switch (vif->type) {
3131 case NL80211_IFTYPE_STATION: 3129 case NL80211_IFTYPE_STATION:
3132 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID); 3130 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
3133 break; 3131 break;
3134
3135 case NL80211_IFTYPE_ADHOC: 3132 case NL80211_IFTYPE_ADHOC:
3136
3137 priv->assoc_id = 1;
3138 iwl_add_station(priv, priv->bssid, 0, CMD_SYNC, NULL);
3139 iwl3945_sync_sta(priv, IWL_STA_ID,
3140 (priv->band == IEEE80211_BAND_5GHZ) ?
3141 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
3142 CMD_ASYNC);
3143 iwl3945_rate_scale_init(priv->hw, IWL_STA_ID);
3144 iwl3945_send_beacon_cmd(priv); 3133 iwl3945_send_beacon_cmd(priv);
3145
3146 break; 3134 break;
3147
3148 default: 3135 default:
3149 IWL_ERR(priv, "%s Should not be called in %d mode\n", 3136 IWL_ERR(priv, "%s Should not be called in %d mode\n",
3150 __func__, priv->iw_mode); 3137 __func__, vif->type);
3151 break; 3138 break;
3152 } 3139 }
3153
3154 iwl_activate_qos(priv, 0);
3155
3156 /* we have just associated, don't start scan too early */
3157 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
3158} 3140}
3159 3141
3160/***************************************************************************** 3142/*****************************************************************************
@@ -3213,7 +3195,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
3213 3195
3214 /* ucode is running and will send rfkill notifications, 3196 /* ucode is running and will send rfkill notifications,
3215 * no need to poll the killswitch state anymore */ 3197 * no need to poll the killswitch state anymore */
3216 cancel_delayed_work(&priv->rfkill_poll); 3198 cancel_delayed_work(&priv->_3945.rfkill_poll);
3217 3199
3218 iwl_led_start(priv); 3200 iwl_led_start(priv);
3219 3201
@@ -3254,7 +3236,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
3254 flush_workqueue(priv->workqueue); 3236 flush_workqueue(priv->workqueue);
3255 3237
3256 /* start polling the killswitch state again */ 3238 /* start polling the killswitch state again */
3257 queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 3239 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
3258 round_jiffies_relative(2 * HZ)); 3240 round_jiffies_relative(2 * HZ));
3259 3241
3260 IWL_DEBUG_MAC80211(priv, "leave\n"); 3242 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -3276,7 +3258,7 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3276 return NETDEV_TX_OK; 3258 return NETDEV_TX_OK;
3277} 3259}
3278 3260
3279void iwl3945_config_ap(struct iwl_priv *priv) 3261void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif)
3280{ 3262{
3281 int rc = 0; 3263 int rc = 0;
3282 3264
@@ -3292,7 +3274,7 @@ void iwl3945_config_ap(struct iwl_priv *priv)
3292 3274
3293 /* RXON Timing */ 3275 /* RXON Timing */
3294 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd)); 3276 memset(&priv->rxon_timing, 0, sizeof(struct iwl_rxon_time_cmd));
3295 iwl_setup_rxon_timing(priv); 3277 iwl_setup_rxon_timing(priv, vif);
3296 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING, 3278 rc = iwl_send_cmd_pdu(priv, REPLY_RXON_TIMING,
3297 sizeof(priv->rxon_timing), 3279 sizeof(priv->rxon_timing),
3298 &priv->rxon_timing); 3280 &priv->rxon_timing);
@@ -3300,9 +3282,10 @@ void iwl3945_config_ap(struct iwl_priv *priv)
3300 IWL_WARN(priv, "REPLY_RXON_TIMING failed - " 3282 IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
3301 "Attempting to continue.\n"); 3283 "Attempting to continue.\n");
3302 3284
3303 /* FIXME: what should be the assoc_id for AP? */ 3285 priv->staging_rxon.assoc_id = 0;
3304 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id); 3286
3305 if (priv->assoc_capability & WLAN_CAPABILITY_SHORT_PREAMBLE) 3287 if (vif->bss_conf.assoc_capability &
3288 WLAN_CAPABILITY_SHORT_PREAMBLE)
3306 priv->staging_rxon.flags |= 3289 priv->staging_rxon.flags |=
3307 RXON_FLG_SHORT_PREAMBLE_MSK; 3290 RXON_FLG_SHORT_PREAMBLE_MSK;
3308 else 3291 else
@@ -3310,22 +3293,21 @@ void iwl3945_config_ap(struct iwl_priv *priv)
3310 ~RXON_FLG_SHORT_PREAMBLE_MSK; 3293 ~RXON_FLG_SHORT_PREAMBLE_MSK;
3311 3294
3312 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) { 3295 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) {
3313 if (priv->assoc_capability & 3296 if (vif->bss_conf.assoc_capability &
3314 WLAN_CAPABILITY_SHORT_SLOT_TIME) 3297 WLAN_CAPABILITY_SHORT_SLOT_TIME)
3315 priv->staging_rxon.flags |= 3298 priv->staging_rxon.flags |=
3316 RXON_FLG_SHORT_SLOT_MSK; 3299 RXON_FLG_SHORT_SLOT_MSK;
3317 else 3300 else
3318 priv->staging_rxon.flags &= 3301 priv->staging_rxon.flags &=
3319 ~RXON_FLG_SHORT_SLOT_MSK; 3302 ~RXON_FLG_SHORT_SLOT_MSK;
3320 3303
3321 if (priv->iw_mode == NL80211_IFTYPE_ADHOC) 3304 if (vif->type == NL80211_IFTYPE_ADHOC)
3322 priv->staging_rxon.flags &= 3305 priv->staging_rxon.flags &=
3323 ~RXON_FLG_SHORT_SLOT_MSK; 3306 ~RXON_FLG_SHORT_SLOT_MSK;
3324 } 3307 }
3325 /* restore RXON assoc */ 3308 /* restore RXON assoc */
3326 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3309 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
3327 iwlcore_commit_rxon(priv); 3310 iwlcore_commit_rxon(priv);
3328 iwl_add_station(priv, iwl_bcast_addr, 0, CMD_SYNC, NULL);
3329 } 3311 }
3330 iwl3945_send_beacon_cmd(priv); 3312 iwl3945_send_beacon_cmd(priv);
3331 3313
@@ -3340,7 +3322,6 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3340 struct ieee80211_key_conf *key) 3322 struct ieee80211_key_conf *key)
3341{ 3323{
3342 struct iwl_priv *priv = hw->priv; 3324 struct iwl_priv *priv = hw->priv;
3343 const u8 *addr;
3344 int ret = 0; 3325 int ret = 0;
3345 u8 sta_id = IWL_INVALID_STATION; 3326 u8 sta_id = IWL_INVALID_STATION;
3346 u8 static_key; 3327 u8 static_key;
@@ -3352,21 +3333,24 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3352 return -EOPNOTSUPP; 3333 return -EOPNOTSUPP;
3353 } 3334 }
3354 3335
3355 addr = sta ? sta->addr : iwl_bcast_addr;
3356 static_key = !iwl_is_associated(priv); 3336 static_key = !iwl_is_associated(priv);
3357 3337
3358 if (!static_key) { 3338 if (!static_key) {
3359 sta_id = iwl_find_station(priv, addr); 3339 if (!sta) {
3360 if (sta_id == IWL_INVALID_STATION) { 3340 sta_id = priv->hw_params.bcast_sta_id;
3361 IWL_DEBUG_MAC80211(priv, "leave - %pM not in station map.\n", 3341 } else {
3362 addr); 3342 sta_id = iwl_sta_id(sta);
3363 return -EINVAL; 3343 if (sta_id == IWL_INVALID_STATION) {
3344 IWL_DEBUG_MAC80211(priv,
3345 "leave - %pM not in station map.\n",
3346 sta->addr);
3347 return -EINVAL;
3348 }
3364 } 3349 }
3365 } 3350 }
3366 3351
3367 mutex_lock(&priv->mutex); 3352 mutex_lock(&priv->mutex);
3368 iwl_scan_cancel_timeout(priv, 100); 3353 iwl_scan_cancel_timeout(priv, 100);
3369 mutex_unlock(&priv->mutex);
3370 3354
3371 switch (cmd) { 3355 switch (cmd) {
3372 case SET_KEY: 3356 case SET_KEY:
@@ -3387,11 +3371,45 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3387 ret = -EINVAL; 3371 ret = -EINVAL;
3388 } 3372 }
3389 3373
3374 mutex_unlock(&priv->mutex);
3390 IWL_DEBUG_MAC80211(priv, "leave\n"); 3375 IWL_DEBUG_MAC80211(priv, "leave\n");
3391 3376
3392 return ret; 3377 return ret;
3393} 3378}
3394 3379
3380static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3381 struct ieee80211_vif *vif,
3382 struct ieee80211_sta *sta)
3383{
3384 struct iwl_priv *priv = hw->priv;
3385 struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
3386 int ret;
3387 bool is_ap = vif->type == NL80211_IFTYPE_STATION;
3388 u8 sta_id;
3389
3390 sta_priv->common.sta_id = IWL_INVALID_STATION;
3391
3392 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
3393 sta->addr);
3394
3395 ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
3396 &sta_id);
3397 if (ret) {
3398 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3399 sta->addr, ret);
3400 /* Should we return success if return code is EEXIST ? */
3401 return ret;
3402 }
3403
3404 sta_priv->common.sta_id = sta_id;
3405
3406 /* Initialize rate scaling */
3407 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
3408 sta->addr);
3409 iwl3945_rs_rate_init(priv, sta, sta_id);
3410
3411 return 0;
3412}
3395/***************************************************************************** 3413/*****************************************************************************
3396 * 3414 *
3397 * sysfs attributes 3415 * sysfs attributes
@@ -3591,7 +3609,7 @@ static ssize_t store_measurement(struct device *d,
3591 struct iwl_priv *priv = dev_get_drvdata(d); 3609 struct iwl_priv *priv = dev_get_drvdata(d);
3592 struct ieee80211_measurement_params params = { 3610 struct ieee80211_measurement_params params = {
3593 .channel = le16_to_cpu(priv->active_rxon.channel), 3611 .channel = le16_to_cpu(priv->active_rxon.channel),
3594 .start_time = cpu_to_le64(priv->last_tsf), 3612 .start_time = cpu_to_le64(priv->_3945.last_tsf),
3595 .duration = cpu_to_le16(1), 3613 .duration = cpu_to_le16(1),
3596 }; 3614 };
3597 u8 type = IWL_MEASURE_BASIC; 3615 u8 type = IWL_MEASURE_BASIC;
@@ -3655,44 +3673,6 @@ static ssize_t show_channels(struct device *d,
3655 3673
3656static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL); 3674static DEVICE_ATTR(channels, S_IRUSR, show_channels, NULL);
3657 3675
3658static ssize_t show_statistics(struct device *d,
3659 struct device_attribute *attr, char *buf)
3660{
3661 struct iwl_priv *priv = dev_get_drvdata(d);
3662 u32 size = sizeof(struct iwl3945_notif_statistics);
3663 u32 len = 0, ofs = 0;
3664 u8 *data = (u8 *)&priv->statistics_39;
3665 int rc = 0;
3666
3667 if (!iwl_is_alive(priv))
3668 return -EAGAIN;
3669
3670 mutex_lock(&priv->mutex);
3671 rc = iwl_send_statistics_request(priv, CMD_SYNC, false);
3672 mutex_unlock(&priv->mutex);
3673
3674 if (rc) {
3675 len = sprintf(buf,
3676 "Error sending statistics request: 0x%08X\n", rc);
3677 return len;
3678 }
3679
3680 while (size && (PAGE_SIZE - len)) {
3681 hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
3682 PAGE_SIZE - len, 1);
3683 len = strlen(buf);
3684 if (PAGE_SIZE - len)
3685 buf[len++] = '\n';
3686
3687 ofs += 16;
3688 size -= min(size, 16U);
3689 }
3690
3691 return len;
3692}
3693
3694static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
3695
3696static ssize_t show_antenna(struct device *d, 3676static ssize_t show_antenna(struct device *d,
3697 struct device_attribute *attr, char *buf) 3677 struct device_attribute *attr, char *buf)
3698{ 3678{
@@ -3774,14 +3754,21 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3774 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 3754 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
3775 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 3755 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
3776 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 3756 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
3777 INIT_DELAYED_WORK(&priv->rfkill_poll, iwl3945_rfkill_poll); 3757 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
3778 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); 3758 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
3779 INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
3780 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); 3759 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
3760 INIT_WORK(&priv->start_internal_scan, iwl_bg_start_internal_scan);
3781 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check); 3761 INIT_DELAYED_WORK(&priv->scan_check, iwl_bg_scan_check);
3782 3762
3783 iwl3945_hw_setup_deferred_work(priv); 3763 iwl3945_hw_setup_deferred_work(priv);
3784 3764
3765 if (priv->cfg->ops->lib->recover_from_tx_stall) {
3766 init_timer(&priv->monitor_recover);
3767 priv->monitor_recover.data = (unsigned long)priv;
3768 priv->monitor_recover.function =
3769 priv->cfg->ops->lib->recover_from_tx_stall;
3770 }
3771
3785 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3772 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3786 iwl3945_irq_tasklet, (unsigned long)priv); 3773 iwl3945_irq_tasklet, (unsigned long)priv);
3787} 3774}
@@ -3793,7 +3780,10 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
3793 cancel_delayed_work_sync(&priv->init_alive_start); 3780 cancel_delayed_work_sync(&priv->init_alive_start);
3794 cancel_delayed_work(&priv->scan_check); 3781 cancel_delayed_work(&priv->scan_check);
3795 cancel_delayed_work(&priv->alive_start); 3782 cancel_delayed_work(&priv->alive_start);
3783 cancel_work_sync(&priv->start_internal_scan);
3796 cancel_work_sync(&priv->beacon_update); 3784 cancel_work_sync(&priv->beacon_update);
3785 if (priv->cfg->ops->lib->recover_from_tx_stall)
3786 del_timer_sync(&priv->monitor_recover);
3797} 3787}
3798 3788
3799static struct attribute *iwl3945_sysfs_entries[] = { 3789static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3804,7 +3794,6 @@ static struct attribute *iwl3945_sysfs_entries[] = {
3804 &dev_attr_filter_flags.attr, 3794 &dev_attr_filter_flags.attr,
3805 &dev_attr_measurement.attr, 3795 &dev_attr_measurement.attr,
3806 &dev_attr_retry_rate.attr, 3796 &dev_attr_retry_rate.attr,
3807 &dev_attr_statistics.attr,
3808 &dev_attr_status.attr, 3797 &dev_attr_status.attr,
3809 &dev_attr_temperature.attr, 3798 &dev_attr_temperature.attr,
3810 &dev_attr_tx_power.attr, 3799 &dev_attr_tx_power.attr,
@@ -3831,7 +3820,9 @@ static struct ieee80211_ops iwl3945_hw_ops = {
3831 .conf_tx = iwl_mac_conf_tx, 3820 .conf_tx = iwl_mac_conf_tx,
3832 .reset_tsf = iwl_mac_reset_tsf, 3821 .reset_tsf = iwl_mac_reset_tsf,
3833 .bss_info_changed = iwl_bss_info_changed, 3822 .bss_info_changed = iwl_bss_info_changed,
3834 .hw_scan = iwl_mac_hw_scan 3823 .hw_scan = iwl_mac_hw_scan,
3824 .sta_add = iwl3945_mac_sta_add,
3825 .sta_remove = iwl_mac_sta_remove,
3835}; 3826};
3836 3827
3837static int iwl3945_init_drv(struct iwl_priv *priv) 3828static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3850,9 +3841,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3850 mutex_init(&priv->mutex); 3841 mutex_init(&priv->mutex);
3851 mutex_init(&priv->sync_cmd_mutex); 3842 mutex_init(&priv->sync_cmd_mutex);
3852 3843
3853 /* Clear the driver's (not device's) station table */
3854 iwl_clear_stations_table(priv);
3855
3856 priv->ieee_channels = NULL; 3844 priv->ieee_channels = NULL;
3857 priv->ieee_rates = NULL; 3845 priv->ieee_rates = NULL;
3858 priv->band = IEEE80211_BAND_2GHZ; 3846 priv->band = IEEE80211_BAND_2GHZ;
@@ -3860,12 +3848,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3860 priv->iw_mode = NL80211_IFTYPE_STATION; 3848 priv->iw_mode = NL80211_IFTYPE_STATION;
3861 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; 3849 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3862 3850
3863 iwl_reset_qos(priv);
3864
3865 priv->qos_data.qos_active = 0;
3866 priv->qos_data.qos_cap.val = 0;
3867
3868 priv->rates_mask = IWL_RATES_MASK;
3869 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; 3851 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
3870 3852
3871 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { 3853 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
@@ -3901,6 +3883,8 @@ err:
3901 return ret; 3883 return ret;
3902} 3884}
3903 3885
3886#define IWL3945_MAX_PROBE_REQUEST 200
3887
3904static int iwl3945_setup_mac(struct iwl_priv *priv) 3888static int iwl3945_setup_mac(struct iwl_priv *priv)
3905{ 3889{
3906 int ret; 3890 int ret;
@@ -3908,10 +3892,10 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3908 3892
3909 hw->rate_control_algorithm = "iwl-3945-rs"; 3893 hw->rate_control_algorithm = "iwl-3945-rs";
3910 hw->sta_data_size = sizeof(struct iwl3945_sta_priv); 3894 hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
3895 hw->vif_data_size = sizeof(struct iwl_vif_priv);
3911 3896
3912 /* Tell mac80211 our characteristics */ 3897 /* Tell mac80211 our characteristics */
3913 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3898 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3914 IEEE80211_HW_NOISE_DBM |
3915 IEEE80211_HW_SPECTRUM_MGMT; 3899 IEEE80211_HW_SPECTRUM_MGMT;
3916 3900
3917 if (!priv->cfg->broken_powersave) 3901 if (!priv->cfg->broken_powersave)
@@ -3927,7 +3911,7 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
3927 3911
3928 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945; 3912 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
3929 /* we create the 802.11 header and a zero-length SSID element */ 3913 /* we create the 802.11 header and a zero-length SSID element */
3930 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2; 3914 hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
3931 3915
3932 /* Default value; 4 EDCA QOS priorities */ 3916 /* Default value; 4 EDCA QOS priorities */
3933 hw->queues = 4; 3917 hw->queues = 4;
@@ -4130,7 +4114,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4130 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); 4114 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
4131 4115
4132 /* Start monitoring the killswitch */ 4116 /* Start monitoring the killswitch */
4133 queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 4117 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
4134 2 * HZ); 4118 2 * HZ);
4135 4119
4136 return 0; 4120 return 0;
@@ -4204,7 +4188,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4204 4188
4205 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); 4189 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
4206 4190
4207 cancel_delayed_work_sync(&priv->rfkill_poll); 4191 cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
4208 4192
4209 iwl3945_dealloc_ucode_pci(priv); 4193 iwl3945_dealloc_ucode_pci(priv);
4210 4194
@@ -4213,7 +4197,6 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4213 iwl3945_hw_txq_ctx_free(priv); 4197 iwl3945_hw_txq_ctx_free(priv);
4214 4198
4215 iwl3945_unset_hw_params(priv); 4199 iwl3945_unset_hw_params(priv);
4216 iwl_clear_stations_table(priv);
4217 4200
4218 /*netif_stop_queue(dev); */ 4201 /*netif_stop_queue(dev); */
4219 flush_workqueue(priv->workqueue); 4202 flush_workqueue(priv->workqueue);
@@ -4235,7 +4218,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4235 4218
4236 iwl_free_channel_map(priv); 4219 iwl_free_channel_map(priv);
4237 iwlcore_free_geos(priv); 4220 iwlcore_free_geos(priv);
4238 kfree(priv->scan); 4221 kfree(priv->scan_cmd);
4239 if (priv->ibss_beacon) 4222 if (priv->ibss_beacon)
4240 dev_kfree_skb(priv->ibss_beacon); 4223 dev_kfree_skb(priv->ibss_beacon);
4241 4224
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index b9d34a766964..03f998d098c5 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -17,7 +17,7 @@ config IWM
17config IWM_DEBUG 17config IWM_DEBUG
18 bool "Enable full debugging output in iwmc3200wifi" 18 bool "Enable full debugging output in iwmc3200wifi"
19 depends on IWM && DEBUG_FS 19 depends on IWM && DEBUG_FS
20 ---help--- 20 help
21 This option will enable debug tracing and setting for iwm 21 This option will enable debug tracing and setting for iwm
22 22
23 You can set the debug level and module through debugfs. By 23 You can set the debug level and module through debugfs. By
@@ -30,3 +30,10 @@ config IWM_DEBUG
30 Or, if you want the full debug, for all modules: 30 Or, if you want the full debug, for all modules:
31 echo 0xff > /sys/kernel/debug/iwm/phyN/debug/level 31 echo 0xff > /sys/kernel/debug/iwm/phyN/debug/level
32 echo 0xff > /sys/kernel/debug/iwm/phyN/debug/modules 32 echo 0xff > /sys/kernel/debug/iwm/phyN/debug/modules
33
34config IWM_TRACING
35 bool "Enable event tracing for iwmc3200wifi"
36 depends on IWM && EVENT_TRACING
37 help
38 Say Y here to trace all the commands and responses between
39 the driver and firmware (including TX/RX frames) with ftrace.
diff --git a/drivers/net/wireless/iwmc3200wifi/Makefile b/drivers/net/wireless/iwmc3200wifi/Makefile
index d34291b652d3..cdc7e07ba113 100644
--- a/drivers/net/wireless/iwmc3200wifi/Makefile
+++ b/drivers/net/wireless/iwmc3200wifi/Makefile
@@ -3,3 +3,8 @@ iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o
3iwmc3200wifi-objs += commands.o cfg80211.o eeprom.o 3iwmc3200wifi-objs += commands.o cfg80211.o eeprom.o
4 4
5iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o 5iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o
6iwmc3200wifi-$(CONFIG_IWM_TRACING) += trace.o
7
8CFLAGS_trace.o := -I$(src)
9
10ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwmc3200wifi/bus.h b/drivers/net/wireless/iwmc3200wifi/bus.h
index 836663eec257..62edd5888a7b 100644
--- a/drivers/net/wireless/iwmc3200wifi/bus.h
+++ b/drivers/net/wireless/iwmc3200wifi/bus.h
@@ -31,7 +31,7 @@ struct iwm_if_ops {
31 int (*disable)(struct iwm_priv *iwm); 31 int (*disable)(struct iwm_priv *iwm);
32 int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count); 32 int (*send_chunk)(struct iwm_priv *iwm, u8* buf, int count);
33 33
34 int (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir); 34 void (*debugfs_init)(struct iwm_priv *iwm, struct dentry *parent_dir);
35 void (*debugfs_exit)(struct iwm_priv *iwm); 35 void (*debugfs_exit)(struct iwm_priv *iwm);
36 36
37 const char *umac_name; 37 const char *umac_name;
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index a1d45cce0ebc..902e95f70f6e 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -264,7 +264,7 @@ static int iwm_cfg80211_get_station(struct wiphy *wiphy,
264int iwm_cfg80211_inform_bss(struct iwm_priv *iwm) 264int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
265{ 265{
266 struct wiphy *wiphy = iwm_to_wiphy(iwm); 266 struct wiphy *wiphy = iwm_to_wiphy(iwm);
267 struct iwm_bss_info *bss, *next; 267 struct iwm_bss_info *bss;
268 struct iwm_umac_notif_bss_info *umac_bss; 268 struct iwm_umac_notif_bss_info *umac_bss;
269 struct ieee80211_mgmt *mgmt; 269 struct ieee80211_mgmt *mgmt;
270 struct ieee80211_channel *channel; 270 struct ieee80211_channel *channel;
@@ -272,7 +272,7 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
272 s32 signal; 272 s32 signal;
273 int freq; 273 int freq;
274 274
275 list_for_each_entry_safe(bss, next, &iwm->bss_list, node) { 275 list_for_each_entry(bss, &iwm->bss_list, node) {
276 umac_bss = bss->bss; 276 umac_bss = bss->bss;
277 mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf); 277 mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
278 278
@@ -726,23 +726,26 @@ static int iwm_cfg80211_set_power_mgmt(struct wiphy *wiphy,
726 CFG_POWER_INDEX, iwm->conf.power_index); 726 CFG_POWER_INDEX, iwm->conf.power_index);
727} 727}
728 728
729int iwm_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *netdev, 729static int iwm_cfg80211_set_pmksa(struct wiphy *wiphy,
730 struct cfg80211_pmksa *pmksa) 730 struct net_device *netdev,
731 struct cfg80211_pmksa *pmksa)
731{ 732{
732 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 733 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
733 734
734 return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD); 735 return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD);
735} 736}
736 737
737int iwm_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *netdev, 738static int iwm_cfg80211_del_pmksa(struct wiphy *wiphy,
738 struct cfg80211_pmksa *pmksa) 739 struct net_device *netdev,
740 struct cfg80211_pmksa *pmksa)
739{ 741{
740 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 742 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
741 743
742 return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL); 744 return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL);
743} 745}
744 746
745int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) 747static int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy,
748 struct net_device *netdev)
746{ 749{
747 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 750 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
748 struct cfg80211_pmksa pmksa; 751 struct cfg80211_pmksa pmksa;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 42df7262f9f7..330c7d9cf101 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -507,7 +507,7 @@ static int iwm_target_read(struct iwm_priv *iwm, __le32 address,
507 return ret; 507 return ret;
508 } 508 }
509 509
510 /* When succeding, the send_target routine returns the seq number */ 510 /* When succeeding, the send_target routine returns the seq number */
511 seq_num = ret; 511 seq_num = ret;
512 512
513 ret = wait_event_interruptible_timeout(iwm->nonwifi_queue, 513 ret = wait_event_interruptible_timeout(iwm->nonwifi_queue,
@@ -782,10 +782,9 @@ int iwm_send_mlme_profile(struct iwm_priv *iwm)
782 return 0; 782 return 0;
783} 783}
784 784
785int iwm_invalidate_mlme_profile(struct iwm_priv *iwm) 785int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
786{ 786{
787 struct iwm_umac_invalidate_profile invalid; 787 struct iwm_umac_invalidate_profile invalid;
788 int ret;
789 788
790 invalid.hdr.oid = UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE; 789 invalid.hdr.oid = UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE;
791 invalid.hdr.buf_size = 790 invalid.hdr.buf_size =
@@ -794,7 +793,14 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
794 793
795 invalid.reason = WLAN_REASON_UNSPECIFIED; 794 invalid.reason = WLAN_REASON_UNSPECIFIED;
796 795
797 ret = iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1); 796 return iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1);
797}
798
799int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
800{
801 int ret;
802
803 ret = __iwm_invalidate_mlme_profile(iwm);
798 if (ret) 804 if (ret)
799 return ret; 805 return ret;
800 806
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 3dfd9f0e9003..7e16bcf59978 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -488,6 +488,7 @@ int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
488 void *payload, u16 payload_size); 488 void *payload, u16 payload_size);
489int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags); 489int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags);
490int iwm_send_mlme_profile(struct iwm_priv *iwm); 490int iwm_send_mlme_profile(struct iwm_priv *iwm);
491int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
491int iwm_invalidate_mlme_profile(struct iwm_priv *iwm); 492int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
492int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id); 493int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
493int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx); 494int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
diff --git a/drivers/net/wireless/iwmc3200wifi/debug.h b/drivers/net/wireless/iwmc3200wifi/debug.h
index e35c9b693d1f..a0c13a49ab3c 100644
--- a/drivers/net/wireless/iwmc3200wifi/debug.h
+++ b/drivers/net/wireless/iwmc3200wifi/debug.h
@@ -113,13 +113,10 @@ struct iwm_debugfs {
113}; 113};
114 114
115#ifdef CONFIG_IWM_DEBUG 115#ifdef CONFIG_IWM_DEBUG
116int iwm_debugfs_init(struct iwm_priv *iwm); 116void iwm_debugfs_init(struct iwm_priv *iwm);
117void iwm_debugfs_exit(struct iwm_priv *iwm); 117void iwm_debugfs_exit(struct iwm_priv *iwm);
118#else 118#else
119static inline int iwm_debugfs_init(struct iwm_priv *iwm) 119static inline void iwm_debugfs_init(struct iwm_priv *iwm) {}
120{
121 return 0;
122}
123static inline void iwm_debugfs_exit(struct iwm_priv *iwm) {} 120static inline void iwm_debugfs_exit(struct iwm_priv *iwm) {}
124#endif 121#endif
125 122
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index cbb81befdb55..53b0b7711f02 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -48,12 +48,11 @@ static struct {
48 48
49#define add_dbg_module(dbg, name, id, initlevel) \ 49#define add_dbg_module(dbg, name, id, initlevel) \
50do { \ 50do { \
51 struct dentry *d; \
52 dbg.dbg_module[id] = (initlevel); \ 51 dbg.dbg_module[id] = (initlevel); \
53 d = debugfs_create_x8(name, 0600, dbg.dbgdir, \ 52 dbg.dbg_module_dentries[id] = \
54 &(dbg.dbg_module[id])); \ 53 debugfs_create_x8(name, 0600, \
55 if (!IS_ERR(d)) \ 54 dbg.dbgdir, \
56 dbg.dbg_module_dentries[id] = d; \ 55 &(dbg.dbg_module[id])); \
57} while (0) 56} while (0)
58 57
59static int iwm_debugfs_u32_read(void *data, u64 *val) 58static int iwm_debugfs_u32_read(void *data, u64 *val)
@@ -266,7 +265,7 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
266 size_t count, loff_t *ppos) 265 size_t count, loff_t *ppos)
267{ 266{
268 struct iwm_priv *iwm = filp->private_data; 267 struct iwm_priv *iwm = filp->private_data;
269 struct iwm_rx_ticket_node *ticket, *next; 268 struct iwm_rx_ticket_node *ticket;
270 char *buf; 269 char *buf;
271 int buf_len = 4096, i; 270 int buf_len = 4096, i;
272 size_t len = 0; 271 size_t len = 0;
@@ -281,7 +280,8 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
281 if (!buf) 280 if (!buf)
282 return -ENOMEM; 281 return -ENOMEM;
283 282
284 list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) { 283 spin_lock(&iwm->ticket_lock);
284 list_for_each_entry(ticket, &iwm->rx_tickets, node) {
285 len += snprintf(buf + len, buf_len - len, "Ticket #%d\n", 285 len += snprintf(buf + len, buf_len - len, "Ticket #%d\n",
286 ticket->ticket->id); 286 ticket->ticket->id);
287 len += snprintf(buf + len, buf_len - len, "\taction: 0x%x\n", 287 len += snprintf(buf + len, buf_len - len, "\taction: 0x%x\n",
@@ -289,14 +289,17 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
289 len += snprintf(buf + len, buf_len - len, "\tflags: 0x%x\n", 289 len += snprintf(buf + len, buf_len - len, "\tflags: 0x%x\n",
290 ticket->ticket->flags); 290 ticket->ticket->flags);
291 } 291 }
292 spin_unlock(&iwm->ticket_lock);
292 293
293 for (i = 0; i < IWM_RX_ID_HASH; i++) { 294 for (i = 0; i < IWM_RX_ID_HASH; i++) {
294 struct iwm_rx_packet *packet, *nxt; 295 struct iwm_rx_packet *packet;
295 struct list_head *pkt_list = &iwm->rx_packets[i]; 296 struct list_head *pkt_list = &iwm->rx_packets[i];
297
296 if (!list_empty(pkt_list)) { 298 if (!list_empty(pkt_list)) {
297 len += snprintf(buf + len, buf_len - len, 299 len += snprintf(buf + len, buf_len - len,
298 "Packet hash #%d\n", i); 300 "Packet hash #%d\n", i);
299 list_for_each_entry_safe(packet, nxt, pkt_list, node) { 301 spin_lock(&iwm->packet_lock[i]);
302 list_for_each_entry(packet, pkt_list, node) {
300 len += snprintf(buf + len, buf_len - len, 303 len += snprintf(buf + len, buf_len - len,
301 "\tPacket id: %d\n", 304 "\tPacket id: %d\n",
302 packet->id); 305 packet->id);
@@ -304,6 +307,7 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
304 "\tPacket length: %lu\n", 307 "\tPacket length: %lu\n",
305 packet->pkt_size); 308 packet->pkt_size);
306 } 309 }
310 spin_unlock(&iwm->packet_lock[i]);
307 } 311 }
308 } 312 }
309 313
@@ -418,89 +422,29 @@ static const struct file_operations iwm_debugfs_fw_err_fops = {
418 .read = iwm_debugfs_fw_err_read, 422 .read = iwm_debugfs_fw_err_read,
419}; 423};
420 424
421int iwm_debugfs_init(struct iwm_priv *iwm) 425void iwm_debugfs_init(struct iwm_priv *iwm)
422{ 426{
423 int i, result; 427 int i;
424 char devdir[16];
425 428
426 iwm->dbg.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL); 429 iwm->dbg.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
427 result = PTR_ERR(iwm->dbg.rootdir); 430 iwm->dbg.devdir = debugfs_create_dir(wiphy_name(iwm_to_wiphy(iwm)),
428 if (!result || IS_ERR(iwm->dbg.rootdir)) { 431 iwm->dbg.rootdir);
429 if (result == -ENODEV) {
430 IWM_ERR(iwm, "DebugFS (CONFIG_DEBUG_FS) not "
431 "enabled in kernel config\n");
432 result = 0; /* No debugfs support */
433 }
434 IWM_ERR(iwm, "Couldn't create rootdir: %d\n", result);
435 goto error;
436 }
437
438 snprintf(devdir, sizeof(devdir), "%s", wiphy_name(iwm_to_wiphy(iwm)));
439
440 iwm->dbg.devdir = debugfs_create_dir(devdir, iwm->dbg.rootdir);
441 result = PTR_ERR(iwm->dbg.devdir);
442 if (IS_ERR(iwm->dbg.devdir) && (result != -ENODEV)) {
443 IWM_ERR(iwm, "Couldn't create devdir: %d\n", result);
444 goto error;
445 }
446
447 iwm->dbg.dbgdir = debugfs_create_dir("debug", iwm->dbg.devdir); 432 iwm->dbg.dbgdir = debugfs_create_dir("debug", iwm->dbg.devdir);
448 result = PTR_ERR(iwm->dbg.dbgdir);
449 if (IS_ERR(iwm->dbg.dbgdir) && (result != -ENODEV)) {
450 IWM_ERR(iwm, "Couldn't create dbgdir: %d\n", result);
451 goto error;
452 }
453
454 iwm->dbg.rxdir = debugfs_create_dir("rx", iwm->dbg.devdir); 433 iwm->dbg.rxdir = debugfs_create_dir("rx", iwm->dbg.devdir);
455 result = PTR_ERR(iwm->dbg.rxdir);
456 if (IS_ERR(iwm->dbg.rxdir) && (result != -ENODEV)) {
457 IWM_ERR(iwm, "Couldn't create rx dir: %d\n", result);
458 goto error;
459 }
460
461 iwm->dbg.txdir = debugfs_create_dir("tx", iwm->dbg.devdir); 434 iwm->dbg.txdir = debugfs_create_dir("tx", iwm->dbg.devdir);
462 result = PTR_ERR(iwm->dbg.txdir);
463 if (IS_ERR(iwm->dbg.txdir) && (result != -ENODEV)) {
464 IWM_ERR(iwm, "Couldn't create tx dir: %d\n", result);
465 goto error;
466 }
467
468 iwm->dbg.busdir = debugfs_create_dir("bus", iwm->dbg.devdir); 435 iwm->dbg.busdir = debugfs_create_dir("bus", iwm->dbg.devdir);
469 result = PTR_ERR(iwm->dbg.busdir); 436 if (iwm->bus_ops->debugfs_init)
470 if (IS_ERR(iwm->dbg.busdir) && (result != -ENODEV)) { 437 iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
471 IWM_ERR(iwm, "Couldn't create bus dir: %d\n", result);
472 goto error;
473 }
474
475 if (iwm->bus_ops->debugfs_init) {
476 result = iwm->bus_ops->debugfs_init(iwm, iwm->dbg.busdir);
477 if (result < 0) {
478 IWM_ERR(iwm, "Couldn't create bus entry: %d\n", result);
479 goto error;
480 }
481 }
482
483 438
484 iwm->dbg.dbg_level = IWM_DL_NONE; 439 iwm->dbg.dbg_level = IWM_DL_NONE;
485 iwm->dbg.dbg_level_dentry = 440 iwm->dbg.dbg_level_dentry =
486 debugfs_create_file("level", 0200, iwm->dbg.dbgdir, iwm, 441 debugfs_create_file("level", 0200, iwm->dbg.dbgdir, iwm,
487 &fops_iwm_dbg_level); 442 &fops_iwm_dbg_level);
488 result = PTR_ERR(iwm->dbg.dbg_level_dentry);
489 if (IS_ERR(iwm->dbg.dbg_level_dentry) && (result != -ENODEV)) {
490 IWM_ERR(iwm, "Couldn't create dbg_level: %d\n", result);
491 goto error;
492 }
493
494 443
495 iwm->dbg.dbg_modules = IWM_DM_DEFAULT; 444 iwm->dbg.dbg_modules = IWM_DM_DEFAULT;
496 iwm->dbg.dbg_modules_dentry = 445 iwm->dbg.dbg_modules_dentry =
497 debugfs_create_file("modules", 0200, iwm->dbg.dbgdir, iwm, 446 debugfs_create_file("modules", 0200, iwm->dbg.dbgdir, iwm,
498 &fops_iwm_dbg_modules); 447 &fops_iwm_dbg_modules);
499 result = PTR_ERR(iwm->dbg.dbg_modules_dentry);
500 if (IS_ERR(iwm->dbg.dbg_modules_dentry) && (result != -ENODEV)) {
501 IWM_ERR(iwm, "Couldn't create dbg_modules: %d\n", result);
502 goto error;
503 }
504 448
505 for (i = 0; i < __IWM_DM_NR; i++) 449 for (i = 0; i < __IWM_DM_NR; i++)
506 add_dbg_module(iwm->dbg, iwm_debug_module[i].name, 450 add_dbg_module(iwm->dbg, iwm_debug_module[i].name,
@@ -509,44 +453,15 @@ int iwm_debugfs_init(struct iwm_priv *iwm)
509 iwm->dbg.txq_dentry = debugfs_create_file("queues", 0200, 453 iwm->dbg.txq_dentry = debugfs_create_file("queues", 0200,
510 iwm->dbg.txdir, iwm, 454 iwm->dbg.txdir, iwm,
511 &iwm_debugfs_txq_fops); 455 &iwm_debugfs_txq_fops);
512 result = PTR_ERR(iwm->dbg.txq_dentry);
513 if (IS_ERR(iwm->dbg.txq_dentry) && (result != -ENODEV)) {
514 IWM_ERR(iwm, "Couldn't create tx queue: %d\n", result);
515 goto error;
516 }
517
518 iwm->dbg.tx_credit_dentry = debugfs_create_file("credits", 0200, 456 iwm->dbg.tx_credit_dentry = debugfs_create_file("credits", 0200,
519 iwm->dbg.txdir, iwm, 457 iwm->dbg.txdir, iwm,
520 &iwm_debugfs_tx_credit_fops); 458 &iwm_debugfs_tx_credit_fops);
521 result = PTR_ERR(iwm->dbg.tx_credit_dentry);
522 if (IS_ERR(iwm->dbg.tx_credit_dentry) && (result != -ENODEV)) {
523 IWM_ERR(iwm, "Couldn't create tx credit: %d\n", result);
524 goto error;
525 }
526
527 iwm->dbg.rx_ticket_dentry = debugfs_create_file("tickets", 0200, 459 iwm->dbg.rx_ticket_dentry = debugfs_create_file("tickets", 0200,
528 iwm->dbg.rxdir, iwm, 460 iwm->dbg.rxdir, iwm,
529 &iwm_debugfs_rx_ticket_fops); 461 &iwm_debugfs_rx_ticket_fops);
530 result = PTR_ERR(iwm->dbg.rx_ticket_dentry);
531 if (IS_ERR(iwm->dbg.rx_ticket_dentry) && (result != -ENODEV)) {
532 IWM_ERR(iwm, "Couldn't create rx ticket: %d\n", result);
533 goto error;
534 }
535
536 iwm->dbg.fw_err_dentry = debugfs_create_file("last_fw_err", 0200, 462 iwm->dbg.fw_err_dentry = debugfs_create_file("last_fw_err", 0200,
537 iwm->dbg.dbgdir, iwm, 463 iwm->dbg.dbgdir, iwm,
538 &iwm_debugfs_fw_err_fops); 464 &iwm_debugfs_fw_err_fops);
539 result = PTR_ERR(iwm->dbg.fw_err_dentry);
540 if (IS_ERR(iwm->dbg.fw_err_dentry) && (result != -ENODEV)) {
541 IWM_ERR(iwm, "Couldn't create last FW err: %d\n", result);
542 goto error;
543 }
544
545
546 return 0;
547
548 error:
549 return result;
550} 465}
551 466
552void iwm_debugfs_exit(struct iwm_priv *iwm) 467void iwm_debugfs_exit(struct iwm_priv *iwm)
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
index 229de990379c..9531b18cf72a 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ b/drivers/net/wireless/iwmc3200wifi/hal.c
@@ -105,6 +105,7 @@
105#include "hal.h" 105#include "hal.h"
106#include "umac.h" 106#include "umac.h"
107#include "debug.h" 107#include "debug.h"
108#include "trace.h"
108 109
109static int iwm_nonwifi_cmd_init(struct iwm_priv *iwm, 110static int iwm_nonwifi_cmd_init(struct iwm_priv *iwm,
110 struct iwm_nonwifi_cmd *cmd, 111 struct iwm_nonwifi_cmd *cmd,
@@ -207,9 +208,9 @@ void iwm_cmd_flush(struct iwm_priv *iwm)
207 208
208struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num) 209struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
209{ 210{
210 struct iwm_wifi_cmd *cmd, *next; 211 struct iwm_wifi_cmd *cmd;
211 212
212 list_for_each_entry_safe(cmd, next, &iwm->wifi_pending_cmd, pending) 213 list_for_each_entry(cmd, &iwm->wifi_pending_cmd, pending)
213 if (cmd->seq_num == seq_num) { 214 if (cmd->seq_num == seq_num) {
214 list_del(&cmd->pending); 215 list_del(&cmd->pending);
215 return cmd; 216 return cmd;
@@ -218,12 +219,12 @@ struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
218 return NULL; 219 return NULL;
219} 220}
220 221
221struct iwm_nonwifi_cmd * 222struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm,
222iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm, u8 seq_num, u8 cmd_opcode) 223 u8 seq_num, u8 cmd_opcode)
223{ 224{
224 struct iwm_nonwifi_cmd *cmd, *next; 225 struct iwm_nonwifi_cmd *cmd;
225 226
226 list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending) 227 list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
227 if ((cmd->seq_num == seq_num) && 228 if ((cmd->seq_num == seq_num) &&
228 (cmd->udma_cmd.opcode == cmd_opcode) && 229 (cmd->udma_cmd.opcode == cmd_opcode) &&
229 (cmd->resp_received)) { 230 (cmd->resp_received)) {
@@ -277,6 +278,7 @@ static int iwm_send_udma_nonwifi_cmd(struct iwm_priv *iwm,
277 udma_cmd->handle_by_hw, cmd->seq_num, udma_cmd->addr, 278 udma_cmd->handle_by_hw, cmd->seq_num, udma_cmd->addr,
278 udma_cmd->op1_sz, udma_cmd->op2); 279 udma_cmd->op1_sz, udma_cmd->op2);
279 280
281 trace_iwm_tx_nonwifi_cmd(iwm, udma_hdr);
280 return iwm_bus_send_chunk(iwm, buf->start, buf->len); 282 return iwm_bus_send_chunk(iwm, buf->start, buf->len);
281} 283}
282 284
@@ -363,6 +365,7 @@ static int iwm_send_udma_wifi_cmd(struct iwm_priv *iwm,
363 return ret; 365 return ret;
364 } 366 }
365 367
368 trace_iwm_tx_wifi_cmd(iwm, umac_hdr);
366 return iwm_bus_send_chunk(iwm, buf->start, buf->len); 369 return iwm_bus_send_chunk(iwm, buf->start, buf->len);
367} 370}
368 371
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.h b/drivers/net/wireless/iwmc3200wifi/hal.h
index 0adfdc85765d..c20936d9b6b7 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.h
+++ b/drivers/net/wireless/iwmc3200wifi/hal.h
@@ -75,7 +75,8 @@ do { \
75 75
76 76
77/* UDMA IN OP CODE -- cmd bits [3:0] */ 77/* UDMA IN OP CODE -- cmd bits [3:0] */
78#define UDMA_IN_OPCODE_MASK 0xF 78#define UDMA_HDI_IN_NW_CMD_OPCODE_POS 0
79#define UDMA_HDI_IN_NW_CMD_OPCODE_SEED 0xF
79 80
80#define UDMA_IN_OPCODE_GENERAL_RESP 0x0 81#define UDMA_IN_OPCODE_GENERAL_RESP 0x0
81#define UDMA_IN_OPCODE_READ_RESP 0x1 82#define UDMA_IN_OPCODE_READ_RESP 0x1
@@ -130,7 +131,7 @@ do { \
130#define IWM_MAX_WIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \ 131#define IWM_MAX_WIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
131 IWM_MAX_WIFI_HEADERS_SIZE) 132 IWM_MAX_WIFI_HEADERS_SIZE)
132 133
133#define IWM_HAL_CONCATENATE_BUF_SIZE 8192 134#define IWM_HAL_CONCATENATE_BUF_SIZE (32 * 1024)
134 135
135struct iwm_wifi_cmd_buff { 136struct iwm_wifi_cmd_buff {
136 u16 len; 137 u16 len;
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 79ffa3b98d73..13266c3842f8 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -48,6 +48,7 @@
48#include "umac.h" 48#include "umac.h"
49#include "lmac.h" 49#include "lmac.h"
50#include "eeprom.h" 50#include "eeprom.h"
51#include "trace.h"
51 52
52#define IWM_COPYRIGHT "Copyright(c) 2009 Intel Corporation" 53#define IWM_COPYRIGHT "Copyright(c) 2009 Intel Corporation"
53#define IWM_AUTHOR "<ilw@linux.intel.com>" 54#define IWM_AUTHOR "<ilw@linux.intel.com>"
@@ -268,7 +269,9 @@ struct iwm_priv {
268 269
269 struct sk_buff_head rx_list; 270 struct sk_buff_head rx_list;
270 struct list_head rx_tickets; 271 struct list_head rx_tickets;
272 spinlock_t ticket_lock;
271 struct list_head rx_packets[IWM_RX_ID_HASH]; 273 struct list_head rx_packets[IWM_RX_ID_HASH];
274 spinlock_t packet_lock[IWM_RX_ID_HASH];
272 struct workqueue_struct *rx_wq; 275 struct workqueue_struct *rx_wq;
273 struct work_struct rx_worker; 276 struct work_struct rx_worker;
274 277
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 23856d359e12..362002735b12 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -277,8 +277,11 @@ int iwm_priv_init(struct iwm_priv *iwm)
277 277
278 skb_queue_head_init(&iwm->rx_list); 278 skb_queue_head_init(&iwm->rx_list);
279 INIT_LIST_HEAD(&iwm->rx_tickets); 279 INIT_LIST_HEAD(&iwm->rx_tickets);
280 for (i = 0; i < IWM_RX_ID_HASH; i++) 280 spin_lock_init(&iwm->ticket_lock);
281 for (i = 0; i < IWM_RX_ID_HASH; i++) {
281 INIT_LIST_HEAD(&iwm->rx_packets[i]); 282 INIT_LIST_HEAD(&iwm->rx_packets[i]);
283 spin_lock_init(&iwm->packet_lock[i]);
284 }
282 285
283 INIT_WORK(&iwm->rx_worker, iwm_rx_worker); 286 INIT_WORK(&iwm->rx_worker, iwm_rx_worker);
284 287
@@ -424,9 +427,9 @@ int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
424static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd, 427static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd,
425 u8 source) 428 u8 source)
426{ 429{
427 struct iwm_notif *notif, *next; 430 struct iwm_notif *notif;
428 431
429 list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) { 432 list_for_each_entry(notif, &iwm->pending_notif, pending) {
430 if ((notif->cmd_id == cmd) && (notif->src == source)) { 433 if ((notif->cmd_id == cmd) && (notif->src == source)) {
431 list_del(&notif->pending); 434 list_del(&notif->pending);
432 return notif; 435 return notif;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 3257d4fad835..e1184deca559 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -343,15 +343,17 @@ static void iwm_rx_ticket_node_free(struct iwm_rx_ticket_node *ticket_node)
343static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id) 343static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id)
344{ 344{
345 u8 id_hash = IWM_RX_ID_GET_HASH(id); 345 u8 id_hash = IWM_RX_ID_GET_HASH(id);
346 struct list_head *packet_list; 346 struct iwm_rx_packet *packet;
347 struct iwm_rx_packet *packet, *next;
348
349 packet_list = &iwm->rx_packets[id_hash];
350 347
351 list_for_each_entry_safe(packet, next, packet_list, node) 348 spin_lock(&iwm->packet_lock[id_hash]);
352 if (packet->id == id) 349 list_for_each_entry(packet, &iwm->rx_packets[id_hash], node)
350 if (packet->id == id) {
351 list_del(&packet->node);
352 spin_unlock(&iwm->packet_lock[id_hash]);
353 return packet; 353 return packet;
354 }
354 355
356 spin_unlock(&iwm->packet_lock[id_hash]);
355 return NULL; 357 return NULL;
356} 358}
357 359
@@ -389,18 +391,22 @@ void iwm_rx_free(struct iwm_priv *iwm)
389 struct iwm_rx_packet *packet, *np; 391 struct iwm_rx_packet *packet, *np;
390 int i; 392 int i;
391 393
394 spin_lock(&iwm->ticket_lock);
392 list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) { 395 list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
393 list_del(&ticket->node); 396 list_del(&ticket->node);
394 iwm_rx_ticket_node_free(ticket); 397 iwm_rx_ticket_node_free(ticket);
395 } 398 }
399 spin_unlock(&iwm->ticket_lock);
396 400
397 for (i = 0; i < IWM_RX_ID_HASH; i++) { 401 for (i = 0; i < IWM_RX_ID_HASH; i++) {
402 spin_lock(&iwm->packet_lock[i]);
398 list_for_each_entry_safe(packet, np, &iwm->rx_packets[i], 403 list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
399 node) { 404 node) {
400 list_del(&packet->node); 405 list_del(&packet->node);
401 kfree_skb(packet->skb); 406 kfree_skb(packet->skb);
402 kfree(packet); 407 kfree(packet);
403 } 408 }
409 spin_unlock(&iwm->packet_lock[i]);
404 } 410 }
405} 411}
406 412
@@ -425,10 +431,13 @@ static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
425 return PTR_ERR(ticket_node); 431 return PTR_ERR(ticket_node);
426 432
427 IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n", 433 IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
428 ticket->action == IWM_RX_TICKET_RELEASE ? 434 __le16_to_cpu(ticket->action) ==
435 IWM_RX_TICKET_RELEASE ?
429 "RELEASE" : "DROP", 436 "RELEASE" : "DROP",
430 ticket->id); 437 ticket->id);
438 spin_lock(&iwm->ticket_lock);
431 list_add_tail(&ticket_node->node, &iwm->rx_tickets); 439 list_add_tail(&ticket_node->node, &iwm->rx_tickets);
440 spin_unlock(&iwm->ticket_lock);
432 441
433 /* 442 /*
434 * We received an Rx ticket, most likely there's 443 * We received an Rx ticket, most likely there's
@@ -461,6 +470,7 @@ static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
461 struct iwm_rx_packet *packet; 470 struct iwm_rx_packet *packet;
462 u16 id, buf_offset; 471 u16 id, buf_offset;
463 u32 packet_size; 472 u32 packet_size;
473 u8 id_hash;
464 474
465 IWM_DBG_RX(iwm, DBG, "\n"); 475 IWM_DBG_RX(iwm, DBG, "\n");
466 476
@@ -478,7 +488,10 @@ static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
478 if (IS_ERR(packet)) 488 if (IS_ERR(packet))
479 return PTR_ERR(packet); 489 return PTR_ERR(packet);
480 490
481 list_add_tail(&packet->node, &iwm->rx_packets[IWM_RX_ID_GET_HASH(id)]); 491 id_hash = IWM_RX_ID_GET_HASH(id);
492 spin_lock(&iwm->packet_lock[id_hash]);
493 list_add_tail(&packet->node, &iwm->rx_packets[id_hash]);
494 spin_unlock(&iwm->packet_lock[id_hash]);
482 495
483 /* We might (unlikely) have received the packet _after_ the ticket */ 496 /* We might (unlikely) have received the packet _after_ the ticket */
484 queue_work(iwm->rx_wq, &iwm->rx_worker); 497 queue_work(iwm->rx_wq, &iwm->rx_worker);
@@ -519,6 +532,8 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
519 unsigned long buf_size, 532 unsigned long buf_size,
520 struct iwm_wifi_cmd *cmd) 533 struct iwm_wifi_cmd *cmd)
521{ 534{
535 struct wiphy *wiphy = iwm_to_wiphy(iwm);
536 struct ieee80211_channel *chan;
522 struct iwm_umac_notif_assoc_complete *complete = 537 struct iwm_umac_notif_assoc_complete *complete =
523 (struct iwm_umac_notif_assoc_complete *)buf; 538 (struct iwm_umac_notif_assoc_complete *)buf;
524 539
@@ -527,6 +542,18 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
527 542
528 switch (le32_to_cpu(complete->status)) { 543 switch (le32_to_cpu(complete->status)) {
529 case UMAC_ASSOC_COMPLETE_SUCCESS: 544 case UMAC_ASSOC_COMPLETE_SUCCESS:
545 chan = ieee80211_get_channel(wiphy,
546 ieee80211_channel_to_frequency(complete->channel));
547 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
548 /* Associated to a unallowed channel, disassociate. */
549 __iwm_invalidate_mlme_profile(iwm);
550 IWM_WARN(iwm, "Couldn't associate with %pM due to "
551 "channel %d is disabled. Check your local "
552 "regulatory setting.\n",
553 complete->bssid, complete->channel);
554 goto failure;
555 }
556
530 set_bit(IWM_STATUS_ASSOCIATED, &iwm->status); 557 set_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
531 memcpy(iwm->bssid, complete->bssid, ETH_ALEN); 558 memcpy(iwm->bssid, complete->bssid, ETH_ALEN);
532 iwm->channel = complete->channel; 559 iwm->channel = complete->channel;
@@ -563,6 +590,7 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
563 GFP_KERNEL); 590 GFP_KERNEL);
564 break; 591 break;
565 case UMAC_ASSOC_COMPLETE_FAILURE: 592 case UMAC_ASSOC_COMPLETE_FAILURE:
593 failure:
566 clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status); 594 clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
567 memset(iwm->bssid, 0, ETH_ALEN); 595 memset(iwm->bssid, 0, ETH_ALEN);
568 iwm->channel = 0; 596 iwm->channel = 0;
@@ -757,7 +785,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
757 (struct iwm_umac_notif_bss_info *)buf; 785 (struct iwm_umac_notif_bss_info *)buf;
758 struct ieee80211_channel *channel; 786 struct ieee80211_channel *channel;
759 struct ieee80211_supported_band *band; 787 struct ieee80211_supported_band *band;
760 struct iwm_bss_info *bss, *next; 788 struct iwm_bss_info *bss;
761 s32 signal; 789 s32 signal;
762 int freq; 790 int freq;
763 u16 frame_len = le16_to_cpu(umac_bss->frame_len); 791 u16 frame_len = le16_to_cpu(umac_bss->frame_len);
@@ -776,7 +804,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
776 IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi); 804 IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi);
777 IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len); 805 IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len);
778 806
779 list_for_each_entry_safe(bss, next, &iwm->bss_list, node) 807 list_for_each_entry(bss, &iwm->bss_list, node)
780 if (bss->bss->table_idx == umac_bss->table_idx) 808 if (bss->bss->table_idx == umac_bss->table_idx)
781 break; 809 break;
782 810
@@ -843,16 +871,15 @@ static int iwm_mlme_remove_bss(struct iwm_priv *iwm, u8 *buf,
843 int i; 871 int i;
844 872
845 for (i = 0; i < le32_to_cpu(bss_rm->count); i++) { 873 for (i = 0; i < le32_to_cpu(bss_rm->count); i++) {
846 table_idx = (le16_to_cpu(bss_rm->entries[i]) 874 table_idx = le16_to_cpu(bss_rm->entries[i]) &
847 & IWM_BSS_REMOVE_INDEX_MSK); 875 IWM_BSS_REMOVE_INDEX_MSK;
848 list_for_each_entry_safe(bss, next, &iwm->bss_list, node) 876 list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
849 if (bss->bss->table_idx == cpu_to_le16(table_idx)) { 877 if (bss->bss->table_idx == cpu_to_le16(table_idx)) {
850 struct ieee80211_mgmt *mgmt; 878 struct ieee80211_mgmt *mgmt;
851 879
852 mgmt = (struct ieee80211_mgmt *) 880 mgmt = (struct ieee80211_mgmt *)
853 (bss->bss->frame_buf); 881 (bss->bss->frame_buf);
854 IWM_DBG_MLME(iwm, ERR, 882 IWM_DBG_MLME(iwm, ERR, "BSS removed: %pM\n",
855 "BSS removed: %pM\n",
856 mgmt->bssid); 883 mgmt->bssid);
857 list_del(&bss->node); 884 list_del(&bss->node);
858 kfree(bss->bss); 885 kfree(bss->bss);
@@ -1224,18 +1251,24 @@ static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
1224 u8 source, cmd_id; 1251 u8 source, cmd_id;
1225 u16 seq_num; 1252 u16 seq_num;
1226 u32 count; 1253 u32 count;
1227 u8 resp;
1228 1254
1229 wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf; 1255 wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
1230 cmd_id = wifi_hdr->sw_hdr.cmd.cmd; 1256 cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
1231
1232 source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE); 1257 source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
1233 if (source >= IWM_SRC_NUM) { 1258 if (source >= IWM_SRC_NUM) {
1234 IWM_CRIT(iwm, "invalid source %d\n", source); 1259 IWM_CRIT(iwm, "invalid source %d\n", source);
1235 return -EINVAL; 1260 return -EINVAL;
1236 } 1261 }
1237 1262
1238 count = (GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT)); 1263 if (cmd_id == REPLY_RX_MPDU_CMD)
1264 trace_iwm_rx_packet(iwm, buf, buf_size);
1265 else if ((cmd_id == UMAC_NOTIFY_OPCODE_RX_TICKET) &&
1266 (source == UMAC_HDI_IN_SOURCE_FW))
1267 trace_iwm_rx_ticket(iwm, buf, buf_size);
1268 else
1269 trace_iwm_rx_wifi_cmd(iwm, wifi_hdr);
1270
1271 count = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
1239 count += sizeof(struct iwm_umac_wifi_in_hdr) - 1272 count += sizeof(struct iwm_umac_wifi_in_hdr) -
1240 sizeof(struct iwm_dev_cmd_hdr); 1273 sizeof(struct iwm_dev_cmd_hdr);
1241 if (count > buf_size) { 1274 if (count > buf_size) {
@@ -1243,8 +1276,6 @@ static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
1243 return -EINVAL; 1276 return -EINVAL;
1244 } 1277 }
1245 1278
1246 resp = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_STATUS);
1247
1248 seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num); 1279 seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
1249 1280
1250 IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n", 1281 IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n",
@@ -1317,8 +1348,9 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
1317{ 1348{
1318 u8 seq_num; 1349 u8 seq_num;
1319 struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf; 1350 struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf;
1320 struct iwm_nonwifi_cmd *cmd, *next; 1351 struct iwm_nonwifi_cmd *cmd;
1321 1352
1353 trace_iwm_rx_nonwifi_cmd(iwm, buf, buf_size);
1322 seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM); 1354 seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
1323 1355
1324 /* 1356 /*
@@ -1329,7 +1361,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
1329 * That means we only support synchronised non wifi command response 1361 * That means we only support synchronised non wifi command response
1330 * schemes. 1362 * schemes.
1331 */ 1363 */
1332 list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending) 1364 list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
1333 if (cmd->seq_num == seq_num) { 1365 if (cmd->seq_num == seq_num) {
1334 cmd->resp_received = 1; 1366 cmd->resp_received = 1;
1335 cmd->buf.len = buf_size; 1367 cmd->buf.len = buf_size;
@@ -1648,6 +1680,7 @@ void iwm_rx_worker(struct work_struct *work)
1648 * We stop whenever a ticket is missing its packet, as we're 1680 * We stop whenever a ticket is missing its packet, as we're
1649 * supposed to send the packets in order. 1681 * supposed to send the packets in order.
1650 */ 1682 */
1683 spin_lock(&iwm->ticket_lock);
1651 list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) { 1684 list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
1652 struct iwm_rx_packet *packet = 1685 struct iwm_rx_packet *packet =
1653 iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id)); 1686 iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));
@@ -1656,12 +1689,12 @@ void iwm_rx_worker(struct work_struct *work)
1656 IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d " 1689 IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d "
1657 "to be handled first\n", 1690 "to be handled first\n",
1658 le16_to_cpu(ticket->ticket->id)); 1691 le16_to_cpu(ticket->ticket->id));
1659 return; 1692 break;
1660 } 1693 }
1661 1694
1662 list_del(&ticket->node); 1695 list_del(&ticket->node);
1663 list_del(&packet->node);
1664 iwm_rx_process_packet(iwm, packet, ticket); 1696 iwm_rx_process_packet(iwm, packet, ticket);
1665 } 1697 }
1698 spin_unlock(&iwm->ticket_lock);
1666} 1699}
1667 1700
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 1eafd6dec3fd..edcb52330cf5 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -366,21 +366,13 @@ static const struct file_operations iwm_debugfs_sdio_fops = {
366 .read = iwm_debugfs_sdio_read, 366 .read = iwm_debugfs_sdio_read,
367}; 367};
368 368
369static int if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir) 369static void if_sdio_debugfs_init(struct iwm_priv *iwm, struct dentry *parent_dir)
370{ 370{
371 int result;
372 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm); 371 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
373 372
374 hw->cccr_dentry = debugfs_create_file("cccr", 0200, 373 hw->cccr_dentry = debugfs_create_file("cccr", 0200,
375 parent_dir, iwm, 374 parent_dir, iwm,
376 &iwm_debugfs_sdio_fops); 375 &iwm_debugfs_sdio_fops);
377 result = PTR_ERR(hw->cccr_dentry);
378 if (IS_ERR(hw->cccr_dentry) && (result != -ENODEV)) {
379 IWM_ERR(iwm, "Couldn't create CCCR entry: %d\n", result);
380 return result;
381 }
382
383 return 0;
384} 376}
385 377
386static void if_sdio_debugfs_exit(struct iwm_priv *iwm) 378static void if_sdio_debugfs_exit(struct iwm_priv *iwm)
@@ -440,11 +432,7 @@ static int iwm_sdio_probe(struct sdio_func *func,
440 hw = iwm_private(iwm); 432 hw = iwm_private(iwm);
441 hw->iwm = iwm; 433 hw->iwm = iwm;
442 434
443 ret = iwm_debugfs_init(iwm); 435 iwm_debugfs_init(iwm);
444 if (ret < 0) {
445 IWM_ERR(iwm, "Debugfs registration failed\n");
446 goto if_free;
447 }
448 436
449 sdio_set_drvdata(func, hw); 437 sdio_set_drvdata(func, hw);
450 438
@@ -473,7 +461,6 @@ static int iwm_sdio_probe(struct sdio_func *func,
473 destroy_workqueue(hw->isr_wq); 461 destroy_workqueue(hw->isr_wq);
474 debugfs_exit: 462 debugfs_exit:
475 iwm_debugfs_exit(iwm); 463 iwm_debugfs_exit(iwm);
476 if_free:
477 iwm_if_free(iwm); 464 iwm_if_free(iwm);
478 return ret; 465 return ret;
479} 466}
@@ -492,8 +479,6 @@ static void iwm_sdio_remove(struct sdio_func *func)
492 sdio_set_drvdata(func, NULL); 479 sdio_set_drvdata(func, NULL);
493 480
494 dev_info(dev, "IWM SDIO remove\n"); 481 dev_info(dev, "IWM SDIO remove\n");
495
496 return;
497} 482}
498 483
499static const struct sdio_device_id iwm_sdio_ids[] = { 484static const struct sdio_device_id iwm_sdio_ids[] = {
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.c b/drivers/net/wireless/iwmc3200wifi/trace.c
new file mode 100644
index 000000000000..904d36f22311
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/trace.c
@@ -0,0 +1,3 @@
1#include "iwm.h"
2#define CREATE_TRACE_POINTS
3#include "trace.h"
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.h b/drivers/net/wireless/iwmc3200wifi/trace.h
new file mode 100644
index 000000000000..abb4805fa8df
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/trace.h
@@ -0,0 +1,283 @@
1#if !defined(__IWM_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
2#define __IWM_TRACE_H__
3
4#include <linux/tracepoint.h>
5
6#if !defined(CONFIG_IWM_TRACING)
7#undef TRACE_EVENT
8#define TRACE_EVENT(name, proto, ...) \
9static inline void trace_ ## name(proto) {}
10#endif
11
12#undef TRACE_SYSTEM
13#define TRACE_SYSTEM iwm
14
15#define IWM_ENTRY __array(char, ndev_name, 16)
16#define IWM_ASSIGN strlcpy(__entry->ndev_name, iwm_to_ndev(iwm)->name, 16)
17#define IWM_PR_FMT "%s"
18#define IWM_PR_ARG __entry->ndev_name
19
20TRACE_EVENT(iwm_tx_nonwifi_cmd,
21 TP_PROTO(struct iwm_priv *iwm, struct iwm_udma_out_nonwifi_hdr *hdr),
22
23 TP_ARGS(iwm, hdr),
24
25 TP_STRUCT__entry(
26 IWM_ENTRY
27 __field(u8, opcode)
28 __field(u8, resp)
29 __field(u8, eot)
30 __field(u8, hw)
31 __field(u16, seq)
32 __field(u32, addr)
33 __field(u32, op1)
34 __field(u32, op2)
35 ),
36
37 TP_fast_assign(
38 IWM_ASSIGN;
39 __entry->opcode = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE);
40 __entry->resp = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP);
41 __entry->eot = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT);
42 __entry->hw = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW);
43 __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM);
44 __entry->addr = le32_to_cpu(hdr->addr);
45 __entry->op1 = le32_to_cpu(hdr->op1_sz);
46 __entry->op2 = le32_to_cpu(hdr->op2);
47 ),
48
49 TP_printk(
50 IWM_PR_FMT " Tx TARGET CMD: opcode 0x%x, resp %d, eot %d, "
51 "hw %d, seq 0x%x, addr 0x%x, op1 0x%x, op2 0x%x",
52 IWM_PR_ARG, __entry->opcode, __entry->resp, __entry->eot,
53 __entry->hw, __entry->seq, __entry->addr, __entry->op1,
54 __entry->op2
55 )
56);
57
58TRACE_EVENT(iwm_tx_wifi_cmd,
59 TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_out_hdr *hdr),
60
61 TP_ARGS(iwm, hdr),
62
63 TP_STRUCT__entry(
64 IWM_ENTRY
65 __field(u8, opcode)
66 __field(u8, lmac)
67 __field(u8, resp)
68 __field(u8, eot)
69 __field(u8, ra_tid)
70 __field(u8, credit_group)
71 __field(u8, color)
72 __field(u16, seq)
73 ),
74
75 TP_fast_assign(
76 IWM_ASSIGN;
77 __entry->opcode = hdr->sw_hdr.cmd.cmd;
78 __entry->lmac = 0;
79 __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
80 __entry->resp = GET_VAL8(hdr->sw_hdr.cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ);
81 __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
82 __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
83 __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
84 __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
85 if (__entry->opcode == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH ||
86 __entry->opcode == UMAC_CMD_OPCODE_WIFI_IF_WRAPPER) {
87 __entry->lmac = 1;
88 __entry->opcode = ((struct iwm_lmac_hdr *)(hdr + 1))->id;
89 }
90 ),
91
92 TP_printk(
93 IWM_PR_FMT " Tx %cMAC CMD: opcode 0x%x, resp %d, eot %d, "
94 "seq 0x%x, sta_color 0x%x, ra_tid 0x%x, credit_group 0x%x",
95 IWM_PR_ARG, __entry->lmac ? 'L' : 'U', __entry->opcode,
96 __entry->resp, __entry->eot, __entry->seq, __entry->color,
97 __entry->ra_tid, __entry->credit_group
98 )
99);
100
101TRACE_EVENT(iwm_tx_packets,
102 TP_PROTO(struct iwm_priv *iwm, u8 *buf, int len),
103
104 TP_ARGS(iwm, buf, len),
105
106 TP_STRUCT__entry(
107 IWM_ENTRY
108 __field(u8, eot)
109 __field(u8, ra_tid)
110 __field(u8, credit_group)
111 __field(u8, color)
112 __field(u16, seq)
113 __field(u8, npkt)
114 __field(u32, bytes)
115 ),
116
117 TP_fast_assign(
118 struct iwm_umac_wifi_out_hdr *hdr =
119 (struct iwm_umac_wifi_out_hdr *)buf;
120
121 IWM_ASSIGN;
122 __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
123 __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
124 __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
125 __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
126 __entry->seq = __le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
127 __entry->npkt = 1;
128 __entry->bytes = len;
129
130 if (!__entry->eot) {
131 int count;
132 u8 *ptr = buf;
133
134 __entry->npkt = 0;
135 while (ptr < buf + len) {
136 count = GET_VAL32(hdr->sw_hdr.meta_data,
137 UMAC_FW_CMD_BYTE_COUNT);
138 ptr += ALIGN(sizeof(*hdr) + count, 16);
139 hdr = (struct iwm_umac_wifi_out_hdr *)ptr;
140 __entry->npkt++;
141 }
142 }
143 ),
144
145 TP_printk(
146 IWM_PR_FMT " Tx %spacket: eot %d, seq 0x%x, sta_color 0x%x, "
147 "ra_tid 0x%x, credit_group 0x%x, embeded_packets %d, %d bytes",
148 IWM_PR_ARG, !__entry->eot ? "concatenated " : "",
149 __entry->eot, __entry->seq, __entry->color, __entry->ra_tid,
150 __entry->credit_group, __entry->npkt, __entry->bytes
151 )
152);
153
154TRACE_EVENT(iwm_rx_nonwifi_cmd,
155 TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
156
157 TP_ARGS(iwm, buf, len),
158
159 TP_STRUCT__entry(
160 IWM_ENTRY
161 __field(u8, opcode)
162 __field(u16, seq)
163 __field(u32, len)
164 ),
165
166 TP_fast_assign(
167 struct iwm_udma_in_hdr *hdr = buf;
168
169 IWM_ASSIGN;
170 __entry->opcode = GET_VAL32(hdr->cmd, UDMA_HDI_IN_NW_CMD_OPCODE);
171 __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
172 __entry->len = len;
173 ),
174
175 TP_printk(
176 IWM_PR_FMT " Rx TARGET RESP: opcode 0x%x, seq 0x%x, len 0x%x",
177 IWM_PR_ARG, __entry->opcode, __entry->seq, __entry->len
178 )
179);
180
181TRACE_EVENT(iwm_rx_wifi_cmd,
182 TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_in_hdr *hdr),
183
184 TP_ARGS(iwm, hdr),
185
186 TP_STRUCT__entry(
187 IWM_ENTRY
188 __field(u8, cmd)
189 __field(u8, source)
190 __field(u16, seq)
191 __field(u32, count)
192 ),
193
194 TP_fast_assign(
195 IWM_ASSIGN;
196 __entry->cmd = hdr->sw_hdr.cmd.cmd;
197 __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
198 __entry->count = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
199 __entry->seq = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
200 ),
201
202 TP_printk(
203 IWM_PR_FMT " Rx %s RESP: cmd 0x%x, seq 0x%x, count 0x%x",
204 IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ? "LMAC" :
205 __entry->source == UMAC_HDI_IN_SOURCE_FW ? "UMAC" : "UDMA",
206 __entry->cmd, __entry->seq, __entry->count
207 )
208);
209
210#define iwm_ticket_action_symbol \
211 { IWM_RX_TICKET_DROP, "DROP" }, \
212 { IWM_RX_TICKET_RELEASE, "RELEASE" }, \
213 { IWM_RX_TICKET_SNIFFER, "SNIFFER" }, \
214 { IWM_RX_TICKET_ENQUEUE, "ENQUEUE" }
215
216TRACE_EVENT(iwm_rx_ticket,
217 TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
218
219 TP_ARGS(iwm, buf, len),
220
221 TP_STRUCT__entry(
222 IWM_ENTRY
223 __field(u8, action)
224 __field(u8, reason)
225 __field(u16, id)
226 __field(u16, flags)
227 ),
228
229 TP_fast_assign(
230 struct iwm_rx_ticket *ticket =
231 ((struct iwm_umac_notif_rx_ticket *)buf)->tickets;
232
233 IWM_ASSIGN;
234 __entry->id = le16_to_cpu(ticket->id);
235 __entry->action = le16_to_cpu(ticket->action);
236 __entry->flags = le16_to_cpu(ticket->flags);
237 __entry->reason = (__entry->flags & IWM_RX_TICKET_DROP_REASON_MSK) >> IWM_RX_TICKET_DROP_REASON_POS;
238 ),
239
240 TP_printk(
241 IWM_PR_FMT " Rx ticket: id 0x%x, action %s, %s 0x%x%s",
242 IWM_PR_ARG, __entry->id,
243 __print_symbolic(__entry->action, iwm_ticket_action_symbol),
244 __entry->reason ? "reason" : "flags",
245 __entry->reason ? __entry->reason : __entry->flags,
246 __entry->flags & IWM_RX_TICKET_AMSDU_MSK ? ", AMSDU frame" : ""
247 )
248);
249
250TRACE_EVENT(iwm_rx_packet,
251 TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
252
253 TP_ARGS(iwm, buf, len),
254
255 TP_STRUCT__entry(
256 IWM_ENTRY
257 __field(u8, source)
258 __field(u16, id)
259 __field(u32, len)
260 ),
261
262 TP_fast_assign(
263 struct iwm_umac_wifi_in_hdr *hdr = buf;
264
265 IWM_ASSIGN;
266 __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
267 __entry->id = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
268 __entry->len = len - sizeof(*hdr);
269 ),
270
271 TP_printk(
272 IWM_PR_FMT " Rx %s packet: id 0x%x, %d bytes",
273 IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ?
274 "LMAC" : "UMAC", __entry->id, __entry->len
275 )
276);
277#endif
278
279#undef TRACE_INCLUDE_PATH
280#define TRACE_INCLUDE_PATH .
281#undef TRACE_INCLUDE_FILE
282#define TRACE_INCLUDE_FILE trace
283#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
index f6a02f123f31..3216621fc55a 100644
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -302,8 +302,8 @@ void iwm_tx_credit_init_pools(struct iwm_priv *iwm,
302 302
303#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr) 303#define IWM_UDMA_HDR_LEN sizeof(struct iwm_umac_wifi_out_hdr)
304 304
305static int iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb, 305static __le16 iwm_tx_build_packet(struct iwm_priv *iwm, struct sk_buff *skb,
306 int pool_id, u8 *buf) 306 int pool_id, u8 *buf)
307{ 307{
308 struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf; 308 struct iwm_umac_wifi_out_hdr *hdr = (struct iwm_umac_wifi_out_hdr *)buf;
309 struct iwm_udma_wifi_cmd udma_cmd; 309 struct iwm_udma_wifi_cmd udma_cmd;
@@ -347,6 +347,7 @@ static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
347 /* mark EOP for the last packet */ 347 /* mark EOP for the last packet */
348 iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1); 348 iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);
349 349
350 trace_iwm_tx_packets(iwm, txq->concat_buf, txq->concat_count);
350 ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count); 351 ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);
351 352
352 txq->concat_count = 0; 353 txq->concat_count = 0;
@@ -451,7 +452,6 @@ void iwm_tx_worker(struct work_struct *work)
451int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 452int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
452{ 453{
453 struct iwm_priv *iwm = ndev_to_iwm(netdev); 454 struct iwm_priv *iwm = ndev_to_iwm(netdev);
454 struct net_device *ndev = iwm_to_ndev(iwm);
455 struct wireless_dev *wdev = iwm_to_wdev(iwm); 455 struct wireless_dev *wdev = iwm_to_wdev(iwm);
456 struct iwm_tx_info *tx_info; 456 struct iwm_tx_info *tx_info;
457 struct iwm_tx_queue *txq; 457 struct iwm_tx_queue *txq;
@@ -518,12 +518,12 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
518 518
519 queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker); 519 queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
520 520
521 ndev->stats.tx_packets++; 521 netdev->stats.tx_packets++;
522 ndev->stats.tx_bytes += skb->len; 522 netdev->stats.tx_bytes += skb->len;
523 return NETDEV_TX_OK; 523 return NETDEV_TX_OK;
524 524
525 drop: 525 drop:
526 ndev->stats.tx_dropped++; 526 netdev->stats.tx_dropped++;
527 dev_kfree_skb_any(skb); 527 dev_kfree_skb_any(skb);
528 return NETDEV_TX_OK; 528 return NETDEV_TX_OK;
529} 529}
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index 7f54a145ca65..0cbba3ecc813 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -362,7 +362,7 @@ struct iwm_udma_out_wifi_hdr {
362#define IWM_RX_TICKET_SPECIAL_SNAP_MSK 0x4 362#define IWM_RX_TICKET_SPECIAL_SNAP_MSK 0x4
363#define IWM_RX_TICKET_AMSDU_MSK 0x8 363#define IWM_RX_TICKET_AMSDU_MSK 0x8
364#define IWM_RX_TICKET_DROP_REASON_POS 4 364#define IWM_RX_TICKET_DROP_REASON_POS 4
365#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << RX_TICKET_FLAGS_DROP_REASON_POS) 365#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << IWM_RX_TICKET_DROP_REASON_POS)
366 366
367#define IWM_RX_DROP_NO_DROP 0x0 367#define IWM_RX_DROP_NO_DROP 0x0
368#define IWM_RX_DROP_BAD_CRC 0x1 368#define IWM_RX_DROP_BAD_CRC 0x1
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 12a2ef9dacea..aa06070e5eab 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -32,6 +32,9 @@ u8 lbs_bg_rates[MAX_RATES] =
320x00, 0x00 }; 320x00, 0x00 };
33 33
34 34
35static int assoc_helper_wep_keys(struct lbs_private *priv,
36 struct assoc_request *assoc_req);
37
35/** 38/**
36 * @brief This function finds common rates between rates and card rates. 39 * @brief This function finds common rates between rates and card rates.
37 * 40 *
@@ -611,7 +614,7 @@ static int lbs_assoc_post(struct lbs_private *priv,
611 614
612 if (status_code) { 615 if (status_code) {
613 lbs_mac_event_disconnected(priv); 616 lbs_mac_event_disconnected(priv);
614 ret = -1; 617 ret = status_code;
615 goto done; 618 goto done;
616 } 619 }
617 620
@@ -814,7 +817,24 @@ static int lbs_try_associate(struct lbs_private *priv,
814 goto out; 817 goto out;
815 818
816 ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE); 819 ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE);
820 /* If the association fails with current auth mode, let's
821 * try by changing the auth mode
822 */
823 if ((priv->authtype_auto) &&
824 (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) &&
825 (assoc_req->secinfo.wep_enabled) &&
826 (priv->connect_status != LBS_CONNECTED)) {
827 if (priv->secinfo.auth_mode == IW_AUTH_ALG_OPEN_SYSTEM)
828 priv->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
829 else
830 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
831 if (!assoc_helper_wep_keys(priv, assoc_req))
832 ret = lbs_associate(priv, assoc_req,
833 CMD_802_11_ASSOCIATE);
834 }
817 835
836 if (ret)
837 ret = -1;
818out: 838out:
819 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 839 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
820 return ret; 840 return ret;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index ce7bec402a33..9d5d3ccf08c8 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -79,6 +79,7 @@ static const u32 cipher_suites[] = {
79 79
80 80
81static int lbs_cfg_set_channel(struct wiphy *wiphy, 81static int lbs_cfg_set_channel(struct wiphy *wiphy,
82 struct net_device *netdev,
82 struct ieee80211_channel *chan, 83 struct ieee80211_channel *chan,
83 enum nl80211_channel_type channel_type) 84 enum nl80211_channel_type channel_type)
84{ 85{
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index a48ccaffb288..de2caac11dd6 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -75,7 +75,7 @@ static ssize_t lbs_getscantable(struct file *file, char __user *userbuf,
75 return -ENOMEM; 75 return -ENOMEM;
76 76
77 pos += snprintf(buf+pos, len-pos, 77 pos += snprintf(buf+pos, len-pos,
78 "# | ch | rssi | bssid | cap | Qual | SSID \n"); 78 "# | ch | rssi | bssid | cap | Qual | SSID\n");
79 79
80 mutex_lock(&priv->lock); 80 mutex_lock(&priv->lock);
81 list_for_each_entry (iter_bss, &priv->network_list, list) { 81 list_for_each_entry (iter_bss, &priv->network_list, list) {
@@ -757,15 +757,12 @@ void lbs_debugfs_init(void)
757{ 757{
758 if (!lbs_dir) 758 if (!lbs_dir)
759 lbs_dir = debugfs_create_dir("lbs_wireless", NULL); 759 lbs_dir = debugfs_create_dir("lbs_wireless", NULL);
760
761 return;
762} 760}
763 761
764void lbs_debugfs_remove(void) 762void lbs_debugfs_remove(void)
765{ 763{
766 if (lbs_dir) 764 if (lbs_dir)
767 debugfs_remove(lbs_dir); 765 debugfs_remove(lbs_dir);
768 return;
769} 766}
770 767
771void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev) 768void lbs_debugfs_init_one(struct lbs_private *priv, struct net_device *dev)
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 6875e1498bd5..a54880e4ad2b 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -134,6 +134,7 @@ struct lbs_private {
134 u8 wpa_ie_len; 134 u8 wpa_ie_len;
135 u16 wep_tx_keyidx; 135 u16 wep_tx_keyidx;
136 struct enc_key wep_keys[4]; 136 struct enc_key wep_keys[4];
137 u8 authtype_auto;
137 138
138 /* Wake On LAN */ 139 /* Wake On LAN */
139 uint32_t wol_criteria; 140 uint32_t wol_criteria;
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 6d55439a7b97..08e4e3908003 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -777,7 +777,7 @@ static void if_cs_release(struct pcmcia_device *p_dev)
777 777
778 lbs_deb_enter(LBS_DEB_CS); 778 lbs_deb_enter(LBS_DEB_CS);
779 779
780 free_irq(p_dev->irq.AssignedIRQ, card); 780 free_irq(p_dev->irq, card);
781 pcmcia_disable_device(p_dev); 781 pcmcia_disable_device(p_dev);
782 if (card->iobase) 782 if (card->iobase)
783 ioport_unmap(card->iobase); 783 ioport_unmap(card->iobase);
@@ -807,8 +807,7 @@ static int if_cs_ioprobe(struct pcmcia_device *p_dev,
807 p_dev->io.NumPorts1 = cfg->io.win[0].len; 807 p_dev->io.NumPorts1 = cfg->io.win[0].len;
808 808
809 /* Do we need to allocate an interrupt? */ 809 /* Do we need to allocate an interrupt? */
810 if (cfg->irq.IRQInfo1) 810 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
811 p_dev->conf.Attributes |= CONF_ENABLE_IRQ;
812 811
813 /* IO window settings */ 812 /* IO window settings */
814 if (cfg->io.nwin != 1) { 813 if (cfg->io.nwin != 1) {
@@ -837,9 +836,6 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
837 card->p_dev = p_dev; 836 card->p_dev = p_dev;
838 p_dev->priv = card; 837 p_dev->priv = card;
839 838
840 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
841 p_dev->irq.Handler = NULL;
842
843 p_dev->conf.Attributes = 0; 839 p_dev->conf.Attributes = 0;
844 p_dev->conf.IntType = INT_MEMORY_AND_IO; 840 p_dev->conf.IntType = INT_MEMORY_AND_IO;
845 841
@@ -854,13 +850,8 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
854 * a handler to the interrupt, unless the 'Handler' member of 850 * a handler to the interrupt, unless the 'Handler' member of
855 * the irq structure is initialized. 851 * the irq structure is initialized.
856 */ 852 */
857 if (p_dev->conf.Attributes & CONF_ENABLE_IRQ) { 853 if (!p_dev->irq)
858 ret = pcmcia_request_irq(p_dev, &p_dev->irq); 854 goto out1;
859 if (ret) {
860 lbs_pr_err("error in pcmcia_request_irq\n");
861 goto out1;
862 }
863 }
864 855
865 /* Initialize io access */ 856 /* Initialize io access */
866 card->iobase = ioport_map(p_dev->io.BasePort1, p_dev->io.NumPorts1); 857 card->iobase = ioport_map(p_dev->io.BasePort1, p_dev->io.NumPorts1);
@@ -883,7 +874,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
883 874
884 /* Finally, report what we've done */ 875 /* Finally, report what we've done */
885 lbs_deb_cs("irq %d, io 0x%04x-0x%04x\n", 876 lbs_deb_cs("irq %d, io 0x%04x-0x%04x\n",
886 p_dev->irq.AssignedIRQ, p_dev->io.BasePort1, 877 p_dev->irq, p_dev->io.BasePort1,
887 p_dev->io.BasePort1 + p_dev->io.NumPorts1 - 1); 878 p_dev->io.BasePort1 + p_dev->io.NumPorts1 - 1);
888 879
889 /* 880 /*
@@ -940,7 +931,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
940 priv->fw_ready = 1; 931 priv->fw_ready = 1;
941 932
942 /* Now actually get the IRQ */ 933 /* Now actually get the IRQ */
943 ret = request_irq(p_dev->irq.AssignedIRQ, if_cs_interrupt, 934 ret = request_irq(p_dev->irq, if_cs_interrupt,
944 IRQF_SHARED, DRV_NAME, card); 935 IRQF_SHARED, DRV_NAME, card);
945 if (ret) { 936 if (ret) {
946 lbs_pr_err("error in request_irq\n"); 937 lbs_pr_err("error in request_irq\n");
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 7d1a3c6b6ce0..64dd345d30f5 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -35,6 +35,8 @@
35#include <linux/mmc/card.h> 35#include <linux/mmc/card.h>
36#include <linux/mmc/sdio_func.h> 36#include <linux/mmc/sdio_func.h>
37#include <linux/mmc/sdio_ids.h> 37#include <linux/mmc/sdio_ids.h>
38#include <linux/mmc/sdio.h>
39#include <linux/mmc/host.h>
38 40
39#include "host.h" 41#include "host.h"
40#include "decl.h" 42#include "decl.h"
@@ -313,12 +315,30 @@ out:
313 return ret; 315 return ret;
314} 316}
315 317
318static int if_sdio_wait_status(struct if_sdio_card *card, const u8 condition)
319{
320 u8 status;
321 unsigned long timeout;
322 int ret = 0;
323
324 timeout = jiffies + HZ;
325 while (1) {
326 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret);
327 if (ret)
328 return ret;
329 if ((status & condition) == condition)
330 break;
331 if (time_after(jiffies, timeout))
332 return -ETIMEDOUT;
333 mdelay(1);
334 }
335 return ret;
336}
337
316static int if_sdio_card_to_host(struct if_sdio_card *card) 338static int if_sdio_card_to_host(struct if_sdio_card *card)
317{ 339{
318 int ret; 340 int ret;
319 u8 status;
320 u16 size, type, chunk; 341 u16 size, type, chunk;
321 unsigned long timeout;
322 342
323 lbs_deb_enter(LBS_DEB_SDIO); 343 lbs_deb_enter(LBS_DEB_SDIO);
324 344
@@ -333,19 +353,9 @@ static int if_sdio_card_to_host(struct if_sdio_card *card)
333 goto out; 353 goto out;
334 } 354 }
335 355
336 timeout = jiffies + HZ; 356 ret = if_sdio_wait_status(card, IF_SDIO_IO_RDY);
337 while (1) { 357 if (ret)
338 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret); 358 goto out;
339 if (ret)
340 goto out;
341 if (status & IF_SDIO_IO_RDY)
342 break;
343 if (time_after(jiffies, timeout)) {
344 ret = -ETIMEDOUT;
345 goto out;
346 }
347 mdelay(1);
348 }
349 359
350 /* 360 /*
351 * The transfer must be in one transaction or the firmware 361 * The transfer must be in one transaction or the firmware
@@ -412,8 +422,6 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
412{ 422{
413 struct if_sdio_card *card; 423 struct if_sdio_card *card;
414 struct if_sdio_packet *packet; 424 struct if_sdio_packet *packet;
415 unsigned long timeout;
416 u8 status;
417 int ret; 425 int ret;
418 unsigned long flags; 426 unsigned long flags;
419 427
@@ -433,25 +441,15 @@ static void if_sdio_host_to_card_worker(struct work_struct *work)
433 441
434 sdio_claim_host(card->func); 442 sdio_claim_host(card->func);
435 443
436 timeout = jiffies + HZ; 444 ret = if_sdio_wait_status(card, IF_SDIO_IO_RDY);
437 while (1) { 445 if (ret == 0) {
438 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret); 446 ret = sdio_writesb(card->func, card->ioport,
439 if (ret) 447 packet->buffer, packet->nb);
440 goto release;
441 if (status & IF_SDIO_IO_RDY)
442 break;
443 if (time_after(jiffies, timeout)) {
444 ret = -ETIMEDOUT;
445 goto release;
446 }
447 mdelay(1);
448 } 448 }
449 449
450 ret = sdio_writesb(card->func, card->ioport,
451 packet->buffer, packet->nb);
452 if (ret) 450 if (ret)
453 goto release; 451 lbs_pr_err("error %d sending packet to firmware\n", ret);
454release: 452
455 sdio_release_host(card->func); 453 sdio_release_host(card->func);
456 454
457 kfree(packet); 455 kfree(packet);
@@ -464,10 +462,11 @@ release:
464/* Firmware */ 462/* Firmware */
465/********************************************************************/ 463/********************************************************************/
466 464
465#define FW_DL_READY_STATUS (IF_SDIO_IO_RDY | IF_SDIO_DL_RDY)
466
467static int if_sdio_prog_helper(struct if_sdio_card *card) 467static int if_sdio_prog_helper(struct if_sdio_card *card)
468{ 468{
469 int ret; 469 int ret;
470 u8 status;
471 const struct firmware *fw; 470 const struct firmware *fw;
472 unsigned long timeout; 471 unsigned long timeout;
473 u8 *chunk_buffer; 472 u8 *chunk_buffer;
@@ -499,20 +498,14 @@ static int if_sdio_prog_helper(struct if_sdio_card *card)
499 size = fw->size; 498 size = fw->size;
500 499
501 while (size) { 500 while (size) {
502 timeout = jiffies + HZ; 501 ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
503 while (1) { 502 if (ret)
504 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret); 503 goto release;
505 if (ret) 504
506 goto release; 505 /* On some platforms (like Davinci) the chip needs more time
507 if ((status & IF_SDIO_IO_RDY) && 506 * between helper blocks.
508 (status & IF_SDIO_DL_RDY)) 507 */
509 break; 508 mdelay(2);
510 if (time_after(jiffies, timeout)) {
511 ret = -ETIMEDOUT;
512 goto release;
513 }
514 mdelay(1);
515 }
516 509
517 chunk_size = min(size, (size_t)60); 510 chunk_size = min(size, (size_t)60);
518 511
@@ -582,7 +575,6 @@ out:
582static int if_sdio_prog_real(struct if_sdio_card *card) 575static int if_sdio_prog_real(struct if_sdio_card *card)
583{ 576{
584 int ret; 577 int ret;
585 u8 status;
586 const struct firmware *fw; 578 const struct firmware *fw;
587 unsigned long timeout; 579 unsigned long timeout;
588 u8 *chunk_buffer; 580 u8 *chunk_buffer;
@@ -614,20 +606,9 @@ static int if_sdio_prog_real(struct if_sdio_card *card)
614 size = fw->size; 606 size = fw->size;
615 607
616 while (size) { 608 while (size) {
617 timeout = jiffies + HZ; 609 ret = if_sdio_wait_status(card, FW_DL_READY_STATUS);
618 while (1) { 610 if (ret)
619 status = sdio_readb(card->func, IF_SDIO_STATUS, &ret); 611 goto release;
620 if (ret)
621 goto release;
622 if ((status & IF_SDIO_IO_RDY) &&
623 (status & IF_SDIO_DL_RDY))
624 break;
625 if (time_after(jiffies, timeout)) {
626 ret = -ETIMEDOUT;
627 goto release;
628 }
629 mdelay(1);
630 }
631 612
632 req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret); 613 req_size = sdio_readb(card->func, IF_SDIO_RD_BASE, &ret);
633 if (ret) 614 if (ret)
@@ -943,6 +924,7 @@ static int if_sdio_probe(struct sdio_func *func,
943 int ret, i; 924 int ret, i;
944 unsigned int model; 925 unsigned int model;
945 struct if_sdio_packet *packet; 926 struct if_sdio_packet *packet;
927 struct mmc_host *host = func->card->host;
946 928
947 lbs_deb_enter(LBS_DEB_SDIO); 929 lbs_deb_enter(LBS_DEB_SDIO);
948 930
@@ -1023,6 +1005,25 @@ static int if_sdio_probe(struct sdio_func *func,
1023 if (ret) 1005 if (ret)
1024 goto disable; 1006 goto disable;
1025 1007
1008 /* For 1-bit transfers to the 8686 model, we need to enable the
1009 * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
1010 * bit to allow access to non-vendor registers. */
1011 if ((card->model == IF_SDIO_MODEL_8686) &&
1012 (host->caps & MMC_CAP_SDIO_IRQ) &&
1013 (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
1014 u8 reg;
1015
1016 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
1017 reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
1018 if (ret)
1019 goto release_int;
1020
1021 reg |= SDIO_BUS_ECSI;
1022 sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
1023 if (ret)
1024 goto release_int;
1025 }
1026
1026 card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret); 1027 card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
1027 if (ret) 1028 if (ret)
1028 goto release_int; 1029 goto release_int;
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index fcea5741ba62..f41594c7ac16 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -133,8 +133,6 @@ static void if_usb_write_bulk_callback(struct urb *urb)
133 /* print the failure status number for debug */ 133 /* print the failure status number for debug */
134 lbs_pr_info("URB in failure status: %d\n", urb->status); 134 lbs_pr_info("URB in failure status: %d\n", urb->status);
135 } 135 }
136
137 return;
138} 136}
139 137
140/** 138/**
@@ -651,8 +649,6 @@ static void if_usb_receive_fwload(struct urb *urb)
651 if_usb_submit_rx_urb_fwload(cardp); 649 if_usb_submit_rx_urb_fwload(cardp);
652 650
653 kfree(syncfwheader); 651 kfree(syncfwheader);
654
655 return;
656} 652}
657 653
658#define MRVDRV_MIN_PKT_LEN 30 654#define MRVDRV_MIN_PKT_LEN 30
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 598080414b17..d9b8ee130c45 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -229,7 +229,7 @@ static void lbs_tx_timeout(struct net_device *dev)
229 229
230 lbs_pr_err("tx watch dog timeout\n"); 230 lbs_pr_err("tx watch dog timeout\n");
231 231
232 dev->trans_start = jiffies; 232 dev->trans_start = jiffies; /* prevent tx timeout */
233 233
234 if (priv->currenttxskb) 234 if (priv->currenttxskb)
235 lbs_send_tx_feedback(priv, 0); 235 lbs_send_tx_feedback(priv, 0);
@@ -319,7 +319,7 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
319 struct net_device *dev, int nr_addrs) 319 struct net_device *dev, int nr_addrs)
320{ 320{
321 int i = nr_addrs; 321 int i = nr_addrs;
322 struct dev_mc_list *mc_list; 322 struct netdev_hw_addr *ha;
323 int cnt; 323 int cnt;
324 324
325 if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST)) 325 if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST))
@@ -327,19 +327,19 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
327 327
328 netif_addr_lock_bh(dev); 328 netif_addr_lock_bh(dev);
329 cnt = netdev_mc_count(dev); 329 cnt = netdev_mc_count(dev);
330 netdev_for_each_mc_addr(mc_list, dev) { 330 netdev_for_each_mc_addr(ha, dev) {
331 if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) { 331 if (mac_in_list(cmd->maclist, nr_addrs, ha->addr)) {
332 lbs_deb_net("mcast address %s:%pM skipped\n", dev->name, 332 lbs_deb_net("mcast address %s:%pM skipped\n", dev->name,
333 mc_list->dmi_addr); 333 ha->addr);
334 cnt--; 334 cnt--;
335 continue; 335 continue;
336 } 336 }
337 337
338 if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE) 338 if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE)
339 break; 339 break;
340 memcpy(&cmd->maclist[6*i], mc_list->dmi_addr, ETH_ALEN); 340 memcpy(&cmd->maclist[6*i], ha->addr, ETH_ALEN);
341 lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name, 341 lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name,
342 mc_list->dmi_addr); 342 ha->addr);
343 i++; 343 i++;
344 cnt--; 344 cnt--;
345 } 345 }
@@ -836,6 +836,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
836 priv->is_auto_deep_sleep_enabled = 0; 836 priv->is_auto_deep_sleep_enabled = 0;
837 priv->wakeup_dev_required = 0; 837 priv->wakeup_dev_required = 0;
838 init_waitqueue_head(&priv->ds_awake_q); 838 init_waitqueue_head(&priv->ds_awake_q);
839 priv->authtype_auto = 1;
839 840
840 mutex_init(&priv->lock); 841 mutex_init(&priv->lock);
841 842
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 784dae714705..a115bfa9513a 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -39,10 +39,10 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
39 struct sk_buff *skb); 39 struct sk_buff *skb);
40 40
41/** 41/**
42 * @brief This function computes the avgSNR . 42 * @brief This function computes the avgSNR .
43 * 43 *
44 * @param priv A pointer to struct lbs_private structure 44 * @param priv A pointer to struct lbs_private structure
45 * @return avgSNR 45 * @return avgSNR
46 */ 46 */
47static u8 lbs_getavgsnr(struct lbs_private *priv) 47static u8 lbs_getavgsnr(struct lbs_private *priv)
48{ 48{
@@ -57,10 +57,10 @@ static u8 lbs_getavgsnr(struct lbs_private *priv)
57} 57}
58 58
59/** 59/**
60 * @brief This function computes the AvgNF 60 * @brief This function computes the AvgNF
61 * 61 *
62 * @param priv A pointer to struct lbs_private structure 62 * @param priv A pointer to struct lbs_private structure
63 * @return AvgNF 63 * @return AvgNF
64 */ 64 */
65static u8 lbs_getavgnf(struct lbs_private *priv) 65static u8 lbs_getavgnf(struct lbs_private *priv)
66{ 66{
@@ -75,11 +75,11 @@ static u8 lbs_getavgnf(struct lbs_private *priv)
75} 75}
76 76
77/** 77/**
78 * @brief This function save the raw SNR/NF to our internel buffer 78 * @brief This function save the raw SNR/NF to our internel buffer
79 * 79 *
80 * @param priv A pointer to struct lbs_private structure 80 * @param priv A pointer to struct lbs_private structure
81 * @param prxpd A pointer to rxpd structure of received packet 81 * @param prxpd A pointer to rxpd structure of received packet
82 * @return n/a 82 * @return n/a
83 */ 83 */
84static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd) 84static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
85{ 85{
@@ -90,15 +90,14 @@ static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
90 priv->nextSNRNF++; 90 priv->nextSNRNF++;
91 if (priv->nextSNRNF >= DEFAULT_DATA_AVG_FACTOR) 91 if (priv->nextSNRNF >= DEFAULT_DATA_AVG_FACTOR)
92 priv->nextSNRNF = 0; 92 priv->nextSNRNF = 0;
93 return;
94} 93}
95 94
96/** 95/**
97 * @brief This function computes the RSSI in received packet. 96 * @brief This function computes the RSSI in received packet.
98 * 97 *
99 * @param priv A pointer to struct lbs_private structure 98 * @param priv A pointer to struct lbs_private structure
100 * @param prxpd A pointer to rxpd structure of received packet 99 * @param prxpd A pointer to rxpd structure of received packet
101 * @return n/a 100 * @return n/a
102 */ 101 */
103static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd) 102static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
104{ 103{
@@ -135,9 +134,9 @@ static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
135 * @brief This function processes received packet and forwards it 134 * @brief This function processes received packet and forwards it
136 * to kernel/upper layer 135 * to kernel/upper layer
137 * 136 *
138 * @param priv A pointer to struct lbs_private 137 * @param priv A pointer to struct lbs_private
139 * @param skb A pointer to skb which includes the received packet 138 * @param skb A pointer to skb which includes the received packet
140 * @return 0 or -1 139 * @return 0 or -1
141 */ 140 */
142int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) 141int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
143{ 142{
@@ -197,7 +196,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
197 * before the snap_type. 196 * before the snap_type.
198 */ 197 */
199 p_ethhdr = (struct ethhdr *) 198 p_ethhdr = (struct ethhdr *)
200 ((u8 *) & p_rx_pkt->eth803_hdr 199 ((u8 *) &p_rx_pkt->eth803_hdr
201 + sizeof(p_rx_pkt->eth803_hdr) + sizeof(p_rx_pkt->rfc1042_hdr) 200 + sizeof(p_rx_pkt->eth803_hdr) + sizeof(p_rx_pkt->rfc1042_hdr)
202 - sizeof(p_rx_pkt->eth803_hdr.dest_addr) 201 - sizeof(p_rx_pkt->eth803_hdr.dest_addr)
203 - sizeof(p_rx_pkt->eth803_hdr.src_addr) 202 - sizeof(p_rx_pkt->eth803_hdr.src_addr)
@@ -214,7 +213,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
214 hdrchop = (u8 *)p_ethhdr - (u8 *)p_rx_pd; 213 hdrchop = (u8 *)p_ethhdr - (u8 *)p_rx_pd;
215 } else { 214 } else {
216 lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP", 215 lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP",
217 (u8 *) & p_rx_pkt->rfc1042_hdr, 216 (u8 *) &p_rx_pkt->rfc1042_hdr,
218 sizeof(p_rx_pkt->rfc1042_hdr)); 217 sizeof(p_rx_pkt->rfc1042_hdr));
219 218
220 /* Chop off the rxpd */ 219 /* Chop off the rxpd */
@@ -255,8 +254,8 @@ EXPORT_SYMBOL_GPL(lbs_process_rxed_packet);
255 * @brief This function converts Tx/Rx rates from the Marvell WLAN format 254 * @brief This function converts Tx/Rx rates from the Marvell WLAN format
256 * (see Table 2 in Section 3.1) to IEEE80211_RADIOTAP_RATE units (500 Kb/s) 255 * (see Table 2 in Section 3.1) to IEEE80211_RADIOTAP_RATE units (500 Kb/s)
257 * 256 *
258 * @param rate Input rate 257 * @param rate Input rate
259 * @return Output Rate (0 if invalid) 258 * @return Output Rate (0 if invalid)
260 */ 259 */
261static u8 convert_mv_rate_to_radiotap(u8 rate) 260static u8 convert_mv_rate_to_radiotap(u8 rate)
262{ 261{
@@ -295,9 +294,9 @@ static u8 convert_mv_rate_to_radiotap(u8 rate)
295 * @brief This function processes a received 802.11 packet and forwards it 294 * @brief This function processes a received 802.11 packet and forwards it
296 * to kernel/upper layer 295 * to kernel/upper layer
297 * 296 *
298 * @param priv A pointer to struct lbs_private 297 * @param priv A pointer to struct lbs_private
299 * @param skb A pointer to skb which includes the received packet 298 * @param skb A pointer to skb which includes the received packet
300 * @return 0 or -1 299 * @return 0 or -1
301 */ 300 */
302static int process_rxed_802_11_packet(struct lbs_private *priv, 301static int process_rxed_802_11_packet(struct lbs_private *priv,
303 struct sk_buff *skb) 302 struct sk_buff *skb)
@@ -314,7 +313,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
314 p_rx_pkt = (struct rx80211packethdr *) skb->data; 313 p_rx_pkt = (struct rx80211packethdr *) skb->data;
315 prxpd = &p_rx_pkt->rx_pd; 314 prxpd = &p_rx_pkt->rx_pd;
316 315
317 // lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); 316 /* lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); */
318 317
319 if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { 318 if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
320 lbs_deb_rx("rx err: frame received with bad length\n"); 319 lbs_deb_rx("rx err: frame received with bad length\n");
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 52d244ea3d97..a9bf658659eb 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -147,8 +147,6 @@ netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
147 dev->stats.tx_packets++; 147 dev->stats.tx_packets++;
148 dev->stats.tx_bytes += skb->len; 148 dev->stats.tx_bytes += skb->len;
149 149
150 dev->trans_start = jiffies;
151
152 if (priv->monitormode) { 150 if (priv->monitormode) {
153 /* Keep the skb to echo it back once Tx feedback is 151 /* Keep the skb to echo it back once Tx feedback is
154 received from FW */ 152 received from FW */
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 9b555884b08a..f96a96031a50 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -1441,8 +1441,10 @@ static int lbs_set_encode(struct net_device *dev,
1441 set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags); 1441 set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags);
1442 1442
1443 if (dwrq->flags & IW_ENCODE_RESTRICTED) { 1443 if (dwrq->flags & IW_ENCODE_RESTRICTED) {
1444 priv->authtype_auto = 0;
1444 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY; 1445 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
1445 } else if (dwrq->flags & IW_ENCODE_OPEN) { 1446 } else if (dwrq->flags & IW_ENCODE_OPEN) {
1447 priv->authtype_auto = 0;
1446 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; 1448 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
1447 } 1449 }
1448 1450
@@ -1621,8 +1623,10 @@ static int lbs_set_encodeext(struct net_device *dev,
1621 goto out; 1623 goto out;
1622 1624
1623 if (dwrq->flags & IW_ENCODE_RESTRICTED) { 1625 if (dwrq->flags & IW_ENCODE_RESTRICTED) {
1626 priv->authtype_auto = 0;
1624 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY; 1627 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
1625 } else if (dwrq->flags & IW_ENCODE_OPEN) { 1628 } else if (dwrq->flags & IW_ENCODE_OPEN) {
1629 priv->authtype_auto = 0;
1626 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; 1630 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
1627 } 1631 }
1628 1632
diff --git a/drivers/net/wireless/libertas_tf/cmd.c b/drivers/net/wireless/libertas_tf/cmd.c
index b620daf59ef7..8945afd6ce3e 100644
--- a/drivers/net/wireless/libertas_tf/cmd.c
+++ b/drivers/net/wireless/libertas_tf/cmd.c
@@ -7,6 +7,8 @@
7 * the Free Software Foundation; either version 2 of the License, or (at 7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version. 8 * your option) any later version.
9 */ 9 */
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
10#include <linux/slab.h> 12#include <linux/slab.h>
11 13
12#include "libertas_tf.h" 14#include "libertas_tf.h"
@@ -82,6 +84,8 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
82 int ret = -1; 84 int ret = -1;
83 u32 i; 85 u32 i;
84 86
87 lbtf_deb_enter(LBTF_DEB_CMD);
88
85 memset(&cmd, 0, sizeof(cmd)); 89 memset(&cmd, 0, sizeof(cmd));
86 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 90 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
87 memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN); 91 memcpy(cmd.permanentaddr, priv->current_addr, ETH_ALEN);
@@ -104,6 +108,8 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
104 priv->fwrelease >> 8 & 0xff, 108 priv->fwrelease >> 8 & 0xff,
105 priv->fwrelease & 0xff, 109 priv->fwrelease & 0xff,
106 priv->fwcapinfo); 110 priv->fwcapinfo);
111 lbtf_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
112 cmd.hwifversion, cmd.version);
107 113
108 /* Clamp region code to 8-bit since FW spec indicates that it should 114 /* Clamp region code to 8-bit since FW spec indicates that it should
109 * only ever be 8-bit, even though the field size is 16-bit. Some 115 * only ever be 8-bit, even though the field size is 16-bit. Some
@@ -118,8 +124,10 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
118 } 124 }
119 125
120 /* if it's unidentified region code, use the default (USA) */ 126 /* if it's unidentified region code, use the default (USA) */
121 if (i >= MRVDRV_MAX_REGION_CODE) 127 if (i >= MRVDRV_MAX_REGION_CODE) {
122 priv->regioncode = 0x10; 128 priv->regioncode = 0x10;
129 pr_info("unidentified region code; using the default (USA)\n");
130 }
123 131
124 if (priv->current_addr[0] == 0xff) 132 if (priv->current_addr[0] == 0xff)
125 memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN); 133 memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
@@ -128,6 +136,7 @@ int lbtf_update_hw_spec(struct lbtf_private *priv)
128 136
129 lbtf_geo_init(priv); 137 lbtf_geo_init(priv);
130out: 138out:
139 lbtf_deb_leave(LBTF_DEB_CMD);
131 return ret; 140 return ret;
132} 141}
133 142
@@ -141,13 +150,18 @@ out:
141 */ 150 */
142int lbtf_set_channel(struct lbtf_private *priv, u8 channel) 151int lbtf_set_channel(struct lbtf_private *priv, u8 channel)
143{ 152{
153 int ret = 0;
144 struct cmd_ds_802_11_rf_channel cmd; 154 struct cmd_ds_802_11_rf_channel cmd;
145 155
156 lbtf_deb_enter(LBTF_DEB_CMD);
157
146 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 158 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
147 cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET); 159 cmd.action = cpu_to_le16(CMD_OPT_802_11_RF_CHANNEL_SET);
148 cmd.channel = cpu_to_le16(channel); 160 cmd.channel = cpu_to_le16(channel);
149 161
150 return lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd); 162 ret = lbtf_cmd_with_response(priv, CMD_802_11_RF_CHANNEL, &cmd);
163 lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
164 return ret;
151} 165}
152 166
153int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon) 167int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
@@ -155,20 +169,28 @@ int lbtf_beacon_set(struct lbtf_private *priv, struct sk_buff *beacon)
155 struct cmd_ds_802_11_beacon_set cmd; 169 struct cmd_ds_802_11_beacon_set cmd;
156 int size; 170 int size;
157 171
158 if (beacon->len > MRVL_MAX_BCN_SIZE) 172 lbtf_deb_enter(LBTF_DEB_CMD);
173
174 if (beacon->len > MRVL_MAX_BCN_SIZE) {
175 lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", -1);
159 return -1; 176 return -1;
177 }
160 size = sizeof(cmd) - sizeof(cmd.beacon) + beacon->len; 178 size = sizeof(cmd) - sizeof(cmd.beacon) + beacon->len;
161 cmd.hdr.size = cpu_to_le16(size); 179 cmd.hdr.size = cpu_to_le16(size);
162 cmd.len = cpu_to_le16(beacon->len); 180 cmd.len = cpu_to_le16(beacon->len);
163 memcpy(cmd.beacon, (u8 *) beacon->data, beacon->len); 181 memcpy(cmd.beacon, (u8 *) beacon->data, beacon->len);
164 182
165 lbtf_cmd_async(priv, CMD_802_11_BEACON_SET, &cmd.hdr, size); 183 lbtf_cmd_async(priv, CMD_802_11_BEACON_SET, &cmd.hdr, size);
184
185 lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", 0);
166 return 0; 186 return 0;
167} 187}
168 188
169int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable, 189int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
170 int beacon_int) { 190 int beacon_int)
191{
171 struct cmd_ds_802_11_beacon_control cmd; 192 struct cmd_ds_802_11_beacon_control cmd;
193 lbtf_deb_enter(LBTF_DEB_CMD);
172 194
173 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 195 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
174 cmd.action = cpu_to_le16(CMD_ACT_SET); 196 cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -176,6 +198,8 @@ int lbtf_beacon_ctrl(struct lbtf_private *priv, bool beacon_enable,
176 cmd.beacon_period = cpu_to_le16(beacon_int); 198 cmd.beacon_period = cpu_to_le16(beacon_int);
177 199
178 lbtf_cmd_async(priv, CMD_802_11_BEACON_CTRL, &cmd.hdr, sizeof(cmd)); 200 lbtf_cmd_async(priv, CMD_802_11_BEACON_CTRL, &cmd.hdr, sizeof(cmd));
201
202 lbtf_deb_leave(LBTF_DEB_CMD);
179 return 0; 203 return 0;
180} 204}
181 205
@@ -183,17 +207,28 @@ static void lbtf_queue_cmd(struct lbtf_private *priv,
183 struct cmd_ctrl_node *cmdnode) 207 struct cmd_ctrl_node *cmdnode)
184{ 208{
185 unsigned long flags; 209 unsigned long flags;
210 lbtf_deb_enter(LBTF_DEB_HOST);
186 211
187 if (!cmdnode) 212 if (!cmdnode) {
188 return; 213 lbtf_deb_host("QUEUE_CMD: cmdnode is NULL\n");
214 goto qcmd_done;
215 }
189 216
190 if (!cmdnode->cmdbuf->size) 217 if (!cmdnode->cmdbuf->size) {
191 return; 218 lbtf_deb_host("DNLD_CMD: cmd size is zero\n");
219 goto qcmd_done;
220 }
192 221
193 cmdnode->result = 0; 222 cmdnode->result = 0;
194 spin_lock_irqsave(&priv->driver_lock, flags); 223 spin_lock_irqsave(&priv->driver_lock, flags);
195 list_add_tail(&cmdnode->list, &priv->cmdpendingq); 224 list_add_tail(&cmdnode->list, &priv->cmdpendingq);
196 spin_unlock_irqrestore(&priv->driver_lock, flags); 225 spin_unlock_irqrestore(&priv->driver_lock, flags);
226
227 lbtf_deb_host("QUEUE_CMD: inserted command 0x%04x into cmdpendingq\n",
228 le16_to_cpu(cmdnode->cmdbuf->command));
229
230qcmd_done:
231 lbtf_deb_leave(LBTF_DEB_HOST);
197} 232}
198 233
199static void lbtf_submit_command(struct lbtf_private *priv, 234static void lbtf_submit_command(struct lbtf_private *priv,
@@ -206,22 +241,33 @@ static void lbtf_submit_command(struct lbtf_private *priv,
206 int timeo = 5 * HZ; 241 int timeo = 5 * HZ;
207 int ret; 242 int ret;
208 243
244 lbtf_deb_enter(LBTF_DEB_HOST);
245
209 cmd = cmdnode->cmdbuf; 246 cmd = cmdnode->cmdbuf;
210 247
211 spin_lock_irqsave(&priv->driver_lock, flags); 248 spin_lock_irqsave(&priv->driver_lock, flags);
212 priv->cur_cmd = cmdnode; 249 priv->cur_cmd = cmdnode;
213 cmdsize = le16_to_cpu(cmd->size); 250 cmdsize = le16_to_cpu(cmd->size);
214 command = le16_to_cpu(cmd->command); 251 command = le16_to_cpu(cmd->command);
252
253 lbtf_deb_cmd("DNLD_CMD: command 0x%04x, seq %d, size %d\n",
254 command, le16_to_cpu(cmd->seqnum), cmdsize);
255 lbtf_deb_hex(LBTF_DEB_CMD, "DNLD_CMD", (void *) cmdnode->cmdbuf, cmdsize);
256
215 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize); 257 ret = priv->hw_host_to_card(priv, MVMS_CMD, (u8 *) cmd, cmdsize);
216 spin_unlock_irqrestore(&priv->driver_lock, flags); 258 spin_unlock_irqrestore(&priv->driver_lock, flags);
217 259
218 if (ret) 260 if (ret) {
261 pr_info("DNLD_CMD: hw_host_to_card failed: %d\n", ret);
219 /* Let the timer kick in and retry, and potentially reset 262 /* Let the timer kick in and retry, and potentially reset
220 the whole thing if the condition persists */ 263 the whole thing if the condition persists */
221 timeo = HZ; 264 timeo = HZ;
265 }
222 266
223 /* Setup the timer after transmit command */ 267 /* Setup the timer after transmit command */
224 mod_timer(&priv->command_timer, jiffies + timeo); 268 mod_timer(&priv->command_timer, jiffies + timeo);
269
270 lbtf_deb_leave(LBTF_DEB_HOST);
225} 271}
226 272
227/** 273/**
@@ -231,8 +277,10 @@ static void lbtf_submit_command(struct lbtf_private *priv,
231static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv, 277static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
232 struct cmd_ctrl_node *cmdnode) 278 struct cmd_ctrl_node *cmdnode)
233{ 279{
280 lbtf_deb_enter(LBTF_DEB_HOST);
281
234 if (!cmdnode) 282 if (!cmdnode)
235 return; 283 goto cl_ins_out;
236 284
237 cmdnode->callback = NULL; 285 cmdnode->callback = NULL;
238 cmdnode->callback_arg = 0; 286 cmdnode->callback_arg = 0;
@@ -240,6 +288,9 @@ static void __lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
240 memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE); 288 memset(cmdnode->cmdbuf, 0, LBS_CMD_BUFFER_SIZE);
241 289
242 list_add_tail(&cmdnode->list, &priv->cmdfreeq); 290 list_add_tail(&cmdnode->list, &priv->cmdfreeq);
291
292cl_ins_out:
293 lbtf_deb_leave(LBTF_DEB_HOST);
243} 294}
244 295
245static void lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv, 296static void lbtf_cleanup_and_insert_cmd(struct lbtf_private *priv,
@@ -268,29 +319,41 @@ int lbtf_cmd_set_mac_multicast_addr(struct lbtf_private *priv)
268{ 319{
269 struct cmd_ds_mac_multicast_addr cmd; 320 struct cmd_ds_mac_multicast_addr cmd;
270 321
322 lbtf_deb_enter(LBTF_DEB_CMD);
323
271 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 324 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
272 cmd.action = cpu_to_le16(CMD_ACT_SET); 325 cmd.action = cpu_to_le16(CMD_ACT_SET);
273 326
274 cmd.nr_of_adrs = cpu_to_le16((u16) priv->nr_of_multicastmacaddr); 327 cmd.nr_of_adrs = cpu_to_le16((u16) priv->nr_of_multicastmacaddr);
328
329 lbtf_deb_cmd("MULTICAST_ADR: setting %d addresses\n", cmd.nr_of_adrs);
330
275 memcpy(cmd.maclist, priv->multicastlist, 331 memcpy(cmd.maclist, priv->multicastlist,
276 priv->nr_of_multicastmacaddr * ETH_ALEN); 332 priv->nr_of_multicastmacaddr * ETH_ALEN);
277 333
278 lbtf_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &cmd.hdr, sizeof(cmd)); 334 lbtf_cmd_async(priv, CMD_MAC_MULTICAST_ADR, &cmd.hdr, sizeof(cmd));
335
336 lbtf_deb_leave(LBTF_DEB_CMD);
279 return 0; 337 return 0;
280} 338}
281 339
282void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode) 340void lbtf_set_mode(struct lbtf_private *priv, enum lbtf_mode mode)
283{ 341{
284 struct cmd_ds_set_mode cmd; 342 struct cmd_ds_set_mode cmd;
343 lbtf_deb_enter(LBTF_DEB_WEXT);
285 344
286 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 345 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
287 cmd.mode = cpu_to_le16(mode); 346 cmd.mode = cpu_to_le16(mode);
347 lbtf_deb_wext("Switching to mode: 0x%x\n", mode);
288 lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd)); 348 lbtf_cmd_async(priv, CMD_802_11_SET_MODE, &cmd.hdr, sizeof(cmd));
349
350 lbtf_deb_leave(LBTF_DEB_WEXT);
289} 351}
290 352
291void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid) 353void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid)
292{ 354{
293 struct cmd_ds_set_bssid cmd; 355 struct cmd_ds_set_bssid cmd;
356 lbtf_deb_enter(LBTF_DEB_CMD);
294 357
295 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 358 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
296 cmd.activate = activate ? 1 : 0; 359 cmd.activate = activate ? 1 : 0;
@@ -298,11 +361,13 @@ void lbtf_set_bssid(struct lbtf_private *priv, bool activate, const u8 *bssid)
298 memcpy(cmd.bssid, bssid, ETH_ALEN); 361 memcpy(cmd.bssid, bssid, ETH_ALEN);
299 362
300 lbtf_cmd_async(priv, CMD_802_11_SET_BSSID, &cmd.hdr, sizeof(cmd)); 363 lbtf_cmd_async(priv, CMD_802_11_SET_BSSID, &cmd.hdr, sizeof(cmd));
364 lbtf_deb_leave(LBTF_DEB_CMD);
301} 365}
302 366
303int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr) 367int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
304{ 368{
305 struct cmd_ds_802_11_mac_address cmd; 369 struct cmd_ds_802_11_mac_address cmd;
370 lbtf_deb_enter(LBTF_DEB_CMD);
306 371
307 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 372 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
308 cmd.action = cpu_to_le16(CMD_ACT_SET); 373 cmd.action = cpu_to_le16(CMD_ACT_SET);
@@ -310,6 +375,7 @@ int lbtf_set_mac_address(struct lbtf_private *priv, uint8_t *mac_addr)
310 memcpy(cmd.macadd, mac_addr, ETH_ALEN); 375 memcpy(cmd.macadd, mac_addr, ETH_ALEN);
311 376
312 lbtf_cmd_async(priv, CMD_802_11_MAC_ADDRESS, &cmd.hdr, sizeof(cmd)); 377 lbtf_cmd_async(priv, CMD_802_11_MAC_ADDRESS, &cmd.hdr, sizeof(cmd));
378 lbtf_deb_leave(LBTF_DEB_CMD);
313 return 0; 379 return 0;
314} 380}
315 381
@@ -318,6 +384,8 @@ int lbtf_set_radio_control(struct lbtf_private *priv)
318 int ret = 0; 384 int ret = 0;
319 struct cmd_ds_802_11_radio_control cmd; 385 struct cmd_ds_802_11_radio_control cmd;
320 386
387 lbtf_deb_enter(LBTF_DEB_CMD);
388
321 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 389 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
322 cmd.action = cpu_to_le16(CMD_ACT_SET); 390 cmd.action = cpu_to_le16(CMD_ACT_SET);
323 391
@@ -341,19 +409,28 @@ int lbtf_set_radio_control(struct lbtf_private *priv)
341 else 409 else
342 cmd.control &= cpu_to_le16(~TURN_ON_RF); 410 cmd.control &= cpu_to_le16(~TURN_ON_RF);
343 411
412 lbtf_deb_cmd("RADIO_SET: radio %d, preamble %d\n", priv->radioon,
413 priv->preamble);
414
344 ret = lbtf_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd); 415 ret = lbtf_cmd_with_response(priv, CMD_802_11_RADIO_CONTROL, &cmd);
416
417 lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
345 return ret; 418 return ret;
346} 419}
347 420
348void lbtf_set_mac_control(struct lbtf_private *priv) 421void lbtf_set_mac_control(struct lbtf_private *priv)
349{ 422{
350 struct cmd_ds_mac_control cmd; 423 struct cmd_ds_mac_control cmd;
424 lbtf_deb_enter(LBTF_DEB_CMD);
425
351 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 426 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
352 cmd.action = cpu_to_le16(priv->mac_control); 427 cmd.action = cpu_to_le16(priv->mac_control);
353 cmd.reserved = 0; 428 cmd.reserved = 0;
354 429
355 lbtf_cmd_async(priv, CMD_MAC_CONTROL, 430 lbtf_cmd_async(priv, CMD_MAC_CONTROL,
356 &cmd.hdr, sizeof(cmd)); 431 &cmd.hdr, sizeof(cmd));
432
433 lbtf_deb_leave(LBTF_DEB_CMD);
357} 434}
358 435
359/** 436/**
@@ -365,29 +442,43 @@ void lbtf_set_mac_control(struct lbtf_private *priv)
365 */ 442 */
366int lbtf_allocate_cmd_buffer(struct lbtf_private *priv) 443int lbtf_allocate_cmd_buffer(struct lbtf_private *priv)
367{ 444{
445 int ret = 0;
368 u32 bufsize; 446 u32 bufsize;
369 u32 i; 447 u32 i;
370 struct cmd_ctrl_node *cmdarray; 448 struct cmd_ctrl_node *cmdarray;
371 449
450 lbtf_deb_enter(LBTF_DEB_HOST);
451
372 /* Allocate and initialize the command array */ 452 /* Allocate and initialize the command array */
373 bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS; 453 bufsize = sizeof(struct cmd_ctrl_node) * LBS_NUM_CMD_BUFFERS;
374 cmdarray = kzalloc(bufsize, GFP_KERNEL); 454 cmdarray = kzalloc(bufsize, GFP_KERNEL);
375 if (!cmdarray) 455 if (!cmdarray) {
376 return -1; 456 lbtf_deb_host("ALLOC_CMD_BUF: tempcmd_array is NULL\n");
457 ret = -1;
458 goto done;
459 }
377 priv->cmd_array = cmdarray; 460 priv->cmd_array = cmdarray;
378 461
379 /* Allocate and initialize each command buffer in the command array */ 462 /* Allocate and initialize each command buffer in the command array */
380 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { 463 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
381 cmdarray[i].cmdbuf = kzalloc(LBS_CMD_BUFFER_SIZE, GFP_KERNEL); 464 cmdarray[i].cmdbuf = kzalloc(LBS_CMD_BUFFER_SIZE, GFP_KERNEL);
382 if (!cmdarray[i].cmdbuf) 465 if (!cmdarray[i].cmdbuf) {
383 return -1; 466 lbtf_deb_host("ALLOC_CMD_BUF: ptempvirtualaddr is NULL\n");
467 ret = -1;
468 goto done;
469 }
384 } 470 }
385 471
386 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) { 472 for (i = 0; i < LBS_NUM_CMD_BUFFERS; i++) {
387 init_waitqueue_head(&cmdarray[i].cmdwait_q); 473 init_waitqueue_head(&cmdarray[i].cmdwait_q);
388 lbtf_cleanup_and_insert_cmd(priv, &cmdarray[i]); 474 lbtf_cleanup_and_insert_cmd(priv, &cmdarray[i]);
389 } 475 }
390 return 0; 476
477 ret = 0;
478
479done:
480 lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %d", ret);
481 return ret;
391} 482}
392 483
393/** 484/**
@@ -402,9 +493,13 @@ int lbtf_free_cmd_buffer(struct lbtf_private *priv)
402 struct cmd_ctrl_node *cmdarray; 493 struct cmd_ctrl_node *cmdarray;
403 unsigned int i; 494 unsigned int i;
404 495
496 lbtf_deb_enter(LBTF_DEB_HOST);
497
405 /* need to check if cmd array is allocated or not */ 498 /* need to check if cmd array is allocated or not */
406 if (priv->cmd_array == NULL) 499 if (priv->cmd_array == NULL) {
407 return 0; 500 lbtf_deb_host("FREE_CMD_BUF: cmd_array is NULL\n");
501 goto done;
502 }
408 503
409 cmdarray = priv->cmd_array; 504 cmdarray = priv->cmd_array;
410 505
@@ -418,6 +513,8 @@ int lbtf_free_cmd_buffer(struct lbtf_private *priv)
418 kfree(priv->cmd_array); 513 kfree(priv->cmd_array);
419 priv->cmd_array = NULL; 514 priv->cmd_array = NULL;
420 515
516done:
517 lbtf_deb_leave(LBTF_DEB_HOST);
421 return 0; 518 return 0;
422} 519}
423 520
@@ -433,6 +530,8 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
433 struct cmd_ctrl_node *tempnode; 530 struct cmd_ctrl_node *tempnode;
434 unsigned long flags; 531 unsigned long flags;
435 532
533 lbtf_deb_enter(LBTF_DEB_HOST);
534
436 if (!priv) 535 if (!priv)
437 return NULL; 536 return NULL;
438 537
@@ -442,11 +541,14 @@ static struct cmd_ctrl_node *lbtf_get_cmd_ctrl_node(struct lbtf_private *priv)
442 tempnode = list_first_entry(&priv->cmdfreeq, 541 tempnode = list_first_entry(&priv->cmdfreeq,
443 struct cmd_ctrl_node, list); 542 struct cmd_ctrl_node, list);
444 list_del(&tempnode->list); 543 list_del(&tempnode->list);
445 } else 544 } else {
545 lbtf_deb_host("GET_CMD_NODE: cmd_ctrl_node is not available\n");
446 tempnode = NULL; 546 tempnode = NULL;
547 }
447 548
448 spin_unlock_irqrestore(&priv->driver_lock, flags); 549 spin_unlock_irqrestore(&priv->driver_lock, flags);
449 550
551 lbtf_deb_leave(LBTF_DEB_HOST);
450 return tempnode; 552 return tempnode;
451} 553}
452 554
@@ -462,16 +564,20 @@ int lbtf_execute_next_command(struct lbtf_private *priv)
462 struct cmd_ctrl_node *cmdnode = NULL; 564 struct cmd_ctrl_node *cmdnode = NULL;
463 struct cmd_header *cmd; 565 struct cmd_header *cmd;
464 unsigned long flags; 566 unsigned long flags;
567 int ret = 0;
465 568
466 /* Debug group is LBS_DEB_THREAD and not LBS_DEB_HOST, because the 569 /* Debug group is lbtf_deb_THREAD and not lbtf_deb_HOST, because the
467 * only caller to us is lbtf_thread() and we get even when a 570 * only caller to us is lbtf_thread() and we get even when a
468 * data packet is received */ 571 * data packet is received */
572 lbtf_deb_enter(LBTF_DEB_THREAD);
469 573
470 spin_lock_irqsave(&priv->driver_lock, flags); 574 spin_lock_irqsave(&priv->driver_lock, flags);
471 575
472 if (priv->cur_cmd) { 576 if (priv->cur_cmd) {
577 pr_alert("EXEC_NEXT_CMD: already processing command!\n");
473 spin_unlock_irqrestore(&priv->driver_lock, flags); 578 spin_unlock_irqrestore(&priv->driver_lock, flags);
474 return -1; 579 ret = -1;
580 goto done;
475 } 581 }
476 582
477 if (!list_empty(&priv->cmdpendingq)) { 583 if (!list_empty(&priv->cmdpendingq)) {
@@ -483,11 +589,17 @@ int lbtf_execute_next_command(struct lbtf_private *priv)
483 cmd = cmdnode->cmdbuf; 589 cmd = cmdnode->cmdbuf;
484 590
485 list_del(&cmdnode->list); 591 list_del(&cmdnode->list);
592 lbtf_deb_host("EXEC_NEXT_CMD: sending command 0x%04x\n",
593 le16_to_cpu(cmd->command));
486 spin_unlock_irqrestore(&priv->driver_lock, flags); 594 spin_unlock_irqrestore(&priv->driver_lock, flags);
487 lbtf_submit_command(priv, cmdnode); 595 lbtf_submit_command(priv, cmdnode);
488 } else 596 } else
489 spin_unlock_irqrestore(&priv->driver_lock, flags); 597 spin_unlock_irqrestore(&priv->driver_lock, flags);
490 return 0; 598
599 ret = 0;
600done:
601 lbtf_deb_leave(LBTF_DEB_THREAD);
602 return ret;
491} 603}
492 604
493static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv, 605static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
@@ -498,14 +610,22 @@ static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
498{ 610{
499 struct cmd_ctrl_node *cmdnode; 611 struct cmd_ctrl_node *cmdnode;
500 612
501 if (priv->surpriseremoved) 613 lbtf_deb_enter(LBTF_DEB_HOST);
502 return ERR_PTR(-ENOENT); 614
615 if (priv->surpriseremoved) {
616 lbtf_deb_host("PREP_CMD: card removed\n");
617 cmdnode = ERR_PTR(-ENOENT);
618 goto done;
619 }
503 620
504 cmdnode = lbtf_get_cmd_ctrl_node(priv); 621 cmdnode = lbtf_get_cmd_ctrl_node(priv);
505 if (cmdnode == NULL) { 622 if (cmdnode == NULL) {
623 lbtf_deb_host("PREP_CMD: cmdnode is NULL\n");
624
506 /* Wake up main thread to execute next command */ 625 /* Wake up main thread to execute next command */
507 queue_work(lbtf_wq, &priv->cmd_work); 626 queue_work(lbtf_wq, &priv->cmd_work);
508 return ERR_PTR(-ENOBUFS); 627 cmdnode = ERR_PTR(-ENOBUFS);
628 goto done;
509 } 629 }
510 630
511 cmdnode->callback = callback; 631 cmdnode->callback = callback;
@@ -520,17 +640,24 @@ static struct cmd_ctrl_node *__lbtf_cmd_async(struct lbtf_private *priv,
520 cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size); 640 cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size);
521 cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum); 641 cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum);
522 cmdnode->cmdbuf->result = 0; 642 cmdnode->cmdbuf->result = 0;
643
644 lbtf_deb_host("PREP_CMD: command 0x%04x\n", command);
645
523 cmdnode->cmdwaitqwoken = 0; 646 cmdnode->cmdwaitqwoken = 0;
524 lbtf_queue_cmd(priv, cmdnode); 647 lbtf_queue_cmd(priv, cmdnode);
525 queue_work(lbtf_wq, &priv->cmd_work); 648 queue_work(lbtf_wq, &priv->cmd_work);
526 649
650 done:
651 lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %p", cmdnode);
527 return cmdnode; 652 return cmdnode;
528} 653}
529 654
530void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command, 655void lbtf_cmd_async(struct lbtf_private *priv, uint16_t command,
531 struct cmd_header *in_cmd, int in_cmd_size) 656 struct cmd_header *in_cmd, int in_cmd_size)
532{ 657{
658 lbtf_deb_enter(LBTF_DEB_CMD);
533 __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, NULL, 0); 659 __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, NULL, 0);
660 lbtf_deb_leave(LBTF_DEB_CMD);
534} 661}
535 662
536int __lbtf_cmd(struct lbtf_private *priv, uint16_t command, 663int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
@@ -543,30 +670,35 @@ int __lbtf_cmd(struct lbtf_private *priv, uint16_t command,
543 unsigned long flags; 670 unsigned long flags;
544 int ret = 0; 671 int ret = 0;
545 672
673 lbtf_deb_enter(LBTF_DEB_HOST);
674
546 cmdnode = __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size, 675 cmdnode = __lbtf_cmd_async(priv, command, in_cmd, in_cmd_size,
547 callback, callback_arg); 676 callback, callback_arg);
548 if (IS_ERR(cmdnode)) 677 if (IS_ERR(cmdnode)) {
549 return PTR_ERR(cmdnode); 678 ret = PTR_ERR(cmdnode);
679 goto done;
680 }
550 681
551 might_sleep(); 682 might_sleep();
552 ret = wait_event_interruptible(cmdnode->cmdwait_q, 683 ret = wait_event_interruptible(cmdnode->cmdwait_q,
553 cmdnode->cmdwaitqwoken); 684 cmdnode->cmdwaitqwoken);
554 if (ret) { 685 if (ret) {
555 printk(KERN_DEBUG 686 pr_info("PREP_CMD: command 0x%04x interrupted by signal: %d\n",
556 "libertastf: command 0x%04x interrupted by signal", 687 command, ret);
557 command); 688 goto done;
558 return ret;
559 } 689 }
560 690
561 spin_lock_irqsave(&priv->driver_lock, flags); 691 spin_lock_irqsave(&priv->driver_lock, flags);
562 ret = cmdnode->result; 692 ret = cmdnode->result;
563 if (ret) 693 if (ret)
564 printk(KERN_DEBUG "libertastf: command 0x%04x failed: %d\n", 694 pr_info("PREP_CMD: command 0x%04x failed: %d\n",
565 command, ret); 695 command, ret);
566 696
567 __lbtf_cleanup_and_insert_cmd(priv, cmdnode); 697 __lbtf_cleanup_and_insert_cmd(priv, cmdnode);
568 spin_unlock_irqrestore(&priv->driver_lock, flags); 698 spin_unlock_irqrestore(&priv->driver_lock, flags);
569 699
700done:
701 lbtf_deb_leave_args(LBTF_DEB_HOST, "ret %d", ret);
570 return ret; 702 return ret;
571} 703}
572EXPORT_SYMBOL_GPL(__lbtf_cmd); 704EXPORT_SYMBOL_GPL(__lbtf_cmd);
@@ -587,6 +719,8 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
587 unsigned long flags; 719 unsigned long flags;
588 uint16_t result; 720 uint16_t result;
589 721
722 lbtf_deb_enter(LBTF_DEB_CMD);
723
590 mutex_lock(&priv->lock); 724 mutex_lock(&priv->lock);
591 spin_lock_irqsave(&priv->driver_lock, flags); 725 spin_lock_irqsave(&priv->driver_lock, flags);
592 726
@@ -602,7 +736,7 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
602 result = le16_to_cpu(resp->result); 736 result = le16_to_cpu(resp->result);
603 737
604 if (net_ratelimit()) 738 if (net_ratelimit())
605 printk(KERN_DEBUG "libertastf: cmd response 0x%04x, seq %d, size %d\n", 739 pr_info("libertastf: cmd response 0x%04x, seq %d, size %d\n",
606 respcmd, le16_to_cpu(resp->seqnum), 740 respcmd, le16_to_cpu(resp->seqnum),
607 le16_to_cpu(resp->size)); 741 le16_to_cpu(resp->size));
608 742
@@ -639,7 +773,7 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
639 switch (respcmd) { 773 switch (respcmd) {
640 case CMD_RET(CMD_GET_HW_SPEC): 774 case CMD_RET(CMD_GET_HW_SPEC):
641 case CMD_RET(CMD_802_11_RESET): 775 case CMD_RET(CMD_802_11_RESET):
642 printk(KERN_DEBUG "libertastf: reset failed\n"); 776 pr_info("libertastf: reset failed\n");
643 break; 777 break;
644 778
645 } 779 }
@@ -666,5 +800,6 @@ int lbtf_process_rx_command(struct lbtf_private *priv)
666 800
667done: 801done:
668 mutex_unlock(&priv->lock); 802 mutex_unlock(&priv->lock);
803 lbtf_deb_leave_args(LBTF_DEB_CMD, "ret %d", ret);
669 return ret; 804 return ret;
670} 805}
diff --git a/drivers/net/wireless/libertas_tf/deb_defs.h b/drivers/net/wireless/libertas_tf/deb_defs.h
new file mode 100644
index 000000000000..ae753962d8b5
--- /dev/null
+++ b/drivers/net/wireless/libertas_tf/deb_defs.h
@@ -0,0 +1,104 @@
1/**
2 * This header file contains global constant/enum definitions,
3 * global variable declaration.
4 */
5#ifndef _LBS_DEB_DEFS_H_
6#define _LBS_DEB_EFS_H_
7
8#ifndef DRV_NAME
9#define DRV_NAME "libertas_tf"
10#endif
11
12#include <linux/spinlock.h>
13
14#ifdef CONFIG_LIBERTAS_THINFIRM_DEBUG
15#define DEBUG
16#define PROC_DEBUG
17#endif
18
19#define LBTF_DEB_ENTER 0x00000001
20#define LBTF_DEB_LEAVE 0x00000002
21#define LBTF_DEB_MAIN 0x00000004
22#define LBTF_DEB_NET 0x00000008
23#define LBTF_DEB_MESH 0x00000010
24#define LBTF_DEB_WEXT 0x00000020
25#define LBTF_DEB_IOCTL 0x00000040
26#define LBTF_DEB_SCAN 0x00000080
27#define LBTF_DEB_ASSOC 0x00000100
28#define LBTF_DEB_JOIN 0x00000200
29#define LBTF_DEB_11D 0x00000400
30#define LBTF_DEB_DEBUGFS 0x00000800
31#define LBTF_DEB_ETHTOOL 0x00001000
32#define LBTF_DEB_HOST 0x00002000
33#define LBTF_DEB_CMD 0x00004000
34#define LBTF_DEB_RX 0x00008000
35#define LBTF_DEB_TX 0x00010000
36#define LBTF_DEB_USB 0x00020000
37#define LBTF_DEB_CS 0x00040000
38#define LBTF_DEB_FW 0x00080000
39#define LBTF_DEB_THREAD 0x00100000
40#define LBTF_DEB_HEX 0x00200000
41#define LBTF_DEB_SDIO 0x00400000
42#define LBTF_DEB_MACOPS 0x00800000
43
44extern unsigned int lbtf_debug;
45
46
47#ifdef DEBUG
48#define LBTF_DEB_LL(grp, grpnam, fmt, args...) \
49do { if ((lbtf_debug & (grp)) == (grp)) \
50 printk(KERN_DEBUG DRV_NAME grpnam "%s: " fmt, \
51 in_interrupt() ? " (INT)" : "", ## args); } while (0)
52#else
53#define LBTF_DEB_LL(grp, grpnam, fmt, args...) do {} while (0)
54#endif
55
56#define lbtf_deb_enter(grp) \
57 LBTF_DEB_LL(grp | LBTF_DEB_ENTER, " enter", "%s()\n", __func__);
58#define lbtf_deb_enter_args(grp, fmt, args...) \
59 LBTF_DEB_LL(grp | LBTF_DEB_ENTER, " enter", "%s(" fmt ")\n", __func__, ## args);
60#define lbtf_deb_leave(grp) \
61 LBTF_DEB_LL(grp | LBTF_DEB_LEAVE, " leave", "%s()\n", __func__);
62#define lbtf_deb_leave_args(grp, fmt, args...) \
63 LBTF_DEB_LL(grp | LBTF_DEB_LEAVE, " leave", "%s(), " fmt "\n", \
64 __func__, ##args);
65#define lbtf_deb_main(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MAIN, " main", fmt, ##args)
66#define lbtf_deb_net(fmt, args...) LBTF_DEB_LL(LBTF_DEB_NET, " net", fmt, ##args)
67#define lbtf_deb_mesh(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MESH, " mesh", fmt, ##args)
68#define lbtf_deb_wext(fmt, args...) LBTF_DEB_LL(LBTF_DEB_WEXT, " wext", fmt, ##args)
69#define lbtf_deb_ioctl(fmt, args...) LBTF_DEB_LL(LBTF_DEB_IOCTL, " ioctl", fmt, ##args)
70#define lbtf_deb_scan(fmt, args...) LBTF_DEB_LL(LBTF_DEB_SCAN, " scan", fmt, ##args)
71#define lbtf_deb_assoc(fmt, args...) LBTF_DEB_LL(LBTF_DEB_ASSOC, " assoc", fmt, ##args)
72#define lbtf_deb_join(fmt, args...) LBTF_DEB_LL(LBTF_DEB_JOIN, " join", fmt, ##args)
73#define lbtf_deb_11d(fmt, args...) LBTF_DEB_LL(LBTF_DEB_11D, " 11d", fmt, ##args)
74#define lbtf_deb_debugfs(fmt, args...) LBTF_DEB_LL(LBTF_DEB_DEBUGFS, " debugfs", fmt, ##args)
75#define lbtf_deb_ethtool(fmt, args...) LBTF_DEB_LL(LBTF_DEB_ETHTOOL, " ethtool", fmt, ##args)
76#define lbtf_deb_host(fmt, args...) LBTF_DEB_LL(LBTF_DEB_HOST, " host", fmt, ##args)
77#define lbtf_deb_cmd(fmt, args...) LBTF_DEB_LL(LBTF_DEB_CMD, " cmd", fmt, ##args)
78#define lbtf_deb_rx(fmt, args...) LBTF_DEB_LL(LBTF_DEB_RX, " rx", fmt, ##args)
79#define lbtf_deb_tx(fmt, args...) LBTF_DEB_LL(LBTF_DEB_TX, " tx", fmt, ##args)
80#define lbtf_deb_fw(fmt, args...) LBTF_DEB_LL(LBTF_DEB_FW, " fw", fmt, ##args)
81#define lbtf_deb_usb(fmt, args...) LBTF_DEB_LL(LBTF_DEB_USB, " usb", fmt, ##args)
82#define lbtf_deb_usbd(dev, fmt, args...) LBTF_DEB_LL(LBTF_DEB_USB, " usbd", "%s:" fmt, dev_name(dev), ##args)
83#define lbtf_deb_cs(fmt, args...) LBTF_DEB_LL(LBTF_DEB_CS, " cs", fmt, ##args)
84#define lbtf_deb_thread(fmt, args...) LBTF_DEB_LL(LBTF_DEB_THREAD, " thread", fmt, ##args)
85#define lbtf_deb_sdio(fmt, args...) LBTF_DEB_LL(LBTF_DEB_SDIO, " thread", fmt, ##args)
86#define lbtf_deb_macops(fmt, args...) LBTF_DEB_LL(LBTF_DEB_MACOPS, " thread", fmt, ##args)
87
88#ifdef DEBUG
89static inline void lbtf_deb_hex(unsigned int grp, const char *prompt, u8 *buf, int len)
90{
91 char newprompt[32];
92
93 if (len &&
94 (lbtf_debug & LBTF_DEB_HEX) &&
95 (lbtf_debug & grp)) {
96 snprintf(newprompt, sizeof(newprompt), DRV_NAME " %s: ", prompt);
97 print_hex_dump_bytes(prompt, DUMP_PREFIX_NONE, buf, len);
98 }
99}
100#else
101#define lbtf_deb_hex(grp, prompt, buf, len) do {} while (0)
102#endif
103
104#endif
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 8cc9db60c14b..c445500ffc61 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -7,6 +7,13 @@
7 * the Free Software Foundation; either version 2 of the License, or (at 7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version. 8 * your option) any later version.
9 */ 9 */
10#define DRV_NAME "lbtf_usb"
11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14#include "libertas_tf.h"
15#include "if_usb.h"
16
10#include <linux/delay.h> 17#include <linux/delay.h>
11#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
12#include <linux/firmware.h> 19#include <linux/firmware.h>
@@ -14,10 +21,8 @@
14#include <linux/slab.h> 21#include <linux/slab.h>
15#include <linux/usb.h> 22#include <linux/usb.h>
16 23
17#define DRV_NAME "lbtf_usb" 24#define INSANEDEBUG 0
18 25#define lbtf_deb_usb2(...) do { if (INSANEDEBUG) lbtf_deb_usbd(__VA_ARGS__); } while (0)
19#include "libertas_tf.h"
20#include "if_usb.h"
21 26
22#define MESSAGE_HEADER_LEN 4 27#define MESSAGE_HEADER_LEN 4
23 28
@@ -53,9 +58,14 @@ static int if_usb_reset_device(struct if_usb_card *cardp);
53 */ 58 */
54static void if_usb_write_bulk_callback(struct urb *urb) 59static void if_usb_write_bulk_callback(struct urb *urb)
55{ 60{
56 if (urb->status != 0) 61 if (urb->status != 0) {
57 printk(KERN_INFO "libertastf: URB in failure status: %d\n", 62 /* print the failure status number for debug */
58 urb->status); 63 pr_info("URB in failure status: %d\n", urb->status);
64 } else {
65 lbtf_deb_usb2(&urb->dev->dev, "URB status is successful\n");
66 lbtf_deb_usb2(&urb->dev->dev, "Actual length transmitted %d\n",
67 urb->actual_length);
68 }
59} 69}
60 70
61/** 71/**
@@ -65,6 +75,8 @@ static void if_usb_write_bulk_callback(struct urb *urb)
65 */ 75 */
66static void if_usb_free(struct if_usb_card *cardp) 76static void if_usb_free(struct if_usb_card *cardp)
67{ 77{
78 lbtf_deb_enter(LBTF_DEB_USB);
79
68 /* Unlink tx & rx urb */ 80 /* Unlink tx & rx urb */
69 usb_kill_urb(cardp->tx_urb); 81 usb_kill_urb(cardp->tx_urb);
70 usb_kill_urb(cardp->rx_urb); 82 usb_kill_urb(cardp->rx_urb);
@@ -81,6 +93,8 @@ static void if_usb_free(struct if_usb_card *cardp)
81 93
82 kfree(cardp->ep_out_buf); 94 kfree(cardp->ep_out_buf);
83 cardp->ep_out_buf = NULL; 95 cardp->ep_out_buf = NULL;
96
97 lbtf_deb_leave(LBTF_DEB_USB);
84} 98}
85 99
86static void if_usb_setup_firmware(struct lbtf_private *priv) 100static void if_usb_setup_firmware(struct lbtf_private *priv)
@@ -88,23 +102,33 @@ static void if_usb_setup_firmware(struct lbtf_private *priv)
88 struct if_usb_card *cardp = priv->card; 102 struct if_usb_card *cardp = priv->card;
89 struct cmd_ds_set_boot2_ver b2_cmd; 103 struct cmd_ds_set_boot2_ver b2_cmd;
90 104
105 lbtf_deb_enter(LBTF_DEB_USB);
106
91 if_usb_submit_rx_urb(cardp); 107 if_usb_submit_rx_urb(cardp);
92 b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd)); 108 b2_cmd.hdr.size = cpu_to_le16(sizeof(b2_cmd));
93 b2_cmd.action = 0; 109 b2_cmd.action = 0;
94 b2_cmd.version = cardp->boot2_version; 110 b2_cmd.version = cardp->boot2_version;
95 111
96 if (lbtf_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd)) 112 if (lbtf_cmd_with_response(priv, CMD_SET_BOOT2_VER, &b2_cmd))
97 printk(KERN_INFO "libertastf: setting boot2 version failed\n"); 113 lbtf_deb_usb("Setting boot2 version failed\n");
114
115 lbtf_deb_leave(LBTF_DEB_USB);
98} 116}
99 117
100static void if_usb_fw_timeo(unsigned long priv) 118static void if_usb_fw_timeo(unsigned long priv)
101{ 119{
102 struct if_usb_card *cardp = (void *)priv; 120 struct if_usb_card *cardp = (void *)priv;
103 121
104 if (!cardp->fwdnldover) 122 lbtf_deb_enter(LBTF_DEB_USB);
123 if (!cardp->fwdnldover) {
105 /* Download timed out */ 124 /* Download timed out */
106 cardp->priv->surpriseremoved = 1; 125 cardp->priv->surpriseremoved = 1;
126 pr_err("Download timed out\n");
127 } else {
128 lbtf_deb_usb("Download complete, no event. Assuming success\n");
129 }
107 wake_up(&cardp->fw_wq); 130 wake_up(&cardp->fw_wq);
131 lbtf_deb_leave(LBTF_DEB_USB);
108} 132}
109 133
110/** 134/**
@@ -125,11 +149,14 @@ static int if_usb_probe(struct usb_interface *intf,
125 struct if_usb_card *cardp; 149 struct if_usb_card *cardp;
126 int i; 150 int i;
127 151
152 lbtf_deb_enter(LBTF_DEB_USB);
128 udev = interface_to_usbdev(intf); 153 udev = interface_to_usbdev(intf);
129 154
130 cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL); 155 cardp = kzalloc(sizeof(struct if_usb_card), GFP_KERNEL);
131 if (!cardp) 156 if (!cardp) {
157 pr_err("Out of memory allocating private data.\n");
132 goto error; 158 goto error;
159 }
133 160
134 setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp); 161 setup_timer(&cardp->fw_timeout, if_usb_fw_timeo, (unsigned long)cardp);
135 init_waitqueue_head(&cardp->fw_wq); 162 init_waitqueue_head(&cardp->fw_wq);
@@ -137,38 +164,62 @@ static int if_usb_probe(struct usb_interface *intf,
137 cardp->udev = udev; 164 cardp->udev = udev;
138 iface_desc = intf->cur_altsetting; 165 iface_desc = intf->cur_altsetting;
139 166
167 lbtf_deb_usbd(&udev->dev, "bcdUSB = 0x%X bDeviceClass = 0x%X"
168 " bDeviceSubClass = 0x%X, bDeviceProtocol = 0x%X\n",
169 le16_to_cpu(udev->descriptor.bcdUSB),
170 udev->descriptor.bDeviceClass,
171 udev->descriptor.bDeviceSubClass,
172 udev->descriptor.bDeviceProtocol);
173
140 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { 174 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
141 endpoint = &iface_desc->endpoint[i].desc; 175 endpoint = &iface_desc->endpoint[i].desc;
142 if (usb_endpoint_is_bulk_in(endpoint)) { 176 if (usb_endpoint_is_bulk_in(endpoint)) {
143 cardp->ep_in_size = 177 cardp->ep_in_size =
144 le16_to_cpu(endpoint->wMaxPacketSize); 178 le16_to_cpu(endpoint->wMaxPacketSize);
145 cardp->ep_in = usb_endpoint_num(endpoint); 179 cardp->ep_in = usb_endpoint_num(endpoint);
180
181 lbtf_deb_usbd(&udev->dev, "in_endpoint = %d\n", cardp->ep_in);
182 lbtf_deb_usbd(&udev->dev, "Bulk in size is %d\n", cardp->ep_in_size);
146 } else if (usb_endpoint_is_bulk_out(endpoint)) { 183 } else if (usb_endpoint_is_bulk_out(endpoint)) {
147 cardp->ep_out_size = 184 cardp->ep_out_size =
148 le16_to_cpu(endpoint->wMaxPacketSize); 185 le16_to_cpu(endpoint->wMaxPacketSize);
149 cardp->ep_out = usb_endpoint_num(endpoint); 186 cardp->ep_out = usb_endpoint_num(endpoint);
187
188 lbtf_deb_usbd(&udev->dev, "out_endpoint = %d\n", cardp->ep_out);
189 lbtf_deb_usbd(&udev->dev, "Bulk out size is %d\n",
190 cardp->ep_out_size);
150 } 191 }
151 } 192 }
152 if (!cardp->ep_out_size || !cardp->ep_in_size) 193 if (!cardp->ep_out_size || !cardp->ep_in_size) {
194 lbtf_deb_usbd(&udev->dev, "Endpoints not found\n");
153 /* Endpoints not found */ 195 /* Endpoints not found */
154 goto dealloc; 196 goto dealloc;
197 }
155 198
156 cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL); 199 cardp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
157 if (!cardp->rx_urb) 200 if (!cardp->rx_urb) {
201 lbtf_deb_usbd(&udev->dev, "Rx URB allocation failed\n");
158 goto dealloc; 202 goto dealloc;
203 }
159 204
160 cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL); 205 cardp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
161 if (!cardp->tx_urb) 206 if (!cardp->tx_urb) {
207 lbtf_deb_usbd(&udev->dev, "Tx URB allocation failed\n");
162 goto dealloc; 208 goto dealloc;
209 }
163 210
164 cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL); 211 cardp->cmd_urb = usb_alloc_urb(0, GFP_KERNEL);
165 if (!cardp->cmd_urb) 212 if (!cardp->cmd_urb) {
213 lbtf_deb_usbd(&udev->dev, "Cmd URB allocation failed\n");
166 goto dealloc; 214 goto dealloc;
215 }
167 216
168 cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE, 217 cardp->ep_out_buf = kmalloc(MRVDRV_ETH_TX_PACKET_BUFFER_SIZE,
169 GFP_KERNEL); 218 GFP_KERNEL);
170 if (!cardp->ep_out_buf) 219 if (!cardp->ep_out_buf) {
220 lbtf_deb_usbd(&udev->dev, "Could not allocate buffer\n");
171 goto dealloc; 221 goto dealloc;
222 }
172 223
173 priv = lbtf_add_card(cardp, &udev->dev); 224 priv = lbtf_add_card(cardp, &udev->dev);
174 if (!priv) 225 if (!priv)
@@ -189,6 +240,7 @@ static int if_usb_probe(struct usb_interface *intf,
189dealloc: 240dealloc:
190 if_usb_free(cardp); 241 if_usb_free(cardp);
191error: 242error:
243lbtf_deb_leave(LBTF_DEB_MAIN);
192 return -ENOMEM; 244 return -ENOMEM;
193} 245}
194 246
@@ -202,6 +254,8 @@ static void if_usb_disconnect(struct usb_interface *intf)
202 struct if_usb_card *cardp = usb_get_intfdata(intf); 254 struct if_usb_card *cardp = usb_get_intfdata(intf);
203 struct lbtf_private *priv = (struct lbtf_private *) cardp->priv; 255 struct lbtf_private *priv = (struct lbtf_private *) cardp->priv;
204 256
257 lbtf_deb_enter(LBTF_DEB_MAIN);
258
205 if_usb_reset_device(cardp); 259 if_usb_reset_device(cardp);
206 260
207 if (priv) 261 if (priv)
@@ -212,6 +266,8 @@ static void if_usb_disconnect(struct usb_interface *intf)
212 266
213 usb_set_intfdata(intf, NULL); 267 usb_set_intfdata(intf, NULL);
214 usb_put_dev(interface_to_usbdev(intf)); 268 usb_put_dev(interface_to_usbdev(intf));
269
270 lbtf_deb_leave(LBTF_DEB_MAIN);
215} 271}
216 272
217/** 273/**
@@ -226,6 +282,8 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
226 struct fwdata *fwdata = cardp->ep_out_buf; 282 struct fwdata *fwdata = cardp->ep_out_buf;
227 u8 *firmware = (u8 *) cardp->fw->data; 283 u8 *firmware = (u8 *) cardp->fw->data;
228 284
285 lbtf_deb_enter(LBTF_DEB_FW);
286
229 /* If we got a CRC failure on the last block, back 287 /* If we got a CRC failure on the last block, back
230 up and retry it */ 288 up and retry it */
231 if (!cardp->CRC_OK) { 289 if (!cardp->CRC_OK) {
@@ -233,6 +291,9 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
233 cardp->fwseqnum--; 291 cardp->fwseqnum--;
234 } 292 }
235 293
294 lbtf_deb_usb2(&cardp->udev->dev, "totalbytes = %d\n",
295 cardp->totalbytes);
296
236 /* struct fwdata (which we sent to the card) has an 297 /* struct fwdata (which we sent to the card) has an
237 extra __le32 field in between the header and the data, 298 extra __le32 field in between the header and the data,
238 which is not in the struct fwheader in the actual 299 which is not in the struct fwheader in the actual
@@ -246,18 +307,33 @@ static int if_usb_send_fw_pkt(struct if_usb_card *cardp)
246 memcpy(fwdata->data, &firmware[cardp->totalbytes], 307 memcpy(fwdata->data, &firmware[cardp->totalbytes],
247 le32_to_cpu(fwdata->hdr.datalength)); 308 le32_to_cpu(fwdata->hdr.datalength));
248 309
310 lbtf_deb_usb2(&cardp->udev->dev, "Data length = %d\n",
311 le32_to_cpu(fwdata->hdr.datalength));
312
249 fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum); 313 fwdata->seqnum = cpu_to_le32(++cardp->fwseqnum);
250 cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength); 314 cardp->totalbytes += le32_to_cpu(fwdata->hdr.datalength);
251 315
252 usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) + 316 usb_tx_block(cardp, cardp->ep_out_buf, sizeof(struct fwdata) +
253 le32_to_cpu(fwdata->hdr.datalength), 0); 317 le32_to_cpu(fwdata->hdr.datalength), 0);
254 318
255 if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) 319 if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_DATA_TO_RECV)) {
320 lbtf_deb_usb2(&cardp->udev->dev, "There are data to follow\n");
321 lbtf_deb_usb2(&cardp->udev->dev, "seqnum = %d totalbytes = %d\n",
322 cardp->fwseqnum, cardp->totalbytes);
323 } else if (fwdata->hdr.dnldcmd == cpu_to_le32(FW_HAS_LAST_BLOCK)) {
324 lbtf_deb_usb2(&cardp->udev->dev, "Host has finished FW downloading\n");
325 lbtf_deb_usb2(&cardp->udev->dev, "Donwloading FW JUMP BLOCK\n");
326
256 /* Host has finished FW downloading 327 /* Host has finished FW downloading
257 * Donwloading FW JUMP BLOCK 328 * Donwloading FW JUMP BLOCK
258 */ 329 */
259 cardp->fwfinalblk = 1; 330 cardp->fwfinalblk = 1;
331 }
260 332
333 lbtf_deb_usb2(&cardp->udev->dev, "Firmware download done; size %d\n",
334 cardp->totalbytes);
335
336 lbtf_deb_leave(LBTF_DEB_FW);
261 return 0; 337 return 0;
262} 338}
263 339
@@ -266,6 +342,8 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
266 struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4; 342 struct cmd_ds_802_11_reset *cmd = cardp->ep_out_buf + 4;
267 int ret; 343 int ret;
268 344
345 lbtf_deb_enter(LBTF_DEB_USB);
346
269 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); 347 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
270 348
271 cmd->hdr.command = cpu_to_le16(CMD_802_11_RESET); 349 cmd->hdr.command = cpu_to_le16(CMD_802_11_RESET);
@@ -280,6 +358,8 @@ static int if_usb_reset_device(struct if_usb_card *cardp)
280 ret = usb_reset_device(cardp->udev); 358 ret = usb_reset_device(cardp->udev);
281 msleep(100); 359 msleep(100);
282 360
361 lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
362
283 return ret; 363 return ret;
284} 364}
285EXPORT_SYMBOL_GPL(if_usb_reset_device); 365EXPORT_SYMBOL_GPL(if_usb_reset_device);
@@ -297,11 +377,15 @@ EXPORT_SYMBOL_GPL(if_usb_reset_device);
297static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload, 377static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
298 uint16_t nb, u8 data) 378 uint16_t nb, u8 data)
299{ 379{
380 int ret = -1;
300 struct urb *urb; 381 struct urb *urb;
301 382
383 lbtf_deb_enter(LBTF_DEB_USB);
302 /* check if device is removed */ 384 /* check if device is removed */
303 if (cardp->priv->surpriseremoved) 385 if (cardp->priv->surpriseremoved) {
304 return -1; 386 lbtf_deb_usbd(&cardp->udev->dev, "Device removed\n");
387 goto tx_ret;
388 }
305 389
306 if (data) 390 if (data)
307 urb = cardp->tx_urb; 391 urb = cardp->tx_urb;
@@ -315,19 +399,34 @@ static int usb_tx_block(struct if_usb_card *cardp, uint8_t *payload,
315 399
316 urb->transfer_flags |= URB_ZERO_PACKET; 400 urb->transfer_flags |= URB_ZERO_PACKET;
317 401
318 if (usb_submit_urb(urb, GFP_ATOMIC)) 402 if (usb_submit_urb(urb, GFP_ATOMIC)) {
319 return -1; 403 lbtf_deb_usbd(&cardp->udev->dev, "usb_submit_urb failed: %d\n", ret);
320 return 0; 404 goto tx_ret;
405 }
406
407 lbtf_deb_usb2(&cardp->udev->dev, "usb_submit_urb success\n");
408
409 ret = 0;
410
411tx_ret:
412 lbtf_deb_leave(LBTF_DEB_USB);
413 return ret;
321} 414}
322 415
323static int __if_usb_submit_rx_urb(struct if_usb_card *cardp, 416static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
324 void (*callbackfn)(struct urb *urb)) 417 void (*callbackfn)(struct urb *urb))
325{ 418{
326 struct sk_buff *skb; 419 struct sk_buff *skb;
420 int ret = -1;
421
422 lbtf_deb_enter(LBTF_DEB_USB);
327 423
328 skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE); 424 skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
329 if (!skb) 425 if (!skb) {
426 pr_err("No free skb\n");
427 lbtf_deb_leave(LBTF_DEB_USB);
330 return -1; 428 return -1;
429 }
331 430
332 cardp->rx_skb = skb; 431 cardp->rx_skb = skb;
333 432
@@ -339,12 +438,19 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
339 438
340 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET; 439 cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
341 440
342 if (usb_submit_urb(cardp->rx_urb, GFP_ATOMIC)) { 441 lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n", cardp->rx_urb);
442 ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
443 if (ret) {
444 lbtf_deb_usbd(&cardp->udev->dev, "Submit Rx URB failed: %d\n", ret);
343 kfree_skb(skb); 445 kfree_skb(skb);
344 cardp->rx_skb = NULL; 446 cardp->rx_skb = NULL;
447 lbtf_deb_leave(LBTF_DEB_USB);
345 return -1; 448 return -1;
346 } else 449 } else {
450 lbtf_deb_usb2(&cardp->udev->dev, "Submit Rx URB success\n");
451 lbtf_deb_leave(LBTF_DEB_USB);
347 return 0; 452 return 0;
453 }
348} 454}
349 455
350static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp) 456static int if_usb_submit_rx_urb_fwload(struct if_usb_card *cardp)
@@ -364,8 +470,12 @@ static void if_usb_receive_fwload(struct urb *urb)
364 struct fwsyncheader *syncfwheader; 470 struct fwsyncheader *syncfwheader;
365 struct bootcmdresp bcmdresp; 471 struct bootcmdresp bcmdresp;
366 472
473 lbtf_deb_enter(LBTF_DEB_USB);
367 if (urb->status) { 474 if (urb->status) {
475 lbtf_deb_usbd(&cardp->udev->dev,
476 "URB status is failed during fw load\n");
368 kfree_skb(skb); 477 kfree_skb(skb);
478 lbtf_deb_leave(LBTF_DEB_USB);
369 return; 479 return;
370 } 480 }
371 481
@@ -373,12 +483,17 @@ static void if_usb_receive_fwload(struct urb *urb)
373 __le32 *tmp = (__le32 *)(skb->data); 483 __le32 *tmp = (__le32 *)(skb->data);
374 484
375 if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) && 485 if (tmp[0] == cpu_to_le32(CMD_TYPE_INDICATION) &&
376 tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) 486 tmp[1] == cpu_to_le32(MACREG_INT_CODE_FIRMWARE_READY)) {
377 /* Firmware ready event received */ 487 /* Firmware ready event received */
488 pr_info("Firmware ready event received\n");
378 wake_up(&cardp->fw_wq); 489 wake_up(&cardp->fw_wq);
379 else 490 } else {
491 lbtf_deb_usb("Waiting for confirmation; got %x %x\n",
492 le32_to_cpu(tmp[0]), le32_to_cpu(tmp[1]));
380 if_usb_submit_rx_urb_fwload(cardp); 493 if_usb_submit_rx_urb_fwload(cardp);
494 }
381 kfree_skb(skb); 495 kfree_skb(skb);
496 lbtf_deb_leave(LBTF_DEB_USB);
382 return; 497 return;
383 } 498 }
384 if (cardp->bootcmdresp <= 0) { 499 if (cardp->bootcmdresp <= 0) {
@@ -389,34 +504,60 @@ static void if_usb_receive_fwload(struct urb *urb)
389 if_usb_submit_rx_urb_fwload(cardp); 504 if_usb_submit_rx_urb_fwload(cardp);
390 cardp->bootcmdresp = 1; 505 cardp->bootcmdresp = 1;
391 /* Received valid boot command response */ 506 /* Received valid boot command response */
507 lbtf_deb_usbd(&cardp->udev->dev,
508 "Received valid boot command response\n");
509 lbtf_deb_leave(LBTF_DEB_USB);
392 return; 510 return;
393 } 511 }
394 if (bcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) { 512 if (bcmdresp.magic != cpu_to_le32(BOOT_CMD_MAGIC_NUMBER)) {
395 if (bcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) || 513 if (bcmdresp.magic == cpu_to_le32(CMD_TYPE_REQUEST) ||
396 bcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) || 514 bcmdresp.magic == cpu_to_le32(CMD_TYPE_DATA) ||
397 bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION)) 515 bcmdresp.magic == cpu_to_le32(CMD_TYPE_INDICATION)) {
516 if (!cardp->bootcmdresp)
517 pr_info("Firmware already seems alive; resetting\n");
398 cardp->bootcmdresp = -1; 518 cardp->bootcmdresp = -1;
399 } else if (bcmdresp.cmd == BOOT_CMD_FW_BY_USB && 519 } else {
400 bcmdresp.result == BOOT_CMD_RESP_OK) 520 pr_info("boot cmd response wrong magic number (0x%x)\n",
521 le32_to_cpu(bcmdresp.magic));
522 }
523 } else if (bcmdresp.cmd != BOOT_CMD_FW_BY_USB) {
524 pr_info("boot cmd response cmd_tag error (%d)\n",
525 bcmdresp.cmd);
526 } else if (bcmdresp.result != BOOT_CMD_RESP_OK) {
527 pr_info("boot cmd response result error (%d)\n",
528 bcmdresp.result);
529 } else {
401 cardp->bootcmdresp = 1; 530 cardp->bootcmdresp = 1;
531 lbtf_deb_usbd(&cardp->udev->dev,
532 "Received valid boot command response\n");
533 }
402 534
403 kfree_skb(skb); 535 kfree_skb(skb);
404 if_usb_submit_rx_urb_fwload(cardp); 536 if_usb_submit_rx_urb_fwload(cardp);
537 lbtf_deb_leave(LBTF_DEB_USB);
405 return; 538 return;
406 } 539 }
407 540
408 syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC); 541 syncfwheader = kmalloc(sizeof(struct fwsyncheader), GFP_ATOMIC);
409 if (!syncfwheader) { 542 if (!syncfwheader) {
543 lbtf_deb_usbd(&cardp->udev->dev, "Failure to allocate syncfwheader\n");
410 kfree_skb(skb); 544 kfree_skb(skb);
545 lbtf_deb_leave(LBTF_DEB_USB);
411 return; 546 return;
412 } 547 }
413 548
414 memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader)); 549 memcpy(syncfwheader, skb->data, sizeof(struct fwsyncheader));
415 550
416 if (!syncfwheader->cmd) 551 if (!syncfwheader->cmd) {
552 lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk with correct CRC\n");
553 lbtf_deb_usb2(&cardp->udev->dev, "FW received Blk seqnum = %d\n",
554 le32_to_cpu(syncfwheader->seqnum));
417 cardp->CRC_OK = 1; 555 cardp->CRC_OK = 1;
418 else 556 } else {
557 lbtf_deb_usbd(&cardp->udev->dev, "FW received Blk with CRC error\n");
419 cardp->CRC_OK = 0; 558 cardp->CRC_OK = 0;
559 }
560
420 kfree_skb(skb); 561 kfree_skb(skb);
421 562
422 /* reschedule timer for 200ms hence */ 563 /* reschedule timer for 200ms hence */
@@ -434,7 +575,7 @@ static void if_usb_receive_fwload(struct urb *urb)
434 575
435 kfree(syncfwheader); 576 kfree(syncfwheader);
436 577
437 return; 578 lbtf_deb_leave(LBTF_DEB_USB);
438} 579}
439 580
440#define MRVDRV_MIN_PKT_LEN 30 581#define MRVDRV_MIN_PKT_LEN 30
@@ -445,6 +586,7 @@ static inline void process_cmdtypedata(int recvlength, struct sk_buff *skb,
445{ 586{
446 if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN 587 if (recvlength > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE + MESSAGE_HEADER_LEN
447 || recvlength < MRVDRV_MIN_PKT_LEN) { 588 || recvlength < MRVDRV_MIN_PKT_LEN) {
589 lbtf_deb_usbd(&cardp->udev->dev, "Packet length is Invalid\n");
448 kfree_skb(skb); 590 kfree_skb(skb);
449 return; 591 return;
450 } 592 }
@@ -460,6 +602,8 @@ static inline void process_cmdrequest(int recvlength, uint8_t *recvbuff,
460 struct lbtf_private *priv) 602 struct lbtf_private *priv)
461{ 603{
462 if (recvlength > LBS_CMD_BUFFER_SIZE) { 604 if (recvlength > LBS_CMD_BUFFER_SIZE) {
605 lbtf_deb_usbd(&cardp->udev->dev,
606 "The receive buffer is too large\n");
463 kfree_skb(skb); 607 kfree_skb(skb);
464 return; 608 return;
465 } 609 }
@@ -489,16 +633,24 @@ static void if_usb_receive(struct urb *urb)
489 uint32_t recvtype = 0; 633 uint32_t recvtype = 0;
490 __le32 *pkt = (__le32 *) skb->data; 634 __le32 *pkt = (__le32 *) skb->data;
491 635
636 lbtf_deb_enter(LBTF_DEB_USB);
637
492 if (recvlength) { 638 if (recvlength) {
493 if (urb->status) { 639 if (urb->status) {
640 lbtf_deb_usbd(&cardp->udev->dev, "RX URB failed: %d\n",
641 urb->status);
494 kfree_skb(skb); 642 kfree_skb(skb);
495 goto setup_for_next; 643 goto setup_for_next;
496 } 644 }
497 645
498 recvbuff = skb->data; 646 recvbuff = skb->data;
499 recvtype = le32_to_cpu(pkt[0]); 647 recvtype = le32_to_cpu(pkt[0]);
648 lbtf_deb_usbd(&cardp->udev->dev,
649 "Recv length = 0x%x, Recv type = 0x%X\n",
650 recvlength, recvtype);
500 } else if (urb->status) { 651 } else if (urb->status) {
501 kfree_skb(skb); 652 kfree_skb(skb);
653 lbtf_deb_leave(LBTF_DEB_USB);
502 return; 654 return;
503 } 655 }
504 656
@@ -515,6 +667,7 @@ static void if_usb_receive(struct urb *urb)
515 { 667 {
516 /* Event cause handling */ 668 /* Event cause handling */
517 u32 event_cause = le32_to_cpu(pkt[1]); 669 u32 event_cause = le32_to_cpu(pkt[1]);
670 lbtf_deb_usbd(&cardp->udev->dev, "**EVENT** 0x%X\n", event_cause);
518 671
519 /* Icky undocumented magic special case */ 672 /* Icky undocumented magic special case */
520 if (event_cause & 0xffff0000) { 673 if (event_cause & 0xffff0000) {
@@ -529,21 +682,22 @@ static void if_usb_receive(struct urb *urb)
529 } else if (event_cause == LBTF_EVENT_BCN_SENT) 682 } else if (event_cause == LBTF_EVENT_BCN_SENT)
530 lbtf_bcn_sent(priv); 683 lbtf_bcn_sent(priv);
531 else 684 else
532 printk(KERN_DEBUG 685 lbtf_deb_usbd(&cardp->udev->dev,
533 "Unsupported notification %d received\n", 686 "Unsupported notification %d received\n",
534 event_cause); 687 event_cause);
535 kfree_skb(skb); 688 kfree_skb(skb);
536 break; 689 break;
537 } 690 }
538 default: 691 default:
539 printk(KERN_DEBUG "libertastf: unknown command type 0x%X\n", 692 lbtf_deb_usbd(&cardp->udev->dev,
540 recvtype); 693 "libertastf: unknown command type 0x%X\n", recvtype);
541 kfree_skb(skb); 694 kfree_skb(skb);
542 break; 695 break;
543 } 696 }
544 697
545setup_for_next: 698setup_for_next:
546 if_usb_submit_rx_urb(cardp); 699 if_usb_submit_rx_urb(cardp);
700 lbtf_deb_leave(LBTF_DEB_USB);
547} 701}
548 702
549/** 703/**
@@ -562,6 +716,9 @@ static int if_usb_host_to_card(struct lbtf_private *priv, uint8_t type,
562 struct if_usb_card *cardp = priv->card; 716 struct if_usb_card *cardp = priv->card;
563 u8 data = 0; 717 u8 data = 0;
564 718
719 lbtf_deb_usbd(&cardp->udev->dev, "*** type = %u\n", type);
720 lbtf_deb_usbd(&cardp->udev->dev, "size after = %d\n", nb);
721
565 if (type == MVMS_CMD) { 722 if (type == MVMS_CMD) {
566 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST); 723 *(__le32 *)cardp->ep_out_buf = cpu_to_le32(CMD_TYPE_REQUEST);
567 } else { 724 } else {
@@ -639,8 +796,10 @@ static int check_fwfile_format(const u8 *data, u32 totlen)
639 } while (!exit); 796 } while (!exit);
640 797
641 if (ret) 798 if (ret)
642 printk(KERN_INFO 799 pr_err("firmware file format check FAIL\n");
643 "libertastf: firmware file format check failed\n"); 800 else
801 lbtf_deb_fw("firmware file format check PASS\n");
802
644 return ret; 803 return ret;
645} 804}
646 805
@@ -651,10 +810,12 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
651 static int reset_count = 10; 810 static int reset_count = 10;
652 int ret = 0; 811 int ret = 0;
653 812
813 lbtf_deb_enter(LBTF_DEB_USB);
814
654 ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev); 815 ret = request_firmware(&cardp->fw, lbtf_fw_name, &cardp->udev->dev);
655 if (ret < 0) { 816 if (ret < 0) {
656 printk(KERN_INFO "libertastf: firmware %s not found\n", 817 pr_err("request_firmware() failed with %#x\n", ret);
657 lbtf_fw_name); 818 pr_err("firmware %s not found\n", lbtf_fw_name);
658 goto done; 819 goto done;
659 } 820 }
660 821
@@ -663,6 +824,7 @@ static int if_usb_prog_firmware(struct if_usb_card *cardp)
663 824
664restart: 825restart:
665 if (if_usb_submit_rx_urb_fwload(cardp) < 0) { 826 if (if_usb_submit_rx_urb_fwload(cardp) < 0) {
827 lbtf_deb_usbd(&cardp->udev->dev, "URB submission is failed\n");
666 ret = -1; 828 ret = -1;
667 goto release_fw; 829 goto release_fw;
668 } 830 }
@@ -709,14 +871,13 @@ restart:
709 usb_kill_urb(cardp->rx_urb); 871 usb_kill_urb(cardp->rx_urb);
710 872
711 if (!cardp->fwdnldover) { 873 if (!cardp->fwdnldover) {
712 printk(KERN_INFO "libertastf: failed to load fw," 874 pr_info("failed to load fw, resetting device!\n");
713 " resetting device!\n");
714 if (--reset_count >= 0) { 875 if (--reset_count >= 0) {
715 if_usb_reset_device(cardp); 876 if_usb_reset_device(cardp);
716 goto restart; 877 goto restart;
717 } 878 }
718 879
719 printk(KERN_INFO "libertastf: fw download failure\n"); 880 pr_info("FW download failure, time = %d ms\n", i * 100);
720 ret = -1; 881 ret = -1;
721 goto release_fw; 882 goto release_fw;
722 } 883 }
@@ -730,6 +891,7 @@ restart:
730 if_usb_setup_firmware(cardp->priv); 891 if_usb_setup_firmware(cardp->priv);
731 892
732 done: 893 done:
894 lbtf_deb_leave_args(LBTF_DEB_USB, "ret %d", ret);
733 return ret; 895 return ret;
734} 896}
735EXPORT_SYMBOL_GPL(if_usb_prog_firmware); 897EXPORT_SYMBOL_GPL(if_usb_prog_firmware);
@@ -751,13 +913,19 @@ static int __init if_usb_init_module(void)
751{ 913{
752 int ret = 0; 914 int ret = 0;
753 915
916 lbtf_deb_enter(LBTF_DEB_MAIN);
917
754 ret = usb_register(&if_usb_driver); 918 ret = usb_register(&if_usb_driver);
919
920 lbtf_deb_leave_args(LBTF_DEB_MAIN, "ret %d", ret);
755 return ret; 921 return ret;
756} 922}
757 923
758static void __exit if_usb_exit_module(void) 924static void __exit if_usb_exit_module(void)
759{ 925{
926 lbtf_deb_enter(LBTF_DEB_MAIN);
760 usb_deregister(&if_usb_driver); 927 usb_deregister(&if_usb_driver);
928 lbtf_deb_leave(LBTF_DEB_MAIN);
761} 929}
762 930
763module_init(if_usb_init_module); 931module_init(if_usb_init_module);
diff --git a/drivers/net/wireless/libertas_tf/libertas_tf.h b/drivers/net/wireless/libertas_tf/libertas_tf.h
index 4cc42dd5a005..fbbaaae7a1ae 100644
--- a/drivers/net/wireless/libertas_tf/libertas_tf.h
+++ b/drivers/net/wireless/libertas_tf/libertas_tf.h
@@ -13,6 +13,8 @@
13#include <linux/kthread.h> 13#include <linux/kthread.h>
14#include <net/mac80211.h> 14#include <net/mac80211.h>
15 15
16#include "deb_defs.h"
17
16#ifndef DRV_NAME 18#ifndef DRV_NAME
17#define DRV_NAME "libertas_tf" 19#define DRV_NAME "libertas_tf"
18#endif 20#endif
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 7945ff5aa334..6a04c2157f73 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -7,10 +7,12 @@
7 * the Free Software Foundation; either version 2 of the License, or (at 7 * the Free Software Foundation; either version 2 of the License, or (at
8 * your option) any later version. 8 * your option) any later version.
9 */ 9 */
10#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
10#include <linux/slab.h> 12#include <linux/slab.h>
11 13
14#include <linux/etherdevice.h>
12#include "libertas_tf.h" 15#include "libertas_tf.h"
13#include "linux/etherdevice.h"
14 16
15#define DRIVER_RELEASE_VERSION "004.p0" 17#define DRIVER_RELEASE_VERSION "004.p0"
16/* thinfirm version: 5.132.X.pX */ 18/* thinfirm version: 5.132.X.pX */
@@ -18,7 +20,17 @@
18#define LBTF_FW_VER_MAX 0x0584ffff 20#define LBTF_FW_VER_MAX 0x0584ffff
19#define QOS_CONTROL_LEN 2 21#define QOS_CONTROL_LEN 2
20 22
21static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION; 23/* Module parameters */
24unsigned int lbtf_debug;
25EXPORT_SYMBOL_GPL(lbtf_debug);
26module_param_named(libertas_tf_debug, lbtf_debug, int, 0644);
27
28static const char lbtf_driver_version[] = "THINFIRM-USB8388-" DRIVER_RELEASE_VERSION
29#ifdef DEBUG
30 "-dbg"
31#endif
32 "";
33
22struct workqueue_struct *lbtf_wq; 34struct workqueue_struct *lbtf_wq;
23 35
24static const struct ieee80211_channel lbtf_channels[] = { 36static const struct ieee80211_channel lbtf_channels[] = {
@@ -81,6 +93,9 @@ static void lbtf_cmd_work(struct work_struct *work)
81{ 93{
82 struct lbtf_private *priv = container_of(work, struct lbtf_private, 94 struct lbtf_private *priv = container_of(work, struct lbtf_private,
83 cmd_work); 95 cmd_work);
96
97 lbtf_deb_enter(LBTF_DEB_CMD);
98
84 spin_lock_irq(&priv->driver_lock); 99 spin_lock_irq(&priv->driver_lock);
85 /* command response? */ 100 /* command response? */
86 if (priv->cmd_response_rxed) { 101 if (priv->cmd_response_rxed) {
@@ -108,11 +123,16 @@ static void lbtf_cmd_work(struct work_struct *work)
108 priv->cmd_timed_out = 0; 123 priv->cmd_timed_out = 0;
109 spin_unlock_irq(&priv->driver_lock); 124 spin_unlock_irq(&priv->driver_lock);
110 125
111 if (!priv->fw_ready) 126 if (!priv->fw_ready) {
127 lbtf_deb_leave_args(LBTF_DEB_CMD, "fw not ready");
112 return; 128 return;
129 }
130
113 /* Execute the next command */ 131 /* Execute the next command */
114 if (!priv->cur_cmd) 132 if (!priv->cur_cmd)
115 lbtf_execute_next_command(priv); 133 lbtf_execute_next_command(priv);
134
135 lbtf_deb_leave(LBTF_DEB_CMD);
116} 136}
117 137
118/** 138/**
@@ -126,6 +146,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
126{ 146{
127 int ret = -1; 147 int ret = -1;
128 148
149 lbtf_deb_enter(LBTF_DEB_FW);
129 /* 150 /*
130 * Read priv address from HW 151 * Read priv address from HW
131 */ 152 */
@@ -141,6 +162,7 @@ static int lbtf_setup_firmware(struct lbtf_private *priv)
141 162
142 ret = 0; 163 ret = 0;
143done: 164done:
165 lbtf_deb_leave_args(LBTF_DEB_FW, "ret: %d", ret);
144 return ret; 166 return ret;
145} 167}
146 168
@@ -152,6 +174,7 @@ static void command_timer_fn(unsigned long data)
152{ 174{
153 struct lbtf_private *priv = (struct lbtf_private *)data; 175 struct lbtf_private *priv = (struct lbtf_private *)data;
154 unsigned long flags; 176 unsigned long flags;
177 lbtf_deb_enter(LBTF_DEB_CMD);
155 178
156 spin_lock_irqsave(&priv->driver_lock, flags); 179 spin_lock_irqsave(&priv->driver_lock, flags);
157 180
@@ -168,10 +191,12 @@ static void command_timer_fn(unsigned long data)
168 queue_work(lbtf_wq, &priv->cmd_work); 191 queue_work(lbtf_wq, &priv->cmd_work);
169out: 192out:
170 spin_unlock_irqrestore(&priv->driver_lock, flags); 193 spin_unlock_irqrestore(&priv->driver_lock, flags);
194 lbtf_deb_leave(LBTF_DEB_CMD);
171} 195}
172 196
173static int lbtf_init_adapter(struct lbtf_private *priv) 197static int lbtf_init_adapter(struct lbtf_private *priv)
174{ 198{
199 lbtf_deb_enter(LBTF_DEB_MAIN);
175 memset(priv->current_addr, 0xff, ETH_ALEN); 200 memset(priv->current_addr, 0xff, ETH_ALEN);
176 mutex_init(&priv->lock); 201 mutex_init(&priv->lock);
177 202
@@ -188,13 +213,16 @@ static int lbtf_init_adapter(struct lbtf_private *priv)
188 if (lbtf_allocate_cmd_buffer(priv)) 213 if (lbtf_allocate_cmd_buffer(priv))
189 return -1; 214 return -1;
190 215
216 lbtf_deb_leave(LBTF_DEB_MAIN);
191 return 0; 217 return 0;
192} 218}
193 219
194static void lbtf_free_adapter(struct lbtf_private *priv) 220static void lbtf_free_adapter(struct lbtf_private *priv)
195{ 221{
222 lbtf_deb_enter(LBTF_DEB_MAIN);
196 lbtf_free_cmd_buffer(priv); 223 lbtf_free_cmd_buffer(priv);
197 del_timer(&priv->command_timer); 224 del_timer(&priv->command_timer);
225 lbtf_deb_leave(LBTF_DEB_MAIN);
198} 226}
199 227
200static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 228static int lbtf_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
@@ -221,14 +249,18 @@ static void lbtf_tx_work(struct work_struct *work)
221 struct sk_buff *skb = NULL; 249 struct sk_buff *skb = NULL;
222 int err; 250 int err;
223 251
252 lbtf_deb_enter(LBTF_DEB_MACOPS | LBTF_DEB_TX);
253
224 if ((priv->vif->type == NL80211_IFTYPE_AP) && 254 if ((priv->vif->type == NL80211_IFTYPE_AP) &&
225 (!skb_queue_empty(&priv->bc_ps_buf))) 255 (!skb_queue_empty(&priv->bc_ps_buf)))
226 skb = skb_dequeue(&priv->bc_ps_buf); 256 skb = skb_dequeue(&priv->bc_ps_buf);
227 else if (priv->skb_to_tx) { 257 else if (priv->skb_to_tx) {
228 skb = priv->skb_to_tx; 258 skb = priv->skb_to_tx;
229 priv->skb_to_tx = NULL; 259 priv->skb_to_tx = NULL;
230 } else 260 } else {
261 lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
231 return; 262 return;
263 }
232 264
233 len = skb->len; 265 len = skb->len;
234 info = IEEE80211_SKB_CB(skb); 266 info = IEEE80211_SKB_CB(skb);
@@ -236,6 +268,7 @@ static void lbtf_tx_work(struct work_struct *work)
236 268
237 if (priv->surpriseremoved) { 269 if (priv->surpriseremoved) {
238 dev_kfree_skb_any(skb); 270 dev_kfree_skb_any(skb);
271 lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
239 return; 272 return;
240 } 273 }
241 274
@@ -249,6 +282,7 @@ static void lbtf_tx_work(struct work_struct *work)
249 ETH_ALEN); 282 ETH_ALEN);
250 txpd->tx_packet_length = cpu_to_le16(len); 283 txpd->tx_packet_length = cpu_to_le16(len);
251 txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd)); 284 txpd->tx_packet_location = cpu_to_le32(sizeof(struct txpd));
285 lbtf_deb_hex(LBTF_DEB_TX, "TX Data", skb->data, min_t(unsigned int, skb->len, 100));
252 BUG_ON(priv->tx_skb); 286 BUG_ON(priv->tx_skb);
253 spin_lock_irq(&priv->driver_lock); 287 spin_lock_irq(&priv->driver_lock);
254 priv->tx_skb = skb; 288 priv->tx_skb = skb;
@@ -257,7 +291,9 @@ static void lbtf_tx_work(struct work_struct *work)
257 if (err) { 291 if (err) {
258 dev_kfree_skb_any(skb); 292 dev_kfree_skb_any(skb);
259 priv->tx_skb = NULL; 293 priv->tx_skb = NULL;
294 pr_err("TX error: %d", err);
260 } 295 }
296 lbtf_deb_leave(LBTF_DEB_MACOPS | LBTF_DEB_TX);
261} 297}
262 298
263static int lbtf_op_start(struct ieee80211_hw *hw) 299static int lbtf_op_start(struct ieee80211_hw *hw)
@@ -266,6 +302,8 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
266 void *card = priv->card; 302 void *card = priv->card;
267 int ret = -1; 303 int ret = -1;
268 304
305 lbtf_deb_enter(LBTF_DEB_MACOPS);
306
269 if (!priv->fw_ready) 307 if (!priv->fw_ready)
270 /* Upload firmware */ 308 /* Upload firmware */
271 if (priv->hw_prog_firmware(card)) 309 if (priv->hw_prog_firmware(card))
@@ -286,10 +324,12 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
286 } 324 }
287 325
288 printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n"); 326 printk(KERN_INFO "libertastf: Marvell WLAN 802.11 thinfirm adapter\n");
327 lbtf_deb_leave(LBTF_DEB_MACOPS);
289 return 0; 328 return 0;
290 329
291err_prog_firmware: 330err_prog_firmware:
292 priv->hw_reset_device(card); 331 priv->hw_reset_device(card);
332 lbtf_deb_leave_args(LBTF_DEB_MACOPS, "error programing fw; ret=%d", ret);
293 return ret; 333 return ret;
294} 334}
295 335
@@ -300,6 +340,9 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
300 struct sk_buff *skb; 340 struct sk_buff *skb;
301 341
302 struct cmd_ctrl_node *cmdnode; 342 struct cmd_ctrl_node *cmdnode;
343
344 lbtf_deb_enter(LBTF_DEB_MACOPS);
345
303 /* Flush pending command nodes */ 346 /* Flush pending command nodes */
304 spin_lock_irqsave(&priv->driver_lock, flags); 347 spin_lock_irqsave(&priv->driver_lock, flags);
305 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) { 348 list_for_each_entry(cmdnode, &priv->cmdpendingq, list) {
@@ -316,13 +359,14 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
316 priv->radioon = RADIO_OFF; 359 priv->radioon = RADIO_OFF;
317 lbtf_set_radio_control(priv); 360 lbtf_set_radio_control(priv);
318 361
319 return; 362 lbtf_deb_leave(LBTF_DEB_MACOPS);
320} 363}
321 364
322static int lbtf_op_add_interface(struct ieee80211_hw *hw, 365static int lbtf_op_add_interface(struct ieee80211_hw *hw,
323 struct ieee80211_vif *vif) 366 struct ieee80211_vif *vif)
324{ 367{
325 struct lbtf_private *priv = hw->priv; 368 struct lbtf_private *priv = hw->priv;
369 lbtf_deb_enter(LBTF_DEB_MACOPS);
326 if (priv->vif != NULL) 370 if (priv->vif != NULL)
327 return -EOPNOTSUPP; 371 return -EOPNOTSUPP;
328 372
@@ -340,6 +384,7 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
340 return -EOPNOTSUPP; 384 return -EOPNOTSUPP;
341 } 385 }
342 lbtf_set_mac_address(priv, (u8 *) vif->addr); 386 lbtf_set_mac_address(priv, (u8 *) vif->addr);
387 lbtf_deb_leave(LBTF_DEB_MACOPS);
343 return 0; 388 return 0;
344} 389}
345 390
@@ -347,6 +392,7 @@ static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
347 struct ieee80211_vif *vif) 392 struct ieee80211_vif *vif)
348{ 393{
349 struct lbtf_private *priv = hw->priv; 394 struct lbtf_private *priv = hw->priv;
395 lbtf_deb_enter(LBTF_DEB_MACOPS);
350 396
351 if (priv->vif->type == NL80211_IFTYPE_AP || 397 if (priv->vif->type == NL80211_IFTYPE_AP ||
352 priv->vif->type == NL80211_IFTYPE_MESH_POINT) 398 priv->vif->type == NL80211_IFTYPE_MESH_POINT)
@@ -354,37 +400,38 @@ static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
354 lbtf_set_mode(priv, LBTF_PASSIVE_MODE); 400 lbtf_set_mode(priv, LBTF_PASSIVE_MODE);
355 lbtf_set_bssid(priv, 0, NULL); 401 lbtf_set_bssid(priv, 0, NULL);
356 priv->vif = NULL; 402 priv->vif = NULL;
403 lbtf_deb_leave(LBTF_DEB_MACOPS);
357} 404}
358 405
359static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed) 406static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed)
360{ 407{
361 struct lbtf_private *priv = hw->priv; 408 struct lbtf_private *priv = hw->priv;
362 struct ieee80211_conf *conf = &hw->conf; 409 struct ieee80211_conf *conf = &hw->conf;
410 lbtf_deb_enter(LBTF_DEB_MACOPS);
363 411
364 if (conf->channel->center_freq != priv->cur_freq) { 412 if (conf->channel->center_freq != priv->cur_freq) {
365 priv->cur_freq = conf->channel->center_freq; 413 priv->cur_freq = conf->channel->center_freq;
366 lbtf_set_channel(priv, conf->channel->hw_value); 414 lbtf_set_channel(priv, conf->channel->hw_value);
367 } 415 }
416 lbtf_deb_leave(LBTF_DEB_MACOPS);
368 return 0; 417 return 0;
369} 418}
370 419
371static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw, 420static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw,
372 int mc_count, struct dev_addr_list *mclist) 421 struct netdev_hw_addr_list *mc_list)
373{ 422{
374 struct lbtf_private *priv = hw->priv; 423 struct lbtf_private *priv = hw->priv;
375 int i; 424 int i;
425 struct netdev_hw_addr *ha;
426 int mc_count = netdev_hw_addr_list_count(mc_list);
376 427
377 if (!mc_count || mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE) 428 if (!mc_count || mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE)
378 return mc_count; 429 return mc_count;
379 430
380 priv->nr_of_multicastmacaddr = mc_count; 431 priv->nr_of_multicastmacaddr = mc_count;
381 for (i = 0; i < mc_count; i++) { 432 i = 0;
382 if (!mclist) 433 netdev_hw_addr_list_for_each(ha, mc_list)
383 break; 434 memcpy(&priv->multicastlist[i++], ha->addr, ETH_ALEN);
384 memcpy(&priv->multicastlist[i], mclist->da_addr,
385 ETH_ALEN);
386 mclist = mclist->next;
387 }
388 435
389 return mc_count; 436 return mc_count;
390} 437}
@@ -397,11 +444,16 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
397{ 444{
398 struct lbtf_private *priv = hw->priv; 445 struct lbtf_private *priv = hw->priv;
399 int old_mac_control = priv->mac_control; 446 int old_mac_control = priv->mac_control;
447
448 lbtf_deb_enter(LBTF_DEB_MACOPS);
449
400 changed_flags &= SUPPORTED_FIF_FLAGS; 450 changed_flags &= SUPPORTED_FIF_FLAGS;
401 *new_flags &= SUPPORTED_FIF_FLAGS; 451 *new_flags &= SUPPORTED_FIF_FLAGS;
402 452
403 if (!changed_flags) 453 if (!changed_flags) {
454 lbtf_deb_leave(LBTF_DEB_MACOPS);
404 return; 455 return;
456 }
405 457
406 if (*new_flags & (FIF_PROMISC_IN_BSS)) 458 if (*new_flags & (FIF_PROMISC_IN_BSS))
407 priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE; 459 priv->mac_control |= CMD_ACT_MAC_PROMISCUOUS_ENABLE;
@@ -427,6 +479,8 @@ static void lbtf_op_configure_filter(struct ieee80211_hw *hw,
427 479
428 if (priv->mac_control != old_mac_control) 480 if (priv->mac_control != old_mac_control)
429 lbtf_set_mac_control(priv); 481 lbtf_set_mac_control(priv);
482
483 lbtf_deb_leave(LBTF_DEB_MACOPS);
430} 484}
431 485
432static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw, 486static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
@@ -436,6 +490,7 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
436{ 490{
437 struct lbtf_private *priv = hw->priv; 491 struct lbtf_private *priv = hw->priv;
438 struct sk_buff *beacon; 492 struct sk_buff *beacon;
493 lbtf_deb_enter(LBTF_DEB_MACOPS);
439 494
440 if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_INT)) { 495 if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_INT)) {
441 switch (priv->vif->type) { 496 switch (priv->vif->type) {
@@ -466,6 +521,8 @@ static void lbtf_op_bss_info_changed(struct ieee80211_hw *hw,
466 priv->preamble = CMD_TYPE_LONG_PREAMBLE; 521 priv->preamble = CMD_TYPE_LONG_PREAMBLE;
467 lbtf_set_radio_control(priv); 522 lbtf_set_radio_control(priv);
468 } 523 }
524
525 lbtf_deb_leave(LBTF_DEB_MACOPS);
469} 526}
470 527
471static const struct ieee80211_ops lbtf_ops = { 528static const struct ieee80211_ops lbtf_ops = {
@@ -488,6 +545,8 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
488 unsigned int flags; 545 unsigned int flags;
489 struct ieee80211_hdr *hdr; 546 struct ieee80211_hdr *hdr;
490 547
548 lbtf_deb_enter(LBTF_DEB_RX);
549
491 prxpd = (struct rxpd *) skb->data; 550 prxpd = (struct rxpd *) skb->data;
492 551
493 stats.flag = 0; 552 stats.flag = 0;
@@ -496,7 +555,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
496 stats.freq = priv->cur_freq; 555 stats.freq = priv->cur_freq;
497 stats.band = IEEE80211_BAND_2GHZ; 556 stats.band = IEEE80211_BAND_2GHZ;
498 stats.signal = prxpd->snr; 557 stats.signal = prxpd->snr;
499 stats.noise = prxpd->nf;
500 /* Marvell rate index has a hole at value 4 */ 558 /* Marvell rate index has a hole at value 4 */
501 if (prxpd->rx_rate > 4) 559 if (prxpd->rx_rate > 4)
502 --prxpd->rx_rate; 560 --prxpd->rx_rate;
@@ -518,7 +576,15 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
518 } 576 }
519 577
520 memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats)); 578 memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
579
580 lbtf_deb_rx("rx data: skb->len-sizeof(RxPd) = %d-%zd = %zd\n",
581 skb->len, sizeof(struct rxpd), skb->len - sizeof(struct rxpd));
582 lbtf_deb_hex(LBTF_DEB_RX, "RX Data", skb->data,
583 min_t(unsigned int, skb->len, 100));
584
521 ieee80211_rx_irqsafe(priv->hw, skb); 585 ieee80211_rx_irqsafe(priv->hw, skb);
586
587 lbtf_deb_leave(LBTF_DEB_RX);
522 return 0; 588 return 0;
523} 589}
524EXPORT_SYMBOL_GPL(lbtf_rx); 590EXPORT_SYMBOL_GPL(lbtf_rx);
@@ -535,6 +601,8 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
535 struct ieee80211_hw *hw; 601 struct ieee80211_hw *hw;
536 struct lbtf_private *priv = NULL; 602 struct lbtf_private *priv = NULL;
537 603
604 lbtf_deb_enter(LBTF_DEB_MAIN);
605
538 hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops); 606 hw = ieee80211_alloc_hw(sizeof(struct lbtf_private), &lbtf_ops);
539 if (!hw) 607 if (!hw)
540 goto done; 608 goto done;
@@ -577,6 +645,7 @@ err_init_adapter:
577 priv = NULL; 645 priv = NULL;
578 646
579done: 647done:
648 lbtf_deb_leave_args(LBTF_DEB_MAIN, "priv %p", priv);
580 return priv; 649 return priv;
581} 650}
582EXPORT_SYMBOL_GPL(lbtf_add_card); 651EXPORT_SYMBOL_GPL(lbtf_add_card);
@@ -586,6 +655,8 @@ int lbtf_remove_card(struct lbtf_private *priv)
586{ 655{
587 struct ieee80211_hw *hw = priv->hw; 656 struct ieee80211_hw *hw = priv->hw;
588 657
658 lbtf_deb_enter(LBTF_DEB_MAIN);
659
589 priv->surpriseremoved = 1; 660 priv->surpriseremoved = 1;
590 del_timer(&priv->command_timer); 661 del_timer(&priv->command_timer);
591 lbtf_free_adapter(priv); 662 lbtf_free_adapter(priv);
@@ -593,6 +664,7 @@ int lbtf_remove_card(struct lbtf_private *priv)
593 ieee80211_unregister_hw(hw); 664 ieee80211_unregister_hw(hw);
594 ieee80211_free_hw(hw); 665 ieee80211_free_hw(hw);
595 666
667 lbtf_deb_leave(LBTF_DEB_MAIN);
596 return 0; 668 return 0;
597} 669}
598EXPORT_SYMBOL_GPL(lbtf_remove_card); 670EXPORT_SYMBOL_GPL(lbtf_remove_card);
@@ -651,17 +723,21 @@ EXPORT_SYMBOL_GPL(lbtf_bcn_sent);
651 723
652static int __init lbtf_init_module(void) 724static int __init lbtf_init_module(void)
653{ 725{
726 lbtf_deb_enter(LBTF_DEB_MAIN);
654 lbtf_wq = create_workqueue("libertastf"); 727 lbtf_wq = create_workqueue("libertastf");
655 if (lbtf_wq == NULL) { 728 if (lbtf_wq == NULL) {
656 printk(KERN_ERR "libertastf: couldn't create workqueue\n"); 729 printk(KERN_ERR "libertastf: couldn't create workqueue\n");
657 return -ENOMEM; 730 return -ENOMEM;
658 } 731 }
732 lbtf_deb_leave(LBTF_DEB_MAIN);
659 return 0; 733 return 0;
660} 734}
661 735
662static void __exit lbtf_exit_module(void) 736static void __exit lbtf_exit_module(void)
663{ 737{
738 lbtf_deb_enter(LBTF_DEB_MAIN);
664 destroy_workqueue(lbtf_wq); 739 destroy_workqueue(lbtf_wq);
740 lbtf_deb_leave(LBTF_DEB_MAIN);
665} 741}
666 742
667module_init(lbtf_init_module); 743module_init(lbtf_init_module);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7cd5f56662fc..6f8cb3ee6fed 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -291,7 +291,8 @@ struct mac80211_hwsim_data {
291 struct ieee80211_channel *channel; 291 struct ieee80211_channel *channel;
292 unsigned long beacon_int; /* in jiffies unit */ 292 unsigned long beacon_int; /* in jiffies unit */
293 unsigned int rx_filter; 293 unsigned int rx_filter;
294 bool started, idle; 294 bool started, idle, scanning;
295 struct mutex mutex;
295 struct timer_list beacon_timer; 296 struct timer_list beacon_timer;
296 enum ps_mode { 297 enum ps_mode {
297 PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL 298 PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
@@ -651,17 +652,17 @@ static void mac80211_hwsim_beacon(unsigned long arg)
651 add_timer(&data->beacon_timer); 652 add_timer(&data->beacon_timer);
652} 653}
653 654
655static const char *hwsim_chantypes[] = {
656 [NL80211_CHAN_NO_HT] = "noht",
657 [NL80211_CHAN_HT20] = "ht20",
658 [NL80211_CHAN_HT40MINUS] = "ht40-",
659 [NL80211_CHAN_HT40PLUS] = "ht40+",
660};
654 661
655static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) 662static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
656{ 663{
657 struct mac80211_hwsim_data *data = hw->priv; 664 struct mac80211_hwsim_data *data = hw->priv;
658 struct ieee80211_conf *conf = &hw->conf; 665 struct ieee80211_conf *conf = &hw->conf;
659 static const char *chantypes[4] = {
660 [NL80211_CHAN_NO_HT] = "noht",
661 [NL80211_CHAN_HT20] = "ht20",
662 [NL80211_CHAN_HT40MINUS] = "ht40-",
663 [NL80211_CHAN_HT40PLUS] = "ht40+",
664 };
665 static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = { 666 static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
666 [IEEE80211_SMPS_AUTOMATIC] = "auto", 667 [IEEE80211_SMPS_AUTOMATIC] = "auto",
667 [IEEE80211_SMPS_OFF] = "off", 668 [IEEE80211_SMPS_OFF] = "off",
@@ -672,7 +673,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
672 printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n", 673 printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
673 wiphy_name(hw->wiphy), __func__, 674 wiphy_name(hw->wiphy), __func__,
674 conf->channel->center_freq, 675 conf->channel->center_freq,
675 chantypes[conf->channel_type], 676 hwsim_chantypes[conf->channel_type],
676 !!(conf->flags & IEEE80211_CONF_IDLE), 677 !!(conf->flags & IEEE80211_CONF_IDLE),
677 !!(conf->flags & IEEE80211_CONF_PS), 678 !!(conf->flags & IEEE80211_CONF_PS),
678 smps_modes[conf->smps_mode]); 679 smps_modes[conf->smps_mode]);
@@ -760,9 +761,10 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
760 } 761 }
761 762
762 if (changed & BSS_CHANGED_HT) { 763 if (changed & BSS_CHANGED_HT) {
763 printk(KERN_DEBUG " %s: HT: op_mode=0x%x\n", 764 printk(KERN_DEBUG " %s: HT: op_mode=0x%x, chantype=%s\n",
764 wiphy_name(hw->wiphy), 765 wiphy_name(hw->wiphy),
765 info->ht_operation_mode); 766 info->ht_operation_mode,
767 hwsim_chantypes[info->channel_type]);
766 } 768 }
767 769
768 if (changed & BSS_CHANGED_BASIC_RATES) { 770 if (changed & BSS_CHANGED_BASIC_RATES) {
@@ -829,6 +831,33 @@ static int mac80211_hwsim_conf_tx(
829 return 0; 831 return 0;
830} 832}
831 833
834static int mac80211_hwsim_get_survey(
835 struct ieee80211_hw *hw, int idx,
836 struct survey_info *survey)
837{
838 struct ieee80211_conf *conf = &hw->conf;
839
840 printk(KERN_DEBUG "%s:%s (idx=%d)\n",
841 wiphy_name(hw->wiphy), __func__, idx);
842
843 if (idx != 0)
844 return -ENOENT;
845
846 /* Current channel */
847 survey->channel = conf->channel;
848
849 /*
850 * Magically conjured noise level --- this is only ok for simulated hardware.
851 *
852 * A real driver which cannot determine the real channel noise MUST NOT
853 * report any noise, especially not a magically conjured one :-)
854 */
855 survey->filled = SURVEY_INFO_NOISE_DBM;
856 survey->noise = -92;
857
858 return 0;
859}
860
832#ifdef CONFIG_NL80211_TESTMODE 861#ifdef CONFIG_NL80211_TESTMODE
833/* 862/*
834 * This section contains example code for using netlink 863 * This section contains example code for using netlink
@@ -946,6 +975,7 @@ static void hw_scan_done(struct work_struct *work)
946} 975}
947 976
948static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw, 977static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
978 struct ieee80211_vif *vif,
949 struct cfg80211_scan_request *req) 979 struct cfg80211_scan_request *req)
950{ 980{
951 struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL); 981 struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL);
@@ -957,9 +987,9 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
957 hsd->hw = hw; 987 hsd->hw = hw;
958 INIT_DELAYED_WORK(&hsd->w, hw_scan_done); 988 INIT_DELAYED_WORK(&hsd->w, hw_scan_done);
959 989
960 printk(KERN_DEBUG "hwsim scan request\n"); 990 printk(KERN_DEBUG "hwsim hw_scan request\n");
961 for (i = 0; i < req->n_channels; i++) 991 for (i = 0; i < req->n_channels; i++)
962 printk(KERN_DEBUG "hwsim scan freq %d\n", 992 printk(KERN_DEBUG "hwsim hw_scan freq %d\n",
963 req->channels[i]->center_freq); 993 req->channels[i]->center_freq);
964 994
965 ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ); 995 ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
@@ -967,6 +997,36 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
967 return 0; 997 return 0;
968} 998}
969 999
1000static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
1001{
1002 struct mac80211_hwsim_data *hwsim = hw->priv;
1003
1004 mutex_lock(&hwsim->mutex);
1005
1006 if (hwsim->scanning) {
1007 printk(KERN_DEBUG "two hwsim sw_scans detected!\n");
1008 goto out;
1009 }
1010
1011 printk(KERN_DEBUG "hwsim sw_scan request, prepping stuff\n");
1012 hwsim->scanning = true;
1013
1014out:
1015 mutex_unlock(&hwsim->mutex);
1016}
1017
1018static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
1019{
1020 struct mac80211_hwsim_data *hwsim = hw->priv;
1021
1022 mutex_lock(&hwsim->mutex);
1023
1024 printk(KERN_DEBUG "hwsim sw_scan_complete\n");
1025 hwsim->scanning = false;
1026
1027 mutex_unlock(&hwsim->mutex);
1028}
1029
970static struct ieee80211_ops mac80211_hwsim_ops = 1030static struct ieee80211_ops mac80211_hwsim_ops =
971{ 1031{
972 .tx = mac80211_hwsim_tx, 1032 .tx = mac80211_hwsim_tx,
@@ -982,8 +1042,11 @@ static struct ieee80211_ops mac80211_hwsim_ops =
982 .sta_notify = mac80211_hwsim_sta_notify, 1042 .sta_notify = mac80211_hwsim_sta_notify,
983 .set_tim = mac80211_hwsim_set_tim, 1043 .set_tim = mac80211_hwsim_set_tim,
984 .conf_tx = mac80211_hwsim_conf_tx, 1044 .conf_tx = mac80211_hwsim_conf_tx,
1045 .get_survey = mac80211_hwsim_get_survey,
985 CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) 1046 CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
986 .ampdu_action = mac80211_hwsim_ampdu_action, 1047 .ampdu_action = mac80211_hwsim_ampdu_action,
1048 .sw_scan_start = mac80211_hwsim_sw_scan,
1049 .sw_scan_complete = mac80211_hwsim_sw_scan_complete,
987 .flush = mac80211_hwsim_flush, 1050 .flush = mac80211_hwsim_flush,
988}; 1051};
989 1052
@@ -1179,8 +1242,11 @@ static int __init init_mac80211_hwsim(void)
1179 if (radios < 1 || radios > 100) 1242 if (radios < 1 || radios > 100)
1180 return -EINVAL; 1243 return -EINVAL;
1181 1244
1182 if (fake_hw_scan) 1245 if (fake_hw_scan) {
1183 mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan; 1246 mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
1247 mac80211_hwsim_ops.sw_scan_start = NULL;
1248 mac80211_hwsim_ops.sw_scan_complete = NULL;
1249 }
1184 1250
1185 spin_lock_init(&hwsim_radio_lock); 1251 spin_lock_init(&hwsim_radio_lock);
1186 INIT_LIST_HEAD(&hwsim_radios); 1252 INIT_LIST_HEAD(&hwsim_radios);
@@ -1235,7 +1301,8 @@ static int __init init_mac80211_hwsim(void)
1235 hw->flags = IEEE80211_HW_MFP_CAPABLE | 1301 hw->flags = IEEE80211_HW_MFP_CAPABLE |
1236 IEEE80211_HW_SIGNAL_DBM | 1302 IEEE80211_HW_SIGNAL_DBM |
1237 IEEE80211_HW_SUPPORTS_STATIC_SMPS | 1303 IEEE80211_HW_SUPPORTS_STATIC_SMPS |
1238 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS; 1304 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
1305 IEEE80211_HW_AMPDU_AGGREGATION;
1239 1306
1240 /* ask mac80211 to reserve space for magic */ 1307 /* ask mac80211 to reserve space for magic */
1241 hw->vif_data_size = sizeof(struct hwsim_vif_priv); 1308 hw->vif_data_size = sizeof(struct hwsim_vif_priv);
@@ -1285,6 +1352,7 @@ static int __init init_mac80211_hwsim(void)
1285 } 1352 }
1286 /* By default all radios are belonging to the first group */ 1353 /* By default all radios are belonging to the first group */
1287 data->group = 1; 1354 data->group = 1;
1355 mutex_init(&data->mutex);
1288 1356
1289 /* Work to be done prior to ieee80211_register_hw() */ 1357 /* Work to be done prior to ieee80211_register_hw() */
1290 switch (regtest) { 1358 switch (regtest) {
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 12fdcb25fd38..808adb909095 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -750,7 +750,6 @@ mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
750 memset(status, 0, sizeof(*status)); 750 memset(status, 0, sizeof(*status));
751 751
752 status->signal = -rxd->rssi; 752 status->signal = -rxd->rssi;
753 status->noise = -rxd->noise_floor;
754 753
755 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) { 754 if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
756 status->flag |= RX_FLAG_HT; 755 status->flag |= RX_FLAG_HT;
@@ -852,7 +851,6 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
852 memset(status, 0, sizeof(*status)); 851 memset(status, 0, sizeof(*status));
853 852
854 status->signal = -rxd->rssi; 853 status->signal = -rxd->rssi;
855 status->noise = -rxd->noise_level;
856 status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info); 854 status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
857 status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info); 855 status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
858 856
@@ -1939,11 +1937,15 @@ struct mwl8k_cmd_mac_multicast_adr {
1939 1937
1940static struct mwl8k_cmd_pkt * 1938static struct mwl8k_cmd_pkt *
1941__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, 1939__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
1942 int mc_count, struct dev_addr_list *mclist) 1940 struct netdev_hw_addr_list *mc_list)
1943{ 1941{
1944 struct mwl8k_priv *priv = hw->priv; 1942 struct mwl8k_priv *priv = hw->priv;
1945 struct mwl8k_cmd_mac_multicast_adr *cmd; 1943 struct mwl8k_cmd_mac_multicast_adr *cmd;
1946 int size; 1944 int size;
1945 int mc_count = 0;
1946
1947 if (mc_list)
1948 mc_count = netdev_hw_addr_list_count(mc_list);
1947 1949
1948 if (allmulti || mc_count > priv->num_mcaddrs) { 1950 if (allmulti || mc_count > priv->num_mcaddrs) {
1949 allmulti = 1; 1951 allmulti = 1;
@@ -1964,17 +1966,13 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
1964 if (allmulti) { 1966 if (allmulti) {
1965 cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST); 1967 cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST);
1966 } else if (mc_count) { 1968 } else if (mc_count) {
1967 int i; 1969 struct netdev_hw_addr *ha;
1970 int i = 0;
1968 1971
1969 cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST); 1972 cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
1970 cmd->numaddr = cpu_to_le16(mc_count); 1973 cmd->numaddr = cpu_to_le16(mc_count);
1971 for (i = 0; i < mc_count && mclist; i++) { 1974 netdev_hw_addr_list_for_each(ha, mc_list) {
1972 if (mclist->da_addrlen != ETH_ALEN) { 1975 memcpy(cmd->addr[i], ha->addr, ETH_ALEN);
1973 kfree(cmd);
1974 return NULL;
1975 }
1976 memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
1977 mclist = mclist->next;
1978 } 1976 }
1979 } 1977 }
1980 1978
@@ -3553,7 +3551,7 @@ mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3553} 3551}
3554 3552
3555static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw, 3553static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
3556 int mc_count, struct dev_addr_list *mclist) 3554 struct netdev_hw_addr_list *mc_list)
3557{ 3555{
3558 struct mwl8k_cmd_pkt *cmd; 3556 struct mwl8k_cmd_pkt *cmd;
3559 3557
@@ -3564,7 +3562,7 @@ static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
3564 * we'll end up throwing this packet away and creating a new 3562 * we'll end up throwing this packet away and creating a new
3565 * one in mwl8k_configure_filter(). 3563 * one in mwl8k_configure_filter().
3566 */ 3564 */
3567 cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_count, mclist); 3565 cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_list);
3568 3566
3569 return (unsigned long)cmd; 3567 return (unsigned long)cmd;
3570} 3568}
@@ -3687,7 +3685,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3687 */ 3685 */
3688 if (*total_flags & FIF_ALLMULTI) { 3686 if (*total_flags & FIF_ALLMULTI) {
3689 kfree(cmd); 3687 kfree(cmd);
3690 cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, 0, NULL); 3688 cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, NULL);
3691 } 3689 }
3692 3690
3693 if (cmd != NULL) { 3691 if (cmd != NULL) {
@@ -3984,8 +3982,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3984 3982
3985 hw->queues = MWL8K_TX_QUEUES; 3983 hw->queues = MWL8K_TX_QUEUES;
3986 3984
3987 /* Set rssi and noise values to dBm */ 3985 /* Set rssi values to dBm */
3988 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; 3986 hw->flags |= IEEE80211_HW_SIGNAL_DBM;
3989 hw->vif_data_size = sizeof(struct mwl8k_vif); 3987 hw->vif_data_size = sizeof(struct mwl8k_vif);
3990 hw->sta_data_size = sizeof(struct mwl8k_sta); 3988 hw->sta_data_size = sizeof(struct mwl8k_sta);
3991 3989
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
index e2a2c18920aa..60819bcf4377 100644
--- a/drivers/net/wireless/orinoco/Kconfig
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -27,6 +27,17 @@ config HERMES
27 configure your card and that /etc/pcmcia/wireless.opts works : 27 configure your card and that /etc/pcmcia/wireless.opts works :
28 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html> 28 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
29 29
30config HERMES_PRISM
31 bool "Support Prism 2/2.5 chipset"
32 depends on HERMES
33 ---help---
34
35 Say Y to enable support for Prism 2 and 2.5 chipsets. These
36 chipsets are better handled by the hostap driver. This driver
37 would not support WPA or firmware download for Prism chipset.
38
39 If you are not sure, say N.
40
30config HERMES_CACHE_FW_ON_INIT 41config HERMES_CACHE_FW_ON_INIT
31 bool "Cache Hermes firmware on driver initialisation" 42 bool "Cache Hermes firmware on driver initialisation"
32 depends on HERMES 43 depends on HERMES
@@ -86,7 +97,7 @@ config NORTEL_HERMES
86 97
87config PCI_HERMES 98config PCI_HERMES
88 tristate "Prism 2.5 PCI 802.11b adaptor support" 99 tristate "Prism 2.5 PCI 802.11b adaptor support"
89 depends on PCI && HERMES 100 depends on PCI && HERMES && HERMES_PRISM
90 help 101 help
91 Enable support for PCI and mini-PCI 802.11b wireless NICs based on 102 Enable support for PCI and mini-PCI 802.11b wireless NICs based on
92 the Prism 2.5 chipset. These are true PCI cards, not the 802.11b 103 the Prism 2.5 chipset. These are true PCI cards, not the 802.11b
@@ -121,3 +132,10 @@ config PCMCIA_SPECTRUM
121 This driver requires firmware download on startup. Utilities 132 This driver requires firmware download on startup. Utilities
122 for downloading Symbol firmware are available at 133 for downloading Symbol firmware are available at
123 <http://sourceforge.net/projects/orinoco/> 134 <http://sourceforge.net/projects/orinoco/>
135
136config ORINOCO_USB
137 tristate "Agere Orinoco USB support"
138 depends on USB && HERMES
139 select FW_LOADER
140 ---help---
141 This driver is for USB versions of the Agere Orinoco card.
diff --git a/drivers/net/wireless/orinoco/Makefile b/drivers/net/wireless/orinoco/Makefile
index 9abd6329bcbd..bfdefb85abcd 100644
--- a/drivers/net/wireless/orinoco/Makefile
+++ b/drivers/net/wireless/orinoco/Makefile
@@ -11,3 +11,7 @@ obj-$(CONFIG_PCI_HERMES) += orinoco_pci.o
11obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o 11obj-$(CONFIG_TMD_HERMES) += orinoco_tmd.o
12obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o 12obj-$(CONFIG_NORTEL_HERMES) += orinoco_nortel.o
13obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o 13obj-$(CONFIG_PCMCIA_SPECTRUM) += spectrum_cs.o
14obj-$(CONFIG_ORINOCO_USB) += orinoco_usb.o
15
16# Orinoco should be endian clean.
17ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/orinoco/airport.c b/drivers/net/wireless/orinoco/airport.c
index c60df2c1aca3..9bcee10c9308 100644
--- a/drivers/net/wireless/orinoco/airport.c
+++ b/drivers/net/wireless/orinoco/airport.c
@@ -77,9 +77,9 @@ airport_resume(struct macio_dev *mdev)
77 77
78 enable_irq(card->irq); 78 enable_irq(card->irq);
79 79
80 spin_lock_irqsave(&priv->lock, flags); 80 priv->hw.ops->lock_irqsave(&priv->lock, &flags);
81 err = orinoco_up(priv); 81 err = orinoco_up(priv);
82 spin_unlock_irqrestore(&priv->lock, flags); 82 priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
83 83
84 return err; 84 return err;
85} 85}
@@ -195,7 +195,7 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
195 ssleep(1); 195 ssleep(1);
196 196
197 /* Reset it before we get the interrupt */ 197 /* Reset it before we get the interrupt */
198 hermes_init(hw); 198 hw->ops->init(hw);
199 199
200 if (request_irq(card->irq, orinoco_interrupt, 0, DRIVER_NAME, priv)) { 200 if (request_irq(card->irq, orinoco_interrupt, 0, DRIVER_NAME, priv)) {
201 printk(KERN_ERR PFX "Couldn't get IRQ %d\n", card->irq); 201 printk(KERN_ERR PFX "Couldn't get IRQ %d\n", card->irq);
@@ -210,7 +210,7 @@ airport_attach(struct macio_dev *mdev, const struct of_device_id *match)
210 } 210 }
211 211
212 /* Register an interface with the stack */ 212 /* Register an interface with the stack */
213 if (orinoco_if_add(priv, phys_addr, card->irq) != 0) { 213 if (orinoco_if_add(priv, phys_addr, card->irq, NULL) != 0) {
214 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 214 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
215 goto failed; 215 goto failed;
216 } 216 }
diff --git a/drivers/net/wireless/orinoco/cfg.c b/drivers/net/wireless/orinoco/cfg.c
index 27f2d3342645..8c4169c227ae 100644
--- a/drivers/net/wireless/orinoco/cfg.c
+++ b/drivers/net/wireless/orinoco/cfg.c
@@ -88,7 +88,9 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
88 88
89 wiphy->rts_threshold = priv->rts_thresh; 89 wiphy->rts_threshold = priv->rts_thresh;
90 if (!priv->has_mwo) 90 if (!priv->has_mwo)
91 wiphy->frag_threshold = priv->frag_thresh; 91 wiphy->frag_threshold = priv->frag_thresh + 1;
92 wiphy->retry_short = priv->short_retry_limit;
93 wiphy->retry_long = priv->long_retry_limit;
92 94
93 return wiphy_register(wiphy); 95 return wiphy_register(wiphy);
94} 96}
@@ -157,6 +159,7 @@ static int orinoco_scan(struct wiphy *wiphy, struct net_device *dev,
157} 159}
158 160
159static int orinoco_set_channel(struct wiphy *wiphy, 161static int orinoco_set_channel(struct wiphy *wiphy,
162 struct net_device *netdev,
160 struct ieee80211_channel *chan, 163 struct ieee80211_channel *chan,
161 enum nl80211_channel_type channel_type) 164 enum nl80211_channel_type channel_type)
162{ 165{
@@ -187,7 +190,7 @@ static int orinoco_set_channel(struct wiphy *wiphy,
187 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { 190 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
188 /* Fast channel change - no commit if successful */ 191 /* Fast channel change - no commit if successful */
189 hermes_t *hw = &priv->hw; 192 hermes_t *hw = &priv->hw;
190 err = hermes_docmd_wait(hw, HERMES_CMD_TEST | 193 err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
191 HERMES_TEST_SET_CHANNEL, 194 HERMES_TEST_SET_CHANNEL,
192 channel, NULL); 195 channel, NULL);
193 } 196 }
@@ -196,8 +199,92 @@ static int orinoco_set_channel(struct wiphy *wiphy,
196 return err; 199 return err;
197} 200}
198 201
202static int orinoco_set_wiphy_params(struct wiphy *wiphy, u32 changed)
203{
204 struct orinoco_private *priv = wiphy_priv(wiphy);
205 int frag_value = -1;
206 int rts_value = -1;
207 int err = 0;
208
209 if (changed & WIPHY_PARAM_RETRY_SHORT) {
210 /* Setting short retry not supported */
211 err = -EINVAL;
212 }
213
214 if (changed & WIPHY_PARAM_RETRY_LONG) {
215 /* Setting long retry not supported */
216 err = -EINVAL;
217 }
218
219 if (changed & WIPHY_PARAM_FRAG_THRESHOLD) {
220 /* Set fragmentation */
221 if (priv->has_mwo) {
222 if (wiphy->frag_threshold < 0)
223 frag_value = 0;
224 else {
225 printk(KERN_WARNING "%s: Fixed fragmentation "
226 "is not supported on this firmware. "
227 "Using MWO robust instead.\n",
228 priv->ndev->name);
229 frag_value = 1;
230 }
231 } else {
232 if (wiphy->frag_threshold < 0)
233 frag_value = 2346;
234 else if ((wiphy->frag_threshold < 257) ||
235 (wiphy->frag_threshold > 2347))
236 err = -EINVAL;
237 else
238 /* cfg80211 value is 257-2347 (odd only)
239 * orinoco rid has range 256-2346 (even only) */
240 frag_value = wiphy->frag_threshold & ~0x1;
241 }
242 }
243
244 if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
245 /* Set RTS.
246 *
247 * Prism documentation suggests default of 2432,
248 * and a range of 0-3000.
249 *
250 * Current implementation uses 2347 as the default and
251 * the upper limit.
252 */
253
254 if (wiphy->rts_threshold < 0)
255 rts_value = 2347;
256 else if (wiphy->rts_threshold > 2347)
257 err = -EINVAL;
258 else
259 rts_value = wiphy->rts_threshold;
260 }
261
262 if (!err) {
263 unsigned long flags;
264
265 if (orinoco_lock(priv, &flags) != 0)
266 return -EBUSY;
267
268 if (frag_value >= 0) {
269 if (priv->has_mwo)
270 priv->mwo_robust = frag_value;
271 else
272 priv->frag_thresh = frag_value;
273 }
274 if (rts_value >= 0)
275 priv->rts_thresh = rts_value;
276
277 err = orinoco_commit(priv);
278
279 orinoco_unlock(priv, &flags);
280 }
281
282 return err;
283}
284
199const struct cfg80211_ops orinoco_cfg_ops = { 285const struct cfg80211_ops orinoco_cfg_ops = {
200 .change_virtual_intf = orinoco_change_vif, 286 .change_virtual_intf = orinoco_change_vif,
201 .set_channel = orinoco_set_channel, 287 .set_channel = orinoco_set_channel,
202 .scan = orinoco_scan, 288 .scan = orinoco_scan,
289 .set_wiphy_params = orinoco_set_wiphy_params,
203}; 290};
diff --git a/drivers/net/wireless/orinoco/fw.c b/drivers/net/wireless/orinoco/fw.c
index 5ea0f7cf85b1..3e1947d097ca 100644
--- a/drivers/net/wireless/orinoco/fw.c
+++ b/drivers/net/wireless/orinoco/fw.c
@@ -122,7 +122,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
122 dev_dbg(dev, "Attempting to download firmware %s\n", firmware); 122 dev_dbg(dev, "Attempting to download firmware %s\n", firmware);
123 123
124 /* Read current plug data */ 124 /* Read current plug data */
125 err = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 0); 125 err = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
126 dev_dbg(dev, "Read PDA returned %d\n", err); 126 dev_dbg(dev, "Read PDA returned %d\n", err);
127 if (err) 127 if (err)
128 goto free; 128 goto free;
@@ -149,7 +149,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
149 } 149 }
150 150
151 /* Enable aux port to allow programming */ 151 /* Enable aux port to allow programming */
152 err = hermesi_program_init(hw, le32_to_cpu(hdr->entry_point)); 152 err = hw->ops->program_init(hw, le32_to_cpu(hdr->entry_point));
153 dev_dbg(dev, "Program init returned %d\n", err); 153 dev_dbg(dev, "Program init returned %d\n", err);
154 if (err != 0) 154 if (err != 0)
155 goto abort; 155 goto abort;
@@ -177,7 +177,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
177 goto abort; 177 goto abort;
178 178
179 /* Tell card we've finished */ 179 /* Tell card we've finished */
180 err = hermesi_program_end(hw); 180 err = hw->ops->program_end(hw);
181 dev_dbg(dev, "Program end returned %d\n", err); 181 dev_dbg(dev, "Program end returned %d\n", err);
182 if (err != 0) 182 if (err != 0)
183 goto abort; 183 goto abort;
@@ -224,7 +224,7 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
224 if (!pda) 224 if (!pda)
225 return -ENOMEM; 225 return -ENOMEM;
226 226
227 ret = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 1); 227 ret = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size);
228 if (ret) 228 if (ret)
229 goto free; 229 goto free;
230 } 230 }
@@ -260,7 +260,7 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
260 } 260 }
261 261
262 /* Reset hermes chip and make sure it responds */ 262 /* Reset hermes chip and make sure it responds */
263 ret = hermes_init(hw); 263 ret = hw->ops->init(hw);
264 264
265 /* hermes_reset() should return 0 with the secondary firmware */ 265 /* hermes_reset() should return 0 with the secondary firmware */
266 if (secondary && ret != 0) 266 if (secondary && ret != 0)
diff --git a/drivers/net/wireless/orinoco/hermes.c b/drivers/net/wireless/orinoco/hermes.c
index 1a2fca76fd3c..6c6a23e08df6 100644
--- a/drivers/net/wireless/orinoco/hermes.c
+++ b/drivers/net/wireless/orinoco/hermes.c
@@ -52,6 +52,26 @@
52#define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */ 52#define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */
53 53
54/* 54/*
55 * AUX port access. To unlock the AUX port write the access keys to the
56 * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
57 * register. Then read it and make sure it's HERMES_AUX_ENABLED.
58 */
59#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
60#define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */
61#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
62#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
63
64#define HERMES_AUX_PW0 0xFE01
65#define HERMES_AUX_PW1 0xDC23
66#define HERMES_AUX_PW2 0xBA45
67
68/* HERMES_CMD_DOWNLD */
69#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
70#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
71#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
72#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
73
74/*
55 * Debugging helpers 75 * Debugging helpers
56 */ 76 */
57 77
@@ -70,6 +90,7 @@
70 90
71#endif /* ! HERMES_DEBUG */ 91#endif /* ! HERMES_DEBUG */
72 92
93static const struct hermes_ops hermes_ops_local;
73 94
74/* 95/*
75 * Internal functions 96 * Internal functions
@@ -111,9 +132,9 @@ static int hermes_issue_cmd(hermes_t *hw, u16 cmd, u16 param0,
111 */ 132 */
112 133
113/* For doing cmds that wipe the magic constant in SWSUPPORT0 */ 134/* For doing cmds that wipe the magic constant in SWSUPPORT0 */
114int hermes_doicmd_wait(hermes_t *hw, u16 cmd, 135static int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
115 u16 parm0, u16 parm1, u16 parm2, 136 u16 parm0, u16 parm1, u16 parm2,
116 struct hermes_response *resp) 137 struct hermes_response *resp)
117{ 138{
118 int err = 0; 139 int err = 0;
119 int k; 140 int k;
@@ -163,17 +184,18 @@ int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
163out: 184out:
164 return err; 185 return err;
165} 186}
166EXPORT_SYMBOL(hermes_doicmd_wait);
167 187
168void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing) 188void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing)
169{ 189{
170 hw->iobase = address; 190 hw->iobase = address;
171 hw->reg_spacing = reg_spacing; 191 hw->reg_spacing = reg_spacing;
172 hw->inten = 0x0; 192 hw->inten = 0x0;
193 hw->eeprom_pda = false;
194 hw->ops = &hermes_ops_local;
173} 195}
174EXPORT_SYMBOL(hermes_struct_init); 196EXPORT_SYMBOL(hermes_struct_init);
175 197
176int hermes_init(hermes_t *hw) 198static int hermes_init(hermes_t *hw)
177{ 199{
178 u16 reg; 200 u16 reg;
179 int err = 0; 201 int err = 0;
@@ -217,7 +239,6 @@ int hermes_init(hermes_t *hw)
217 239
218 return err; 240 return err;
219} 241}
220EXPORT_SYMBOL(hermes_init);
221 242
222/* Issue a command to the chip, and (busy!) wait for it to 243/* Issue a command to the chip, and (busy!) wait for it to
223 * complete. 244 * complete.
@@ -228,8 +249,8 @@ EXPORT_SYMBOL(hermes_init);
228 * > 0 on error returned by the firmware 249 * > 0 on error returned by the firmware
229 * 250 *
230 * Callable from any context, but locking is your problem. */ 251 * Callable from any context, but locking is your problem. */
231int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0, 252static int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
232 struct hermes_response *resp) 253 struct hermes_response *resp)
233{ 254{
234 int err; 255 int err;
235 int k; 256 int k;
@@ -291,9 +312,8 @@ int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
291 out: 312 out:
292 return err; 313 return err;
293} 314}
294EXPORT_SYMBOL(hermes_docmd_wait);
295 315
296int hermes_allocate(hermes_t *hw, u16 size, u16 *fid) 316static int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
297{ 317{
298 int err = 0; 318 int err = 0;
299 int k; 319 int k;
@@ -333,7 +353,6 @@ int hermes_allocate(hermes_t *hw, u16 size, u16 *fid)
333 353
334 return 0; 354 return 0;
335} 355}
336EXPORT_SYMBOL(hermes_allocate);
337 356
338/* Set up a BAP to read a particular chunk of data from card's internal buffer. 357/* Set up a BAP to read a particular chunk of data from card's internal buffer.
339 * 358 *
@@ -403,8 +422,8 @@ static int hermes_bap_seek(hermes_t *hw, int bap, u16 id, u16 offset)
403 * 0 on success 422 * 0 on success
404 * > 0 on error from firmware 423 * > 0 on error from firmware
405 */ 424 */
406int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len, 425static int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
407 u16 id, u16 offset) 426 u16 id, u16 offset)
408{ 427{
409 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; 428 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
410 int err = 0; 429 int err = 0;
@@ -422,7 +441,6 @@ int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
422 out: 441 out:
423 return err; 442 return err;
424} 443}
425EXPORT_SYMBOL(hermes_bap_pread);
426 444
427/* Write a block of data to the chip's buffer, via the 445/* Write a block of data to the chip's buffer, via the
428 * BAP. Synchronization/serialization is the caller's problem. 446 * BAP. Synchronization/serialization is the caller's problem.
@@ -432,8 +450,8 @@ EXPORT_SYMBOL(hermes_bap_pread);
432 * 0 on success 450 * 0 on success
433 * > 0 on error from firmware 451 * > 0 on error from firmware
434 */ 452 */
435int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len, 453static int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
436 u16 id, u16 offset) 454 u16 id, u16 offset)
437{ 455{
438 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; 456 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
439 int err = 0; 457 int err = 0;
@@ -451,7 +469,6 @@ int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
451 out: 469 out:
452 return err; 470 return err;
453} 471}
454EXPORT_SYMBOL(hermes_bap_pwrite);
455 472
456/* Read a Length-Type-Value record from the card. 473/* Read a Length-Type-Value record from the card.
457 * 474 *
@@ -461,8 +478,8 @@ EXPORT_SYMBOL(hermes_bap_pwrite);
461 * practice. 478 * practice.
462 * 479 *
463 * Callable from user or bh context. */ 480 * Callable from user or bh context. */
464int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize, 481static int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
465 u16 *length, void *buf) 482 u16 *length, void *buf)
466{ 483{
467 int err = 0; 484 int err = 0;
468 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; 485 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
@@ -505,10 +522,9 @@ int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned bufsize,
505 522
506 return 0; 523 return 0;
507} 524}
508EXPORT_SYMBOL(hermes_read_ltv);
509 525
510int hermes_write_ltv(hermes_t *hw, int bap, u16 rid, 526static int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
511 u16 length, const void *value) 527 u16 length, const void *value)
512{ 528{
513 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; 529 int dreg = bap ? HERMES_DATA1 : HERMES_DATA0;
514 int err = 0; 530 int err = 0;
@@ -533,4 +549,228 @@ int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
533 549
534 return err; 550 return err;
535} 551}
536EXPORT_SYMBOL(hermes_write_ltv); 552
553/*** Hermes AUX control ***/
554
555static inline void
556hermes_aux_setaddr(hermes_t *hw, u32 addr)
557{
558 hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
559 hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
560}
561
562static inline int
563hermes_aux_control(hermes_t *hw, int enabled)
564{
565 int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
566 int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
567 int i;
568
569 /* Already open? */
570 if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
571 return 0;
572
573 hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
574 hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
575 hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
576 hermes_write_reg(hw, HERMES_CONTROL, action);
577
578 for (i = 0; i < 20; i++) {
579 udelay(10);
580 if (hermes_read_reg(hw, HERMES_CONTROL) ==
581 desired_state)
582 return 0;
583 }
584
585 return -EBUSY;
586}
587
588/*** Hermes programming ***/
589
590/* About to start programming data (Hermes I)
591 * offset is the entry point
592 *
593 * Spectrum_cs' Symbol fw does not require this
594 * wl_lkm Agere fw does
595 * Don't know about intersil
596 */
597static int hermesi_program_init(hermes_t *hw, u32 offset)
598{
599 int err;
600
601 /* Disable interrupts?*/
602 /*hw->inten = 0x0;*/
603 /*hermes_write_regn(hw, INTEN, 0);*/
604 /*hermes_set_irqmask(hw, 0);*/
605
606 /* Acknowledge any outstanding command */
607 hermes_write_regn(hw, EVACK, 0xFFFF);
608
609 /* Using init_cmd_wait rather than cmd_wait */
610 err = hw->ops->init_cmd_wait(hw,
611 0x0100 | HERMES_CMD_INIT,
612 0, 0, 0, NULL);
613 if (err)
614 return err;
615
616 err = hw->ops->init_cmd_wait(hw,
617 0x0000 | HERMES_CMD_INIT,
618 0, 0, 0, NULL);
619 if (err)
620 return err;
621
622 err = hermes_aux_control(hw, 1);
623 pr_debug("AUX enable returned %d\n", err);
624
625 if (err)
626 return err;
627
628 pr_debug("Enabling volatile, EP 0x%08x\n", offset);
629 err = hw->ops->init_cmd_wait(hw,
630 HERMES_PROGRAM_ENABLE_VOLATILE,
631 offset & 0xFFFFu,
632 offset >> 16,
633 0,
634 NULL);
635 pr_debug("PROGRAM_ENABLE returned %d\n", err);
636
637 return err;
638}
639
640/* Done programming data (Hermes I)
641 *
642 * Spectrum_cs' Symbol fw does not require this
643 * wl_lkm Agere fw does
644 * Don't know about intersil
645 */
646static int hermesi_program_end(hermes_t *hw)
647{
648 struct hermes_response resp;
649 int rc = 0;
650 int err;
651
652 rc = hw->ops->cmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
653
654 pr_debug("PROGRAM_DISABLE returned %d, "
655 "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
656 rc, resp.resp0, resp.resp1, resp.resp2);
657
658 if ((rc == 0) &&
659 ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
660 rc = -EIO;
661
662 err = hermes_aux_control(hw, 0);
663 pr_debug("AUX disable returned %d\n", err);
664
665 /* Acknowledge any outstanding command */
666 hermes_write_regn(hw, EVACK, 0xFFFF);
667
668 /* Reinitialise, ignoring return */
669 (void) hw->ops->init_cmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
670 0, 0, 0, NULL);
671
672 return rc ? rc : err;
673}
674
675static int hermes_program_bytes(struct hermes *hw, const char *data,
676 u32 addr, u32 len)
677{
678 /* wl lkm splits the programming into chunks of 2000 bytes.
679 * This restriction appears to come from USB. The PCMCIA
680 * adapters can program the whole lot in one go */
681 hermes_aux_setaddr(hw, addr);
682 hermes_write_bytes(hw, HERMES_AUXDATA, data, len);
683 return 0;
684}
685
686/* Read PDA from the adapter */
687static int hermes_read_pda(hermes_t *hw, __le16 *pda, u32 pda_addr, u16 pda_len)
688{
689 int ret;
690 u16 pda_size;
691 u16 data_len = pda_len;
692 __le16 *data = pda;
693
694 if (hw->eeprom_pda) {
695 /* PDA of spectrum symbol is in eeprom */
696
697 /* Issue command to read EEPROM */
698 ret = hw->ops->cmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
699 if (ret)
700 return ret;
701 } else {
702 /* wl_lkm does not include PDA size in the PDA area.
703 * We will pad the information into pda, so other routines
704 * don't have to be modified */
705 pda[0] = cpu_to_le16(pda_len - 2);
706 /* Includes CFG_PROD_DATA but not itself */
707 pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
708 data_len = pda_len - 4;
709 data = pda + 2;
710 }
711
712 /* Open auxiliary port */
713 ret = hermes_aux_control(hw, 1);
714 pr_debug("AUX enable returned %d\n", ret);
715 if (ret)
716 return ret;
717
718 /* Read PDA */
719 hermes_aux_setaddr(hw, pda_addr);
720 hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
721
722 /* Close aux port */
723 ret = hermes_aux_control(hw, 0);
724 pr_debug("AUX disable returned %d\n", ret);
725
726 /* Check PDA length */
727 pda_size = le16_to_cpu(pda[0]);
728 pr_debug("Actual PDA length %d, Max allowed %d\n",
729 pda_size, pda_len);
730 if (pda_size > pda_len)
731 return -EINVAL;
732
733 return 0;
734}
735
736static void hermes_lock_irqsave(spinlock_t *lock,
737 unsigned long *flags) __acquires(lock)
738{
739 spin_lock_irqsave(lock, *flags);
740}
741
742static void hermes_unlock_irqrestore(spinlock_t *lock,
743 unsigned long *flags) __releases(lock)
744{
745 spin_unlock_irqrestore(lock, *flags);
746}
747
748static void hermes_lock_irq(spinlock_t *lock) __acquires(lock)
749{
750 spin_lock_irq(lock);
751}
752
753static void hermes_unlock_irq(spinlock_t *lock) __releases(lock)
754{
755 spin_unlock_irq(lock);
756}
757
758/* Hermes operations for local buses */
759static const struct hermes_ops hermes_ops_local = {
760 .init = hermes_init,
761 .cmd_wait = hermes_docmd_wait,
762 .init_cmd_wait = hermes_doicmd_wait,
763 .allocate = hermes_allocate,
764 .read_ltv = hermes_read_ltv,
765 .write_ltv = hermes_write_ltv,
766 .bap_pread = hermes_bap_pread,
767 .bap_pwrite = hermes_bap_pwrite,
768 .read_pda = hermes_read_pda,
769 .program_init = hermesi_program_init,
770 .program_end = hermesi_program_end,
771 .program = hermes_program_bytes,
772 .lock_irqsave = hermes_lock_irqsave,
773 .unlock_irqrestore = hermes_unlock_irqrestore,
774 .lock_irq = hermes_lock_irq,
775 .unlock_irq = hermes_unlock_irq,
776};
diff --git a/drivers/net/wireless/orinoco/hermes.h b/drivers/net/wireless/orinoco/hermes.h
index 2dddbb597c4d..9ca34e722b45 100644
--- a/drivers/net/wireless/orinoco/hermes.h
+++ b/drivers/net/wireless/orinoco/hermes.h
@@ -374,6 +374,37 @@ struct hermes_multicast {
374/* Timeouts */ 374/* Timeouts */
375#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */ 375#define HERMES_BAP_BUSY_TIMEOUT (10000) /* In iterations of ~1us */
376 376
377struct hermes;
378
379/* Functions to access hardware */
380struct hermes_ops {
381 int (*init)(struct hermes *hw);
382 int (*cmd_wait)(struct hermes *hw, u16 cmd, u16 parm0,
383 struct hermes_response *resp);
384 int (*init_cmd_wait)(struct hermes *hw, u16 cmd,
385 u16 parm0, u16 parm1, u16 parm2,
386 struct hermes_response *resp);
387 int (*allocate)(struct hermes *hw, u16 size, u16 *fid);
388 int (*read_ltv)(struct hermes *hw, int bap, u16 rid, unsigned buflen,
389 u16 *length, void *buf);
390 int (*write_ltv)(struct hermes *hw, int bap, u16 rid,
391 u16 length, const void *value);
392 int (*bap_pread)(struct hermes *hw, int bap, void *buf, int len,
393 u16 id, u16 offset);
394 int (*bap_pwrite)(struct hermes *hw, int bap, const void *buf,
395 int len, u16 id, u16 offset);
396 int (*read_pda)(struct hermes *hw, __le16 *pda,
397 u32 pda_addr, u16 pda_len);
398 int (*program_init)(struct hermes *hw, u32 entry_point);
399 int (*program_end)(struct hermes *hw);
400 int (*program)(struct hermes *hw, const char *buf,
401 u32 addr, u32 len);
402 void (*lock_irqsave)(spinlock_t *lock, unsigned long *flags);
403 void (*unlock_irqrestore)(spinlock_t *lock, unsigned long *flags);
404 void (*lock_irq)(spinlock_t *lock);
405 void (*unlock_irq)(spinlock_t *lock);
406};
407
377/* Basic control structure */ 408/* Basic control structure */
378typedef struct hermes { 409typedef struct hermes {
379 void __iomem *iobase; 410 void __iomem *iobase;
@@ -381,6 +412,9 @@ typedef struct hermes {
381#define HERMES_16BIT_REGSPACING 0 412#define HERMES_16BIT_REGSPACING 0
382#define HERMES_32BIT_REGSPACING 1 413#define HERMES_32BIT_REGSPACING 1
383 u16 inten; /* Which interrupts should be enabled? */ 414 u16 inten; /* Which interrupts should be enabled? */
415 bool eeprom_pda;
416 const struct hermes_ops *ops;
417 void *priv;
384} hermes_t; 418} hermes_t;
385 419
386/* Register access convenience macros */ 420/* Register access convenience macros */
@@ -394,22 +428,6 @@ typedef struct hermes {
394 428
395/* Function prototypes */ 429/* Function prototypes */
396void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing); 430void hermes_struct_init(hermes_t *hw, void __iomem *address, int reg_spacing);
397int hermes_init(hermes_t *hw);
398int hermes_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
399 struct hermes_response *resp);
400int hermes_doicmd_wait(hermes_t *hw, u16 cmd,
401 u16 parm0, u16 parm1, u16 parm2,
402 struct hermes_response *resp);
403int hermes_allocate(hermes_t *hw, u16 size, u16 *fid);
404
405int hermes_bap_pread(hermes_t *hw, int bap, void *buf, int len,
406 u16 id, u16 offset);
407int hermes_bap_pwrite(hermes_t *hw, int bap, const void *buf, int len,
408 u16 id, u16 offset);
409int hermes_read_ltv(hermes_t *hw, int bap, u16 rid, unsigned buflen,
410 u16 *length, void *buf);
411int hermes_write_ltv(hermes_t *hw, int bap, u16 rid,
412 u16 length, const void *value);
413 431
414/* Inline functions */ 432/* Inline functions */
415 433
@@ -426,13 +444,13 @@ static inline void hermes_set_irqmask(hermes_t *hw, u16 events)
426 444
427static inline int hermes_enable_port(hermes_t *hw, int port) 445static inline int hermes_enable_port(hermes_t *hw, int port)
428{ 446{
429 return hermes_docmd_wait(hw, HERMES_CMD_ENABLE | (port << 8), 447 return hw->ops->cmd_wait(hw, HERMES_CMD_ENABLE | (port << 8),
430 0, NULL); 448 0, NULL);
431} 449}
432 450
433static inline int hermes_disable_port(hermes_t *hw, int port) 451static inline int hermes_disable_port(hermes_t *hw, int port)
434{ 452{
435 return hermes_docmd_wait(hw, HERMES_CMD_DISABLE | (port << 8), 453 return hw->ops->cmd_wait(hw, HERMES_CMD_DISABLE | (port << 8),
436 0, NULL); 454 0, NULL);
437} 455}
438 456
@@ -440,7 +458,7 @@ static inline int hermes_disable_port(hermes_t *hw, int port)
440 * information frame in __orinoco_ev_info() */ 458 * information frame in __orinoco_ev_info() */
441static inline int hermes_inquire(hermes_t *hw, u16 rid) 459static inline int hermes_inquire(hermes_t *hw, u16 rid)
442{ 460{
443 return hermes_docmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL); 461 return hw->ops->cmd_wait(hw, HERMES_CMD_INQUIRE, rid, NULL);
444} 462}
445 463
446#define HERMES_BYTES_TO_RECLEN(n) ((((n)+1)/2) + 1) 464#define HERMES_BYTES_TO_RECLEN(n) ((((n)+1)/2) + 1)
@@ -475,10 +493,10 @@ static inline void hermes_clear_words(struct hermes *hw, int off,
475} 493}
476 494
477#define HERMES_READ_RECORD(hw, bap, rid, buf) \ 495#define HERMES_READ_RECORD(hw, bap, rid, buf) \
478 (hermes_read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf))) 496 (hw->ops->read_ltv((hw), (bap), (rid), sizeof(*buf), NULL, (buf)))
479#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \ 497#define HERMES_WRITE_RECORD(hw, bap, rid, buf) \
480 (hermes_write_ltv((hw), (bap), (rid), \ 498 (hw->ops->write_ltv((hw), (bap), (rid), \
481 HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf))) 499 HERMES_BYTES_TO_RECLEN(sizeof(*buf)), (buf)))
482 500
483static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word) 501static inline int hermes_read_wordrec(hermes_t *hw, int bap, u16 rid, u16 *word)
484{ 502{
diff --git a/drivers/net/wireless/orinoco/hermes_dld.c b/drivers/net/wireless/orinoco/hermes_dld.c
index fb157eb889ca..6da85e75fce0 100644
--- a/drivers/net/wireless/orinoco/hermes_dld.c
+++ b/drivers/net/wireless/orinoco/hermes_dld.c
@@ -46,37 +46,11 @@
46 46
47#define PFX "hermes_dld: " 47#define PFX "hermes_dld: "
48 48
49/*
50 * AUX port access. To unlock the AUX port write the access keys to the
51 * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL
52 * register. Then read it and make sure it's HERMES_AUX_ENABLED.
53 */
54#define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */
55#define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */
56#define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */
57#define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */
58
59#define HERMES_AUX_PW0 0xFE01
60#define HERMES_AUX_PW1 0xDC23
61#define HERMES_AUX_PW2 0xBA45
62
63/* HERMES_CMD_DOWNLD */
64#define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD)
65#define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD)
66#define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD)
67#define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD)
68
69/* End markers used in dblocks */ 49/* End markers used in dblocks */
70#define PDI_END 0x00000000 /* End of PDA */ 50#define PDI_END 0x00000000 /* End of PDA */
71#define BLOCK_END 0xFFFFFFFF /* Last image block */ 51#define BLOCK_END 0xFFFFFFFF /* Last image block */
72#define TEXT_END 0x1A /* End of text header */ 52#define TEXT_END 0x1A /* End of text header */
73 53
74/* Limit the amout we try to download in a single shot.
75 * Size is in bytes.
76 */
77#define MAX_DL_SIZE 1024
78#define LIMIT_PROGRAM_SIZE 0
79
80/* 54/*
81 * The following structures have little-endian fields denoted by 55 * The following structures have little-endian fields denoted by
82 * the leading underscore. Don't access them directly - use inline 56 * the leading underscore. Don't access them directly - use inline
@@ -165,41 +139,6 @@ pdi_len(const struct pdi *pdi)
165 return 2 * (le16_to_cpu(pdi->len) - 1); 139 return 2 * (le16_to_cpu(pdi->len) - 1);
166} 140}
167 141
168/*** Hermes AUX control ***/
169
170static inline void
171hermes_aux_setaddr(hermes_t *hw, u32 addr)
172{
173 hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7));
174 hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F));
175}
176
177static inline int
178hermes_aux_control(hermes_t *hw, int enabled)
179{
180 int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED;
181 int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE;
182 int i;
183
184 /* Already open? */
185 if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state)
186 return 0;
187
188 hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0);
189 hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1);
190 hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2);
191 hermes_write_reg(hw, HERMES_CONTROL, action);
192
193 for (i = 0; i < 20; i++) {
194 udelay(10);
195 if (hermes_read_reg(hw, HERMES_CONTROL) ==
196 desired_state)
197 return 0;
198 }
199
200 return -EBUSY;
201}
202
203/*** Plug Data Functions ***/ 142/*** Plug Data Functions ***/
204 143
205/* 144/*
@@ -271,62 +210,7 @@ hermes_plug_pdi(hermes_t *hw, const struct pdr *first_pdr,
271 return -EINVAL; 210 return -EINVAL;
272 211
273 /* do the actual plugging */ 212 /* do the actual plugging */
274 hermes_aux_setaddr(hw, pdr_addr(pdr)); 213 hw->ops->program(hw, pdi->data, pdr_addr(pdr), pdi_len(pdi));
275 hermes_write_bytes(hw, HERMES_AUXDATA, pdi->data, pdi_len(pdi));
276
277 return 0;
278}
279
280/* Read PDA from the adapter */
281int hermes_read_pda(hermes_t *hw,
282 __le16 *pda,
283 u32 pda_addr,
284 u16 pda_len,
285 int use_eeprom) /* can we get this into hw? */
286{
287 int ret;
288 u16 pda_size;
289 u16 data_len = pda_len;
290 __le16 *data = pda;
291
292 if (use_eeprom) {
293 /* PDA of spectrum symbol is in eeprom */
294
295 /* Issue command to read EEPROM */
296 ret = hermes_docmd_wait(hw, HERMES_CMD_READMIF, 0, NULL);
297 if (ret)
298 return ret;
299 } else {
300 /* wl_lkm does not include PDA size in the PDA area.
301 * We will pad the information into pda, so other routines
302 * don't have to be modified */
303 pda[0] = cpu_to_le16(pda_len - 2);
304 /* Includes CFG_PROD_DATA but not itself */
305 pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */
306 data_len = pda_len - 4;
307 data = pda + 2;
308 }
309
310 /* Open auxiliary port */
311 ret = hermes_aux_control(hw, 1);
312 pr_debug(PFX "AUX enable returned %d\n", ret);
313 if (ret)
314 return ret;
315
316 /* read PDA from EEPROM */
317 hermes_aux_setaddr(hw, pda_addr);
318 hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2);
319
320 /* Close aux port */
321 ret = hermes_aux_control(hw, 0);
322 pr_debug(PFX "AUX disable returned %d\n", ret);
323
324 /* Check PDA length */
325 pda_size = le16_to_cpu(pda[0]);
326 pr_debug(PFX "Actual PDA length %d, Max allowed %d\n",
327 pda_size, pda_len);
328 if (pda_size > pda_len)
329 return -EINVAL;
330 214
331 return 0; 215 return 0;
332} 216}
@@ -389,101 +273,13 @@ hermes_blocks_length(const char *first_block, const void *end)
389 273
390/*** Hermes programming ***/ 274/*** Hermes programming ***/
391 275
392/* About to start programming data (Hermes I)
393 * offset is the entry point
394 *
395 * Spectrum_cs' Symbol fw does not require this
396 * wl_lkm Agere fw does
397 * Don't know about intersil
398 */
399int hermesi_program_init(hermes_t *hw, u32 offset)
400{
401 int err;
402
403 /* Disable interrupts?*/
404 /*hw->inten = 0x0;*/
405 /*hermes_write_regn(hw, INTEN, 0);*/
406 /*hermes_set_irqmask(hw, 0);*/
407
408 /* Acknowledge any outstanding command */
409 hermes_write_regn(hw, EVACK, 0xFFFF);
410
411 /* Using doicmd_wait rather than docmd_wait */
412 err = hermes_doicmd_wait(hw,
413 0x0100 | HERMES_CMD_INIT,
414 0, 0, 0, NULL);
415 if (err)
416 return err;
417
418 err = hermes_doicmd_wait(hw,
419 0x0000 | HERMES_CMD_INIT,
420 0, 0, 0, NULL);
421 if (err)
422 return err;
423
424 err = hermes_aux_control(hw, 1);
425 pr_debug(PFX "AUX enable returned %d\n", err);
426
427 if (err)
428 return err;
429
430 pr_debug(PFX "Enabling volatile, EP 0x%08x\n", offset);
431 err = hermes_doicmd_wait(hw,
432 HERMES_PROGRAM_ENABLE_VOLATILE,
433 offset & 0xFFFFu,
434 offset >> 16,
435 0,
436 NULL);
437 pr_debug(PFX "PROGRAM_ENABLE returned %d\n", err);
438
439 return err;
440}
441
442/* Done programming data (Hermes I)
443 *
444 * Spectrum_cs' Symbol fw does not require this
445 * wl_lkm Agere fw does
446 * Don't know about intersil
447 */
448int hermesi_program_end(hermes_t *hw)
449{
450 struct hermes_response resp;
451 int rc = 0;
452 int err;
453
454 rc = hermes_docmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp);
455
456 pr_debug(PFX "PROGRAM_DISABLE returned %d, "
457 "r0 0x%04x, r1 0x%04x, r2 0x%04x\n",
458 rc, resp.resp0, resp.resp1, resp.resp2);
459
460 if ((rc == 0) &&
461 ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD))
462 rc = -EIO;
463
464 err = hermes_aux_control(hw, 0);
465 pr_debug(PFX "AUX disable returned %d\n", err);
466
467 /* Acknowledge any outstanding command */
468 hermes_write_regn(hw, EVACK, 0xFFFF);
469
470 /* Reinitialise, ignoring return */
471 (void) hermes_doicmd_wait(hw, 0x0000 | HERMES_CMD_INIT,
472 0, 0, 0, NULL);
473
474 return rc ? rc : err;
475}
476
477/* Program the data blocks */ 276/* Program the data blocks */
478int hermes_program(hermes_t *hw, const char *first_block, const void *end) 277int hermes_program(hermes_t *hw, const char *first_block, const void *end)
479{ 278{
480 const struct dblock *blk; 279 const struct dblock *blk;
481 u32 blkaddr; 280 u32 blkaddr;
482 u32 blklen; 281 u32 blklen;
483#if LIMIT_PROGRAM_SIZE 282 int err = 0;
484 u32 addr;
485 u32 len;
486#endif
487 283
488 blk = (const struct dblock *) first_block; 284 blk = (const struct dblock *) first_block;
489 285
@@ -498,30 +294,10 @@ int hermes_program(hermes_t *hw, const char *first_block, const void *end)
498 pr_debug(PFX "Programming block of length %d " 294 pr_debug(PFX "Programming block of length %d "
499 "to address 0x%08x\n", blklen, blkaddr); 295 "to address 0x%08x\n", blklen, blkaddr);
500 296
501#if !LIMIT_PROGRAM_SIZE 297 err = hw->ops->program(hw, blk->data, blkaddr, blklen);
502 /* wl_lkm driver splits this into writes of 2000 bytes */ 298 if (err)
503 hermes_aux_setaddr(hw, blkaddr); 299 break;
504 hermes_write_bytes(hw, HERMES_AUXDATA, blk->data, 300
505 blklen);
506#else
507 len = (blklen < MAX_DL_SIZE) ? blklen : MAX_DL_SIZE;
508 addr = blkaddr;
509
510 while (addr < (blkaddr + blklen)) {
511 pr_debug(PFX "Programming subblock of length %d "
512 "to address 0x%08x. Data @ %p\n",
513 len, addr, &blk->data[addr - blkaddr]);
514
515 hermes_aux_setaddr(hw, addr);
516 hermes_write_bytes(hw, HERMES_AUXDATA,
517 &blk->data[addr - blkaddr],
518 len);
519
520 addr += len;
521 len = ((blkaddr + blklen - addr) < MAX_DL_SIZE) ?
522 (blkaddr + blklen - addr) : MAX_DL_SIZE;
523 }
524#endif
525 blk = (const struct dblock *) &blk->data[blklen]; 301 blk = (const struct dblock *) &blk->data[blklen];
526 302
527 if ((void *) blk > (end - sizeof(*blk))) 303 if ((void *) blk > (end - sizeof(*blk)))
@@ -530,7 +306,7 @@ int hermes_program(hermes_t *hw, const char *first_block, const void *end)
530 blkaddr = dblock_addr(blk); 306 blkaddr = dblock_addr(blk);
531 blklen = dblock_len(blk); 307 blklen = dblock_len(blk);
532 } 308 }
533 return 0; 309 return err;
534} 310}
535 311
536/*** Default plugging data for Hermes I ***/ 312/*** Default plugging data for Hermes I ***/
@@ -690,9 +466,8 @@ int hermes_apply_pda_with_defaults(hermes_t *hw,
690 if ((pdi_len(pdi) == pdr_len(pdr)) && 466 if ((pdi_len(pdi) == pdr_len(pdr)) &&
691 ((void *) pdi->data + pdi_len(pdi) < pda_end)) { 467 ((void *) pdi->data + pdi_len(pdi) < pda_end)) {
692 /* do the actual plugging */ 468 /* do the actual plugging */
693 hermes_aux_setaddr(hw, pdr_addr(pdr)); 469 hw->ops->program(hw, pdi->data, pdr_addr(pdr),
694 hermes_write_bytes(hw, HERMES_AUXDATA, 470 pdi_len(pdi));
695 pdi->data, pdi_len(pdi));
696 } 471 }
697 } 472 }
698 473
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index e6369242e49c..6fbd78850123 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -177,9 +177,9 @@ int determine_fw_capabilities(struct orinoco_private *priv,
177 /* 3Com MAC : 00:50:DA:* */ 177 /* 3Com MAC : 00:50:DA:* */
178 memset(tmp, 0, sizeof(tmp)); 178 memset(tmp, 0, sizeof(tmp));
179 /* Get the Symbol firmware version */ 179 /* Get the Symbol firmware version */
180 err = hermes_read_ltv(hw, USER_BAP, 180 err = hw->ops->read_ltv(hw, USER_BAP,
181 HERMES_RID_SECONDARYVERSION_SYMBOL, 181 HERMES_RID_SECONDARYVERSION_SYMBOL,
182 SYMBOL_MAX_VER_LEN, NULL, &tmp); 182 SYMBOL_MAX_VER_LEN, NULL, &tmp);
183 if (err) { 183 if (err) {
184 dev_warn(dev, "Error %d reading Symbol firmware info. " 184 dev_warn(dev, "Error %d reading Symbol firmware info. "
185 "Wildly guessing capabilities...\n", err); 185 "Wildly guessing capabilities...\n", err);
@@ -262,6 +262,13 @@ int determine_fw_capabilities(struct orinoco_private *priv,
262 if (fw_name) 262 if (fw_name)
263 dev_info(dev, "Firmware determined as %s\n", fw_name); 263 dev_info(dev, "Firmware determined as %s\n", fw_name);
264 264
265#ifndef CONFIG_HERMES_PRISM
266 if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL) {
267 dev_err(dev, "Support for Prism chipset is not enabled\n");
268 return -ENODEV;
269 }
270#endif
271
265 return 0; 272 return 0;
266} 273}
267 274
@@ -279,8 +286,8 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
279 u16 reclen; 286 u16 reclen;
280 287
281 /* Get the MAC address */ 288 /* Get the MAC address */
282 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR, 289 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
283 ETH_ALEN, NULL, dev_addr); 290 ETH_ALEN, NULL, dev_addr);
284 if (err) { 291 if (err) {
285 dev_warn(dev, "Failed to read MAC address!\n"); 292 dev_warn(dev, "Failed to read MAC address!\n");
286 goto out; 293 goto out;
@@ -289,8 +296,8 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
289 dev_dbg(dev, "MAC address %pM\n", dev_addr); 296 dev_dbg(dev, "MAC address %pM\n", dev_addr);
290 297
291 /* Get the station name */ 298 /* Get the station name */
292 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME, 299 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
293 sizeof(nickbuf), &reclen, &nickbuf); 300 sizeof(nickbuf), &reclen, &nickbuf);
294 if (err) { 301 if (err) {
295 dev_err(dev, "failed to read station name\n"); 302 dev_err(dev, "failed to read station name\n");
296 goto out; 303 goto out;
@@ -367,6 +374,32 @@ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr)
367 err = hermes_read_wordrec(hw, USER_BAP, 374 err = hermes_read_wordrec(hw, USER_BAP,
368 HERMES_RID_CNFPREAMBLE_SYMBOL, 375 HERMES_RID_CNFPREAMBLE_SYMBOL,
369 &priv->preamble); 376 &priv->preamble);
377 if (err) {
378 dev_err(dev, "Failed to read preamble setup\n");
379 goto out;
380 }
381 }
382
383 /* Retry settings */
384 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
385 &priv->short_retry_limit);
386 if (err) {
387 dev_err(dev, "Failed to read short retry limit\n");
388 goto out;
389 }
390
391 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
392 &priv->long_retry_limit);
393 if (err) {
394 dev_err(dev, "Failed to read long retry limit\n");
395 goto out;
396 }
397
398 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
399 &priv->retry_lifetime);
400 if (err) {
401 dev_err(dev, "Failed to read max retry lifetime\n");
402 goto out;
370 } 403 }
371 404
372out: 405out:
@@ -380,11 +413,11 @@ int orinoco_hw_allocate_fid(struct orinoco_private *priv)
380 struct hermes *hw = &priv->hw; 413 struct hermes *hw = &priv->hw;
381 int err; 414 int err;
382 415
383 err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid); 416 err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
384 if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) { 417 if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) {
385 /* Try workaround for old Symbol firmware bug */ 418 /* Try workaround for old Symbol firmware bug */
386 priv->nicbuf_size = TX_NICBUF_SIZE_BUG; 419 priv->nicbuf_size = TX_NICBUF_SIZE_BUG;
387 err = hermes_allocate(hw, priv->nicbuf_size, &priv->txfid); 420 err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid);
388 421
389 dev_warn(dev, "Firmware ALLOC bug detected " 422 dev_warn(dev, "Firmware ALLOC bug detected "
390 "(old Symbol firmware?). Work around %s\n", 423 "(old Symbol firmware?). Work around %s\n",
@@ -430,8 +463,9 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
430 struct hermes_idstring idbuf; 463 struct hermes_idstring idbuf;
431 464
432 /* Set the MAC address */ 465 /* Set the MAC address */
433 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR, 466 err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR,
434 HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr); 467 HERMES_BYTES_TO_RECLEN(ETH_ALEN),
468 dev->dev_addr);
435 if (err) { 469 if (err) {
436 printk(KERN_ERR "%s: Error %d setting MAC address\n", 470 printk(KERN_ERR "%s: Error %d setting MAC address\n",
437 dev->name, err); 471 dev->name, err);
@@ -494,7 +528,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
494 idbuf.len = cpu_to_le16(strlen(priv->desired_essid)); 528 idbuf.len = cpu_to_le16(strlen(priv->desired_essid));
495 memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val)); 529 memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val));
496 /* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */ 530 /* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */
497 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID, 531 err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID,
498 HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2), 532 HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
499 &idbuf); 533 &idbuf);
500 if (err) { 534 if (err) {
@@ -502,7 +536,7 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
502 dev->name, err); 536 dev->name, err);
503 return err; 537 return err;
504 } 538 }
505 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID, 539 err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID,
506 HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2), 540 HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid)+2),
507 &idbuf); 541 &idbuf);
508 if (err) { 542 if (err) {
@@ -514,9 +548,9 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
514 /* Set the station name */ 548 /* Set the station name */
515 idbuf.len = cpu_to_le16(strlen(priv->nick)); 549 idbuf.len = cpu_to_le16(strlen(priv->nick));
516 memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val)); 550 memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val));
517 err = hermes_write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME, 551 err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME,
518 HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2), 552 HERMES_BYTES_TO_RECLEN(strlen(priv->nick)+2),
519 &idbuf); 553 &idbuf);
520 if (err) { 554 if (err) {
521 printk(KERN_ERR "%s: Error %d setting nickname\n", 555 printk(KERN_ERR "%s: Error %d setting nickname\n",
522 dev->name, err); 556 dev->name, err);
@@ -631,12 +665,12 @@ int orinoco_hw_program_rids(struct orinoco_private *priv)
631 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { 665 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
632 /* Enable monitor mode */ 666 /* Enable monitor mode */
633 dev->type = ARPHRD_IEEE80211; 667 dev->type = ARPHRD_IEEE80211;
634 err = hermes_docmd_wait(hw, HERMES_CMD_TEST | 668 err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
635 HERMES_TEST_MONITOR, 0, NULL); 669 HERMES_TEST_MONITOR, 0, NULL);
636 } else { 670 } else {
637 /* Disable monitor mode */ 671 /* Disable monitor mode */
638 dev->type = ARPHRD_ETHER; 672 dev->type = ARPHRD_ETHER;
639 err = hermes_docmd_wait(hw, HERMES_CMD_TEST | 673 err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
640 HERMES_TEST_STOP, 0, NULL); 674 HERMES_TEST_STOP, 0, NULL);
641 } 675 }
642 if (err) 676 if (err)
@@ -662,8 +696,8 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
662 if ((key < 0) || (key >= 4)) 696 if ((key < 0) || (key >= 4))
663 return -EINVAL; 697 return -EINVAL;
664 698
665 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV, 699 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
666 sizeof(tsc_arr), NULL, &tsc_arr); 700 sizeof(tsc_arr), NULL, &tsc_arr);
667 if (!err) 701 if (!err)
668 memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0])); 702 memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0]));
669 703
@@ -842,7 +876,7 @@ int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv)
842 memcpy(key, priv->keys[i].key, 876 memcpy(key, priv->keys[i].key,
843 priv->keys[i].key_len); 877 priv->keys[i].key_len);
844 878
845 err = hermes_write_ltv(hw, USER_BAP, 879 err = hw->ops->write_ltv(hw, USER_BAP,
846 HERMES_RID_CNFDEFAULTKEY0 + i, 880 HERMES_RID_CNFDEFAULTKEY0 + i,
847 HERMES_BYTES_TO_RECLEN(keylen), 881 HERMES_BYTES_TO_RECLEN(keylen),
848 key); 882 key);
@@ -1049,17 +1083,17 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
1049 * group address if either we want to multicast, or if we were 1083 * group address if either we want to multicast, or if we were
1050 * multicasting and want to stop */ 1084 * multicasting and want to stop */
1051 if (!promisc && (mc_count || priv->mc_count)) { 1085 if (!promisc && (mc_count || priv->mc_count)) {
1052 struct dev_mc_list *p; 1086 struct netdev_hw_addr *ha;
1053 struct hermes_multicast mclist; 1087 struct hermes_multicast mclist;
1054 int i = 0; 1088 int i = 0;
1055 1089
1056 netdev_for_each_mc_addr(p, dev) { 1090 netdev_for_each_mc_addr(ha, dev) {
1057 if (i == mc_count) 1091 if (i == mc_count)
1058 break; 1092 break;
1059 memcpy(mclist.addr[i++], p->dmi_addr, ETH_ALEN); 1093 memcpy(mclist.addr[i++], ha->addr, ETH_ALEN);
1060 } 1094 }
1061 1095
1062 err = hermes_write_ltv(hw, USER_BAP, 1096 err = hw->ops->write_ltv(hw, USER_BAP,
1063 HERMES_RID_CNFGROUPADDRESSES, 1097 HERMES_RID_CNFGROUPADDRESSES,
1064 HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN), 1098 HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
1065 &mclist); 1099 &mclist);
@@ -1101,15 +1135,15 @@ int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
1101 rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID : 1135 rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID :
1102 HERMES_RID_CNFDESIREDSSID; 1136 HERMES_RID_CNFDESIREDSSID;
1103 1137
1104 err = hermes_read_ltv(hw, USER_BAP, rid, sizeof(essidbuf), 1138 err = hw->ops->read_ltv(hw, USER_BAP, rid, sizeof(essidbuf),
1105 NULL, &essidbuf); 1139 NULL, &essidbuf);
1106 if (err) 1140 if (err)
1107 goto fail_unlock; 1141 goto fail_unlock;
1108 } else { 1142 } else {
1109 *active = 0; 1143 *active = 0;
1110 1144
1111 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID, 1145 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID,
1112 sizeof(essidbuf), NULL, &essidbuf); 1146 sizeof(essidbuf), NULL, &essidbuf);
1113 if (err) 1147 if (err)
1114 goto fail_unlock; 1148 goto fail_unlock;
1115 } 1149 }
@@ -1180,8 +1214,8 @@ int orinoco_hw_get_bitratelist(struct orinoco_private *priv,
1180 if (orinoco_lock(priv, &flags) != 0) 1214 if (orinoco_lock(priv, &flags) != 0)
1181 return -EBUSY; 1215 return -EBUSY;
1182 1216
1183 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES, 1217 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES,
1184 sizeof(list), NULL, &list); 1218 sizeof(list), NULL, &list);
1185 orinoco_unlock(priv, &flags); 1219 orinoco_unlock(priv, &flags);
1186 1220
1187 if (err) 1221 if (err)
@@ -1248,7 +1282,7 @@ int orinoco_hw_trigger_scan(struct orinoco_private *priv,
1248 idbuf.len = cpu_to_le16(len); 1282 idbuf.len = cpu_to_le16(len);
1249 memcpy(idbuf.val, ssid->ssid, len); 1283 memcpy(idbuf.val, ssid->ssid, len);
1250 1284
1251 err = hermes_write_ltv(hw, USER_BAP, 1285 err = hw->ops->write_ltv(hw, USER_BAP,
1252 HERMES_RID_CNFSCANSSID_AGERE, 1286 HERMES_RID_CNFSCANSSID_AGERE,
1253 HERMES_BYTES_TO_RECLEN(len + 2), 1287 HERMES_BYTES_TO_RECLEN(len + 2),
1254 &idbuf); 1288 &idbuf);
@@ -1312,8 +1346,8 @@ int orinoco_hw_get_current_bssid(struct orinoco_private *priv,
1312 hermes_t *hw = &priv->hw; 1346 hermes_t *hw = &priv->hw;
1313 int err; 1347 int err;
1314 1348
1315 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID, 1349 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
1316 ETH_ALEN, NULL, addr); 1350 ETH_ALEN, NULL, addr);
1317 1351
1318 return err; 1352 return err;
1319} 1353}
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 9799a1d14a63..97af71e79950 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -22,7 +22,6 @@
22 22
23/* Forward declarations */ 23/* Forward declarations */
24struct orinoco_private; 24struct orinoco_private;
25struct dev_addr_list;
26 25
27int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name, 26int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name,
28 size_t fw_name_len, u32 *hw_ver); 27 size_t fw_name_len, u32 *hw_ver);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 413e9ab6cab3..ca71f08709bc 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -254,7 +254,7 @@ void set_port_type(struct orinoco_private *priv)
254/* Device methods */ 254/* Device methods */
255/********************************************************************/ 255/********************************************************************/
256 256
257static int orinoco_open(struct net_device *dev) 257int orinoco_open(struct net_device *dev)
258{ 258{
259 struct orinoco_private *priv = ndev_priv(dev); 259 struct orinoco_private *priv = ndev_priv(dev);
260 unsigned long flags; 260 unsigned long flags;
@@ -272,8 +272,9 @@ static int orinoco_open(struct net_device *dev)
272 272
273 return err; 273 return err;
274} 274}
275EXPORT_SYMBOL(orinoco_open);
275 276
276static int orinoco_stop(struct net_device *dev) 277int orinoco_stop(struct net_device *dev)
277{ 278{
278 struct orinoco_private *priv = ndev_priv(dev); 279 struct orinoco_private *priv = ndev_priv(dev);
279 int err = 0; 280 int err = 0;
@@ -281,25 +282,27 @@ static int orinoco_stop(struct net_device *dev)
281 /* We mustn't use orinoco_lock() here, because we need to be 282 /* We mustn't use orinoco_lock() here, because we need to be
282 able to close the interface even if hw_unavailable is set 283 able to close the interface even if hw_unavailable is set
283 (e.g. as we're released after a PC Card removal) */ 284 (e.g. as we're released after a PC Card removal) */
284 spin_lock_irq(&priv->lock); 285 orinoco_lock_irq(priv);
285 286
286 priv->open = 0; 287 priv->open = 0;
287 288
288 err = __orinoco_down(priv); 289 err = __orinoco_down(priv);
289 290
290 spin_unlock_irq(&priv->lock); 291 orinoco_unlock_irq(priv);
291 292
292 return err; 293 return err;
293} 294}
295EXPORT_SYMBOL(orinoco_stop);
294 296
295static struct net_device_stats *orinoco_get_stats(struct net_device *dev) 297struct net_device_stats *orinoco_get_stats(struct net_device *dev)
296{ 298{
297 struct orinoco_private *priv = ndev_priv(dev); 299 struct orinoco_private *priv = ndev_priv(dev);
298 300
299 return &priv->stats; 301 return &priv->stats;
300} 302}
303EXPORT_SYMBOL(orinoco_get_stats);
301 304
302static void orinoco_set_multicast_list(struct net_device *dev) 305void orinoco_set_multicast_list(struct net_device *dev)
303{ 306{
304 struct orinoco_private *priv = ndev_priv(dev); 307 struct orinoco_private *priv = ndev_priv(dev);
305 unsigned long flags; 308 unsigned long flags;
@@ -313,8 +316,9 @@ static void orinoco_set_multicast_list(struct net_device *dev)
313 __orinoco_set_multicast_list(dev); 316 __orinoco_set_multicast_list(dev);
314 orinoco_unlock(priv, &flags); 317 orinoco_unlock(priv, &flags);
315} 318}
319EXPORT_SYMBOL(orinoco_set_multicast_list);
316 320
317static int orinoco_change_mtu(struct net_device *dev, int new_mtu) 321int orinoco_change_mtu(struct net_device *dev, int new_mtu)
318{ 322{
319 struct orinoco_private *priv = ndev_priv(dev); 323 struct orinoco_private *priv = ndev_priv(dev);
320 324
@@ -330,23 +334,115 @@ static int orinoco_change_mtu(struct net_device *dev, int new_mtu)
330 334
331 return 0; 335 return 0;
332} 336}
337EXPORT_SYMBOL(orinoco_change_mtu);
333 338
334/********************************************************************/ 339/********************************************************************/
335/* Tx path */ 340/* Tx path */
336/********************************************************************/ 341/********************************************************************/
337 342
343/* Add encapsulation and MIC to the existing SKB.
344 * The main xmit routine will then send the whole lot to the card.
345 * Need 8 bytes headroom
346 * Need 8 bytes tailroom
347 *
348 * With encapsulated ethernet II frame
349 * --------
350 * 803.3 header (14 bytes)
351 * dst[6]
352 * -------- src[6]
353 * 803.3 header (14 bytes) len[2]
354 * dst[6] 803.2 header (8 bytes)
355 * src[6] encaps[6]
356 * len[2] <- leave alone -> len[2]
357 * -------- -------- <-- 0
358 * Payload Payload
359 * ... ...
360 *
361 * -------- --------
362 * MIC (8 bytes)
363 * --------
364 *
365 * returns 0 on success, -ENOMEM on error.
366 */
367int orinoco_process_xmit_skb(struct sk_buff *skb,
368 struct net_device *dev,
369 struct orinoco_private *priv,
370 int *tx_control,
371 u8 *mic_buf)
372{
373 struct orinoco_tkip_key *key;
374 struct ethhdr *eh;
375 int do_mic;
376
377 key = (struct orinoco_tkip_key *) priv->keys[priv->tx_key].key;
378
379 do_mic = ((priv->encode_alg == ORINOCO_ALG_TKIP) &&
380 (key != NULL));
381
382 if (do_mic)
383 *tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) |
384 HERMES_TXCTRL_MIC;
385
386 eh = (struct ethhdr *)skb->data;
387
388 /* Encapsulate Ethernet-II frames */
389 if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
390 struct header_struct {
391 struct ethhdr eth; /* 802.3 header */
392 u8 encap[6]; /* 802.2 header */
393 } __attribute__ ((packed)) hdr;
394 int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN);
395
396 if (skb_headroom(skb) < ENCAPS_OVERHEAD) {
397 if (net_ratelimit())
398 printk(KERN_ERR
399 "%s: Not enough headroom for 802.2 headers %d\n",
400 dev->name, skb_headroom(skb));
401 return -ENOMEM;
402 }
403
404 /* Fill in new header */
405 memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
406 hdr.eth.h_proto = htons(len);
407 memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
408
409 /* Make room for the new header, and copy it in */
410 eh = (struct ethhdr *) skb_push(skb, ENCAPS_OVERHEAD);
411 memcpy(eh, &hdr, sizeof(hdr));
412 }
413
414 /* Calculate Michael MIC */
415 if (do_mic) {
416 size_t len = skb->len - ETH_HLEN;
417 u8 *mic = &mic_buf[0];
418
419 /* Have to write to an even address, so copy the spare
420 * byte across */
421 if (skb->len % 2) {
422 *mic = skb->data[skb->len - 1];
423 mic++;
424 }
425
426 orinoco_mic(priv->tx_tfm_mic, key->tx_mic,
427 eh->h_dest, eh->h_source, 0 /* priority */,
428 skb->data + ETH_HLEN,
429 len, mic);
430 }
431
432 return 0;
433}
434EXPORT_SYMBOL(orinoco_process_xmit_skb);
435
338static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev) 436static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
339{ 437{
340 struct orinoco_private *priv = ndev_priv(dev); 438 struct orinoco_private *priv = ndev_priv(dev);
341 struct net_device_stats *stats = &priv->stats; 439 struct net_device_stats *stats = &priv->stats;
342 struct orinoco_tkip_key *key;
343 hermes_t *hw = &priv->hw; 440 hermes_t *hw = &priv->hw;
344 int err = 0; 441 int err = 0;
345 u16 txfid = priv->txfid; 442 u16 txfid = priv->txfid;
346 struct ethhdr *eh;
347 int tx_control; 443 int tx_control;
348 unsigned long flags; 444 unsigned long flags;
349 int do_mic; 445 u8 mic_buf[MICHAEL_MIC_LEN+1];
350 446
351 if (!netif_running(dev)) { 447 if (!netif_running(dev)) {
352 printk(KERN_ERR "%s: Tx on stopped device!\n", 448 printk(KERN_ERR "%s: Tx on stopped device!\n",
@@ -378,16 +474,12 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
378 if (skb->len < ETH_HLEN) 474 if (skb->len < ETH_HLEN)
379 goto drop; 475 goto drop;
380 476
381 key = (struct orinoco_tkip_key *) priv->keys[priv->tx_key].key;
382
383 do_mic = ((priv->encode_alg == ORINOCO_ALG_TKIP) &&
384 (key != NULL));
385
386 tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX; 477 tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX;
387 478
388 if (do_mic) 479 err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
389 tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) | 480 &mic_buf[0]);
390 HERMES_TXCTRL_MIC; 481 if (err)
482 goto drop;
391 483
392 if (priv->has_alt_txcntl) { 484 if (priv->has_alt_txcntl) {
393 /* WPA enabled firmwares have tx_cntl at the end of 485 /* WPA enabled firmwares have tx_cntl at the end of
@@ -400,8 +492,8 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
400 memset(&desc, 0, sizeof(desc)); 492 memset(&desc, 0, sizeof(desc));
401 493
402 *txcntl = cpu_to_le16(tx_control); 494 *txcntl = cpu_to_le16(tx_control);
403 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), 495 err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
404 txfid, 0); 496 txfid, 0);
405 if (err) { 497 if (err) {
406 if (net_ratelimit()) 498 if (net_ratelimit())
407 printk(KERN_ERR "%s: Error %d writing Tx " 499 printk(KERN_ERR "%s: Error %d writing Tx "
@@ -414,8 +506,8 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
414 memset(&desc, 0, sizeof(desc)); 506 memset(&desc, 0, sizeof(desc));
415 507
416 desc.tx_control = cpu_to_le16(tx_control); 508 desc.tx_control = cpu_to_le16(tx_control);
417 err = hermes_bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), 509 err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc),
418 txfid, 0); 510 txfid, 0);
419 if (err) { 511 if (err) {
420 if (net_ratelimit()) 512 if (net_ratelimit())
421 printk(KERN_ERR "%s: Error %d writing Tx " 513 printk(KERN_ERR "%s: Error %d writing Tx "
@@ -430,68 +522,24 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
430 HERMES_802_3_OFFSET - HERMES_802_11_OFFSET); 522 HERMES_802_3_OFFSET - HERMES_802_11_OFFSET);
431 } 523 }
432 524
433 eh = (struct ethhdr *)skb->data; 525 err = hw->ops->bap_pwrite(hw, USER_BAP, skb->data, skb->len,
434 526 txfid, HERMES_802_3_OFFSET);
435 /* Encapsulate Ethernet-II frames */
436 if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */
437 struct header_struct {
438 struct ethhdr eth; /* 802.3 header */
439 u8 encap[6]; /* 802.2 header */
440 } __attribute__ ((packed)) hdr;
441
442 /* Strip destination and source from the data */
443 skb_pull(skb, 2 * ETH_ALEN);
444
445 /* And move them to a separate header */
446 memcpy(&hdr.eth, eh, 2 * ETH_ALEN);
447 hdr.eth.h_proto = htons(sizeof(encaps_hdr) + skb->len);
448 memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr));
449
450 /* Insert the SNAP header */
451 if (skb_headroom(skb) < sizeof(hdr)) {
452 printk(KERN_ERR
453 "%s: Not enough headroom for 802.2 headers %d\n",
454 dev->name, skb_headroom(skb));
455 goto drop;
456 }
457 eh = (struct ethhdr *) skb_push(skb, sizeof(hdr));
458 memcpy(eh, &hdr, sizeof(hdr));
459 }
460
461 err = hermes_bap_pwrite(hw, USER_BAP, skb->data, skb->len,
462 txfid, HERMES_802_3_OFFSET);
463 if (err) { 527 if (err) {
464 printk(KERN_ERR "%s: Error %d writing packet to BAP\n", 528 printk(KERN_ERR "%s: Error %d writing packet to BAP\n",
465 dev->name, err); 529 dev->name, err);
466 goto busy; 530 goto busy;
467 } 531 }
468 532
469 /* Calculate Michael MIC */ 533 if (tx_control & HERMES_TXCTRL_MIC) {
470 if (do_mic) { 534 size_t offset = HERMES_802_3_OFFSET + skb->len;
471 u8 mic_buf[MICHAEL_MIC_LEN + 1]; 535 size_t len = MICHAEL_MIC_LEN;
472 u8 *mic;
473 size_t offset;
474 size_t len;
475 536
476 if (skb->len % 2) { 537 if (offset % 2) {
477 /* MIC start is on an odd boundary */ 538 offset--;
478 mic_buf[0] = skb->data[skb->len - 1]; 539 len++;
479 mic = &mic_buf[1];
480 offset = skb->len - 1;
481 len = MICHAEL_MIC_LEN + 1;
482 } else {
483 mic = &mic_buf[0];
484 offset = skb->len;
485 len = MICHAEL_MIC_LEN;
486 } 540 }
487 541 err = hw->ops->bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
488 orinoco_mic(priv->tx_tfm_mic, key->tx_mic, 542 txfid, offset);
489 eh->h_dest, eh->h_source, 0 /* priority */,
490 skb->data + ETH_HLEN, skb->len - ETH_HLEN, mic);
491
492 /* Write the MIC */
493 err = hermes_bap_pwrite(hw, USER_BAP, &mic_buf[0], len,
494 txfid, HERMES_802_3_OFFSET + offset);
495 if (err) { 543 if (err) {
496 printk(KERN_ERR "%s: Error %d writing MIC to BAP\n", 544 printk(KERN_ERR "%s: Error %d writing MIC to BAP\n",
497 dev->name, err); 545 dev->name, err);
@@ -502,7 +550,7 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
502 /* Finally, we actually initiate the send */ 550 /* Finally, we actually initiate the send */
503 netif_stop_queue(dev); 551 netif_stop_queue(dev);
504 552
505 err = hermes_docmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL, 553 err = hw->ops->cmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL,
506 txfid, NULL); 554 txfid, NULL);
507 if (err) { 555 if (err) {
508 netif_start_queue(dev); 556 netif_start_queue(dev);
@@ -512,7 +560,6 @@ static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev)
512 goto busy; 560 goto busy;
513 } 561 }
514 562
515 dev->trans_start = jiffies;
516 stats->tx_bytes += HERMES_802_3_OFFSET + skb->len; 563 stats->tx_bytes += HERMES_802_3_OFFSET + skb->len;
517 goto ok; 564 goto ok;
518 565
@@ -572,9 +619,9 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
572 return; /* Nothing's really happened */ 619 return; /* Nothing's really happened */
573 620
574 /* Read part of the frame header - we need status and addr1 */ 621 /* Read part of the frame header - we need status and addr1 */
575 err = hermes_bap_pread(hw, IRQ_BAP, &hdr, 622 err = hw->ops->bap_pread(hw, IRQ_BAP, &hdr,
576 sizeof(struct hermes_txexc_data), 623 sizeof(struct hermes_txexc_data),
577 fid, 0); 624 fid, 0);
578 625
579 hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID); 626 hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID);
580 stats->tx_errors++; 627 stats->tx_errors++;
@@ -615,7 +662,7 @@ static void __orinoco_ev_txexc(struct net_device *dev, hermes_t *hw)
615 netif_wake_queue(dev); 662 netif_wake_queue(dev);
616} 663}
617 664
618static void orinoco_tx_timeout(struct net_device *dev) 665void orinoco_tx_timeout(struct net_device *dev)
619{ 666{
620 struct orinoco_private *priv = ndev_priv(dev); 667 struct orinoco_private *priv = ndev_priv(dev);
621 struct net_device_stats *stats = &priv->stats; 668 struct net_device_stats *stats = &priv->stats;
@@ -630,6 +677,7 @@ static void orinoco_tx_timeout(struct net_device *dev)
630 677
631 schedule_work(&priv->reset_work); 678 schedule_work(&priv->reset_work);
632} 679}
680EXPORT_SYMBOL(orinoco_tx_timeout);
633 681
634/********************************************************************/ 682/********************************************************************/
635/* Rx path (data frames) */ 683/* Rx path (data frames) */
@@ -764,9 +812,9 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
764 812
765 /* If any, copy the data from the card to the skb */ 813 /* If any, copy the data from the card to the skb */
766 if (datalen > 0) { 814 if (datalen > 0) {
767 err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, datalen), 815 err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, datalen),
768 ALIGN(datalen, 2), rxfid, 816 ALIGN(datalen, 2), rxfid,
769 HERMES_802_2_OFFSET); 817 HERMES_802_2_OFFSET);
770 if (err) { 818 if (err) {
771 printk(KERN_ERR "%s: error %d reading monitor frame\n", 819 printk(KERN_ERR "%s: error %d reading monitor frame\n",
772 dev->name, err); 820 dev->name, err);
@@ -792,7 +840,7 @@ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid,
792 stats->rx_dropped++; 840 stats->rx_dropped++;
793} 841}
794 842
795static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw) 843void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
796{ 844{
797 struct orinoco_private *priv = ndev_priv(dev); 845 struct orinoco_private *priv = ndev_priv(dev);
798 struct net_device_stats *stats = &priv->stats; 846 struct net_device_stats *stats = &priv->stats;
@@ -814,8 +862,8 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
814 862
815 rxfid = hermes_read_regn(hw, RXFID); 863 rxfid = hermes_read_regn(hw, RXFID);
816 864
817 err = hermes_bap_pread(hw, IRQ_BAP, desc, sizeof(*desc), 865 err = hw->ops->bap_pread(hw, IRQ_BAP, desc, sizeof(*desc),
818 rxfid, 0); 866 rxfid, 0);
819 if (err) { 867 if (err) {
820 printk(KERN_ERR "%s: error %d reading Rx descriptor. " 868 printk(KERN_ERR "%s: error %d reading Rx descriptor. "
821 "Frame dropped.\n", dev->name, err); 869 "Frame dropped.\n", dev->name, err);
@@ -882,9 +930,9 @@ static void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw)
882 nothing is removed. 2 is for aligning the IP header. */ 930 nothing is removed. 2 is for aligning the IP header. */
883 skb_reserve(skb, ETH_HLEN + 2); 931 skb_reserve(skb, ETH_HLEN + 2);
884 932
885 err = hermes_bap_pread(hw, IRQ_BAP, skb_put(skb, length), 933 err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, length),
886 ALIGN(length, 2), rxfid, 934 ALIGN(length, 2), rxfid,
887 HERMES_802_2_OFFSET); 935 HERMES_802_2_OFFSET);
888 if (err) { 936 if (err) {
889 printk(KERN_ERR "%s: error %d reading frame. " 937 printk(KERN_ERR "%s: error %d reading frame. "
890 "Frame dropped.\n", dev->name, err); 938 "Frame dropped.\n", dev->name, err);
@@ -913,6 +961,7 @@ update_stats:
913out: 961out:
914 kfree(desc); 962 kfree(desc);
915} 963}
964EXPORT_SYMBOL(__orinoco_ev_rx);
916 965
917static void orinoco_rx(struct net_device *dev, 966static void orinoco_rx(struct net_device *dev,
918 struct hermes_rx_descriptor *desc, 967 struct hermes_rx_descriptor *desc,
@@ -1145,9 +1194,9 @@ static void orinoco_join_ap(struct work_struct *work)
1145 goto out; 1194 goto out;
1146 1195
1147 /* Read scan results from the firmware */ 1196 /* Read scan results from the firmware */
1148 err = hermes_read_ltv(hw, USER_BAP, 1197 err = hw->ops->read_ltv(hw, USER_BAP,
1149 HERMES_RID_SCANRESULTSTABLE, 1198 HERMES_RID_SCANRESULTSTABLE,
1150 MAX_SCAN_LEN, &len, buf); 1199 MAX_SCAN_LEN, &len, buf);
1151 if (err) { 1200 if (err) {
1152 printk(KERN_ERR "%s: Cannot read scan results\n", 1201 printk(KERN_ERR "%s: Cannot read scan results\n",
1153 dev->name); 1202 dev->name);
@@ -1194,8 +1243,8 @@ static void orinoco_send_bssid_wevent(struct orinoco_private *priv)
1194 union iwreq_data wrqu; 1243 union iwreq_data wrqu;
1195 int err; 1244 int err;
1196 1245
1197 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID, 1246 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID,
1198 ETH_ALEN, NULL, wrqu.ap_addr.sa_data); 1247 ETH_ALEN, NULL, wrqu.ap_addr.sa_data);
1199 if (err != 0) 1248 if (err != 0)
1200 return; 1249 return;
1201 1250
@@ -1217,8 +1266,8 @@ static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv)
1217 if (!priv->has_wpa) 1266 if (!priv->has_wpa)
1218 return; 1267 return;
1219 1268
1220 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO, 1269 err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO,
1221 sizeof(buf), NULL, &buf); 1270 sizeof(buf), NULL, &buf);
1222 if (err != 0) 1271 if (err != 0)
1223 return; 1272 return;
1224 1273
@@ -1247,8 +1296,9 @@ static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv)
1247 if (!priv->has_wpa) 1296 if (!priv->has_wpa)
1248 return; 1297 return;
1249 1298
1250 err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO, 1299 err = hw->ops->read_ltv(hw, USER_BAP,
1251 sizeof(buf), NULL, &buf); 1300 HERMES_RID_CURRENT_ASSOC_RESP_INFO,
1301 sizeof(buf), NULL, &buf);
1252 if (err != 0) 1302 if (err != 0)
1253 return; 1303 return;
1254 1304
@@ -1353,7 +1403,7 @@ static void orinoco_process_scan_results(struct work_struct *work)
1353 spin_unlock_irqrestore(&priv->scan_lock, flags); 1403 spin_unlock_irqrestore(&priv->scan_lock, flags);
1354} 1404}
1355 1405
1356static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw) 1406void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1357{ 1407{
1358 struct orinoco_private *priv = ndev_priv(dev); 1408 struct orinoco_private *priv = ndev_priv(dev);
1359 u16 infofid; 1409 u16 infofid;
@@ -1371,8 +1421,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1371 infofid = hermes_read_regn(hw, INFOFID); 1421 infofid = hermes_read_regn(hw, INFOFID);
1372 1422
1373 /* Read the info frame header - don't try too hard */ 1423 /* Read the info frame header - don't try too hard */
1374 err = hermes_bap_pread(hw, IRQ_BAP, &info, sizeof(info), 1424 err = hw->ops->bap_pread(hw, IRQ_BAP, &info, sizeof(info),
1375 infofid, 0); 1425 infofid, 0);
1376 if (err) { 1426 if (err) {
1377 printk(KERN_ERR "%s: error %d reading info frame. " 1427 printk(KERN_ERR "%s: error %d reading info frame. "
1378 "Frame dropped.\n", dev->name, err); 1428 "Frame dropped.\n", dev->name, err);
@@ -1393,8 +1443,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1393 len = sizeof(tallies); 1443 len = sizeof(tallies);
1394 } 1444 }
1395 1445
1396 err = hermes_bap_pread(hw, IRQ_BAP, &tallies, len, 1446 err = hw->ops->bap_pread(hw, IRQ_BAP, &tallies, len,
1397 infofid, sizeof(info)); 1447 infofid, sizeof(info));
1398 if (err) 1448 if (err)
1399 break; 1449 break;
1400 1450
@@ -1429,8 +1479,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1429 break; 1479 break;
1430 } 1480 }
1431 1481
1432 err = hermes_bap_pread(hw, IRQ_BAP, &linkstatus, len, 1482 err = hw->ops->bap_pread(hw, IRQ_BAP, &linkstatus, len,
1433 infofid, sizeof(info)); 1483 infofid, sizeof(info));
1434 if (err) 1484 if (err)
1435 break; 1485 break;
1436 newstatus = le16_to_cpu(linkstatus.linkstatus); 1486 newstatus = le16_to_cpu(linkstatus.linkstatus);
@@ -1494,8 +1544,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1494 } 1544 }
1495 1545
1496 /* Read scan data */ 1546 /* Read scan data */
1497 err = hermes_bap_pread(hw, IRQ_BAP, (void *) buf, len, 1547 err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) buf, len,
1498 infofid, sizeof(info)); 1548 infofid, sizeof(info));
1499 if (err) { 1549 if (err) {
1500 kfree(buf); 1550 kfree(buf);
1501 qabort_scan(priv); 1551 qabort_scan(priv);
@@ -1547,8 +1597,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1547 break; 1597 break;
1548 1598
1549 /* Read scan data */ 1599 /* Read scan data */
1550 err = hermes_bap_pread(hw, IRQ_BAP, (void *) bss, len, 1600 err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) bss, len,
1551 infofid, sizeof(info)); 1601 infofid, sizeof(info));
1552 if (err) 1602 if (err)
1553 kfree(bss); 1603 kfree(bss);
1554 else 1604 else
@@ -1568,9 +1618,8 @@ static void __orinoco_ev_info(struct net_device *dev, hermes_t *hw)
1568 /* We don't actually do anything about it */ 1618 /* We don't actually do anything about it */
1569 break; 1619 break;
1570 } 1620 }
1571
1572 return;
1573} 1621}
1622EXPORT_SYMBOL(__orinoco_ev_info);
1574 1623
1575static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw) 1624static void __orinoco_ev_infdrop(struct net_device *dev, hermes_t *hw)
1576{ 1625{
@@ -1647,7 +1696,7 @@ static int orinoco_reinit_firmware(struct orinoco_private *priv)
1647 struct hermes *hw = &priv->hw; 1696 struct hermes *hw = &priv->hw;
1648 int err; 1697 int err;
1649 1698
1650 err = hermes_init(hw); 1699 err = hw->ops->init(hw);
1651 if (priv->do_fw_download && !err) { 1700 if (priv->do_fw_download && !err) {
1652 err = orinoco_download(priv); 1701 err = orinoco_download(priv);
1653 if (err) 1702 if (err)
@@ -1735,7 +1784,7 @@ void orinoco_reset(struct work_struct *work)
1735 } 1784 }
1736 1785
1737 /* This has to be called from user context */ 1786 /* This has to be called from user context */
1738 spin_lock_irq(&priv->lock); 1787 orinoco_lock_irq(priv);
1739 1788
1740 priv->hw_unavailable--; 1789 priv->hw_unavailable--;
1741 1790
@@ -1750,7 +1799,7 @@ void orinoco_reset(struct work_struct *work)
1750 dev->trans_start = jiffies; 1799 dev->trans_start = jiffies;
1751 } 1800 }
1752 1801
1753 spin_unlock_irq(&priv->lock); 1802 orinoco_unlock_irq(priv);
1754 1803
1755 return; 1804 return;
1756 disable: 1805 disable:
@@ -1984,7 +2033,7 @@ int orinoco_init(struct orinoco_private *priv)
1984 priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN; 2033 priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN;
1985 2034
1986 /* Initialize the firmware */ 2035 /* Initialize the firmware */
1987 err = hermes_init(hw); 2036 err = hw->ops->init(hw);
1988 if (err != 0) { 2037 if (err != 0) {
1989 dev_err(dev, "Failed to initialize firmware (err = %d)\n", 2038 dev_err(dev, "Failed to initialize firmware (err = %d)\n",
1990 err); 2039 err);
@@ -2067,9 +2116,9 @@ int orinoco_init(struct orinoco_private *priv)
2067 2116
2068 /* Make the hardware available, as long as it hasn't been 2117 /* Make the hardware available, as long as it hasn't been
2069 * removed elsewhere (e.g. by PCMCIA hot unplug) */ 2118 * removed elsewhere (e.g. by PCMCIA hot unplug) */
2070 spin_lock_irq(&priv->lock); 2119 orinoco_lock_irq(priv);
2071 priv->hw_unavailable--; 2120 priv->hw_unavailable--;
2072 spin_unlock_irq(&priv->lock); 2121 orinoco_unlock_irq(priv);
2073 2122
2074 dev_dbg(dev, "Ready\n"); 2123 dev_dbg(dev, "Ready\n");
2075 2124
@@ -2192,7 +2241,8 @@ EXPORT_SYMBOL(alloc_orinocodev);
2192 */ 2241 */
2193int orinoco_if_add(struct orinoco_private *priv, 2242int orinoco_if_add(struct orinoco_private *priv,
2194 unsigned long base_addr, 2243 unsigned long base_addr,
2195 unsigned int irq) 2244 unsigned int irq,
2245 const struct net_device_ops *ops)
2196{ 2246{
2197 struct wiphy *wiphy = priv_to_wiphy(priv); 2247 struct wiphy *wiphy = priv_to_wiphy(priv);
2198 struct wireless_dev *wdev; 2248 struct wireless_dev *wdev;
@@ -2211,16 +2261,21 @@ int orinoco_if_add(struct orinoco_private *priv,
2211 2261
2212 /* Setup / override net_device fields */ 2262 /* Setup / override net_device fields */
2213 dev->ieee80211_ptr = wdev; 2263 dev->ieee80211_ptr = wdev;
2214 dev->netdev_ops = &orinoco_netdev_ops;
2215 dev->watchdog_timeo = HZ; /* 1 second timeout */ 2264 dev->watchdog_timeo = HZ; /* 1 second timeout */
2216 dev->wireless_handlers = &orinoco_handler_def; 2265 dev->wireless_handlers = &orinoco_handler_def;
2217#ifdef WIRELESS_SPY 2266#ifdef WIRELESS_SPY
2218 dev->wireless_data = &priv->wireless_data; 2267 dev->wireless_data = &priv->wireless_data;
2219#endif 2268#endif
2269 /* Default to standard ops if not set */
2270 if (ops)
2271 dev->netdev_ops = ops;
2272 else
2273 dev->netdev_ops = &orinoco_netdev_ops;
2274
2220 /* we use the default eth_mac_addr for setting the MAC addr */ 2275 /* we use the default eth_mac_addr for setting the MAC addr */
2221 2276
2222 /* Reserve space in skb for the SNAP header */ 2277 /* Reserve space in skb for the SNAP header */
2223 dev->hard_header_len += ENCAPS_OVERHEAD; 2278 dev->needed_headroom = ENCAPS_OVERHEAD;
2224 2279
2225 netif_carrier_off(dev); 2280 netif_carrier_off(dev);
2226 2281
@@ -2305,7 +2360,7 @@ int orinoco_up(struct orinoco_private *priv)
2305 unsigned long flags; 2360 unsigned long flags;
2306 int err; 2361 int err;
2307 2362
2308 spin_lock_irqsave(&priv->lock, flags); 2363 priv->hw.ops->lock_irqsave(&priv->lock, &flags);
2309 2364
2310 err = orinoco_reinit_firmware(priv); 2365 err = orinoco_reinit_firmware(priv);
2311 if (err) { 2366 if (err) {
@@ -2325,7 +2380,7 @@ int orinoco_up(struct orinoco_private *priv)
2325 } 2380 }
2326 2381
2327exit: 2382exit:
2328 spin_unlock_irqrestore(&priv->lock, flags); 2383 priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
2329 2384
2330 return 0; 2385 return 0;
2331} 2386}
@@ -2337,7 +2392,7 @@ void orinoco_down(struct orinoco_private *priv)
2337 unsigned long flags; 2392 unsigned long flags;
2338 int err; 2393 int err;
2339 2394
2340 spin_lock_irqsave(&priv->lock, flags); 2395 priv->hw.ops->lock_irqsave(&priv->lock, &flags);
2341 err = __orinoco_down(priv); 2396 err = __orinoco_down(priv);
2342 if (err) 2397 if (err)
2343 printk(KERN_WARNING "%s: Error %d downing interface\n", 2398 printk(KERN_WARNING "%s: Error %d downing interface\n",
@@ -2345,7 +2400,7 @@ void orinoco_down(struct orinoco_private *priv)
2345 2400
2346 netif_device_detach(dev); 2401 netif_device_detach(dev);
2347 priv->hw_unavailable++; 2402 priv->hw_unavailable++;
2348 spin_unlock_irqrestore(&priv->lock, flags); 2403 priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
2349} 2404}
2350EXPORT_SYMBOL(orinoco_down); 2405EXPORT_SYMBOL(orinoco_down);
2351 2406
diff --git a/drivers/net/wireless/orinoco/main.h b/drivers/net/wireless/orinoco/main.h
index 21ab36cd76c7..4dadf9880a97 100644
--- a/drivers/net/wireless/orinoco/main.h
+++ b/drivers/net/wireless/orinoco/main.h
@@ -33,18 +33,6 @@ int orinoco_commit(struct orinoco_private *priv);
33void orinoco_reset(struct work_struct *work); 33void orinoco_reset(struct work_struct *work);
34 34
35/* Information element helpers - find a home for these... */ 35/* Information element helpers - find a home for these... */
36static inline u8 *orinoco_get_ie(u8 *data, size_t len,
37 enum ieee80211_eid eid)
38{
39 u8 *p = data;
40 while ((p + 2) < (data + len)) {
41 if (p[0] == eid)
42 return p;
43 p += p[1] + 2;
44 }
45 return NULL;
46}
47
48#define WPA_OUI_TYPE "\x00\x50\xF2\x01" 36#define WPA_OUI_TYPE "\x00\x50\xF2\x01"
49#define WPA_SELECTOR_LEN 4 37#define WPA_SELECTOR_LEN 4
50static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len) 38static inline u8 *orinoco_get_wpa_ie(u8 *data, size_t len)
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 665ef56f8382..a6da86e0a70f 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -131,6 +131,8 @@ struct orinoco_private {
131 u16 ap_density, rts_thresh; 131 u16 ap_density, rts_thresh;
132 u16 pm_on, pm_mcast, pm_period, pm_timeout; 132 u16 pm_on, pm_mcast, pm_period, pm_timeout;
133 u16 preamble; 133 u16 preamble;
134 u16 short_retry_limit, long_retry_limit;
135 u16 retry_lifetime;
134#ifdef WIRELESS_SPY 136#ifdef WIRELESS_SPY
135 struct iw_spy_data spy_data; /* iwspy support */ 137 struct iw_spy_data spy_data; /* iwspy support */
136 struct iw_public_data wireless_data; 138 struct iw_public_data wireless_data;
@@ -188,12 +190,30 @@ extern void free_orinocodev(struct orinoco_private *priv);
188extern int orinoco_init(struct orinoco_private *priv); 190extern int orinoco_init(struct orinoco_private *priv);
189extern int orinoco_if_add(struct orinoco_private *priv, 191extern int orinoco_if_add(struct orinoco_private *priv,
190 unsigned long base_addr, 192 unsigned long base_addr,
191 unsigned int irq); 193 unsigned int irq,
194 const struct net_device_ops *ops);
192extern void orinoco_if_del(struct orinoco_private *priv); 195extern void orinoco_if_del(struct orinoco_private *priv);
193extern int orinoco_up(struct orinoco_private *priv); 196extern int orinoco_up(struct orinoco_private *priv);
194extern void orinoco_down(struct orinoco_private *priv); 197extern void orinoco_down(struct orinoco_private *priv);
195extern irqreturn_t orinoco_interrupt(int irq, void *dev_id); 198extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
196 199
200extern void __orinoco_ev_info(struct net_device *dev, hermes_t *hw);
201extern void __orinoco_ev_rx(struct net_device *dev, hermes_t *hw);
202
203int orinoco_process_xmit_skb(struct sk_buff *skb,
204 struct net_device *dev,
205 struct orinoco_private *priv,
206 int *tx_control,
207 u8 *mic);
208
209/* Common ndo functions exported for reuse by orinoco_usb */
210int orinoco_open(struct net_device *dev);
211int orinoco_stop(struct net_device *dev);
212struct net_device_stats *orinoco_get_stats(struct net_device *dev);
213void orinoco_set_multicast_list(struct net_device *dev);
214int orinoco_change_mtu(struct net_device *dev, int new_mtu);
215void orinoco_tx_timeout(struct net_device *dev);
216
197/********************************************************************/ 217/********************************************************************/
198/* Locking and synchronization functions */ 218/* Locking and synchronization functions */
199/********************************************************************/ 219/********************************************************************/
@@ -201,11 +221,11 @@ extern irqreturn_t orinoco_interrupt(int irq, void *dev_id);
201static inline int orinoco_lock(struct orinoco_private *priv, 221static inline int orinoco_lock(struct orinoco_private *priv,
202 unsigned long *flags) 222 unsigned long *flags)
203{ 223{
204 spin_lock_irqsave(&priv->lock, *flags); 224 priv->hw.ops->lock_irqsave(&priv->lock, flags);
205 if (priv->hw_unavailable) { 225 if (priv->hw_unavailable) {
206 DEBUG(1, "orinoco_lock() called with hw_unavailable (dev=%p)\n", 226 DEBUG(1, "orinoco_lock() called with hw_unavailable (dev=%p)\n",
207 priv->ndev); 227 priv->ndev);
208 spin_unlock_irqrestore(&priv->lock, *flags); 228 priv->hw.ops->unlock_irqrestore(&priv->lock, flags);
209 return -EBUSY; 229 return -EBUSY;
210 } 230 }
211 return 0; 231 return 0;
@@ -214,7 +234,17 @@ static inline int orinoco_lock(struct orinoco_private *priv,
214static inline void orinoco_unlock(struct orinoco_private *priv, 234static inline void orinoco_unlock(struct orinoco_private *priv,
215 unsigned long *flags) 235 unsigned long *flags)
216{ 236{
217 spin_unlock_irqrestore(&priv->lock, *flags); 237 priv->hw.ops->unlock_irqrestore(&priv->lock, flags);
238}
239
240static inline void orinoco_lock_irq(struct orinoco_private *priv)
241{
242 priv->hw.ops->lock_irq(&priv->lock);
243}
244
245static inline void orinoco_unlock_irq(struct orinoco_private *priv)
246{
247 priv->hw.ops->unlock_irq(&priv->lock);
218} 248}
219 249
220/*** Navigate from net_device to orinoco_private ***/ 250/*** Navigate from net_device to orinoco_private ***/
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 1d4ada188eda..b16d5db52a4d 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -50,7 +50,6 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket
50 * struct orinoco_private */ 50 * struct orinoco_private */
51struct orinoco_pccard { 51struct orinoco_pccard {
52 struct pcmcia_device *p_dev; 52 struct pcmcia_device *p_dev;
53 dev_node_t node;
54 53
55 /* Used to handle hard reset */ 54 /* Used to handle hard reset */
56 /* yuck, we need this hack to work around the insanity of the 55 /* yuck, we need this hack to work around the insanity of the
@@ -119,10 +118,6 @@ orinoco_cs_probe(struct pcmcia_device *link)
119 card->p_dev = link; 118 card->p_dev = link;
120 link->priv = priv; 119 link->priv = priv;
121 120
122 /* Interrupt setup */
123 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
124 link->irq.Handler = orinoco_interrupt;
125
126 /* General socket configuration defaults can go here. In this 121 /* General socket configuration defaults can go here. In this
127 * client, we assume very little, and rely on the CIS for 122 * client, we assume very little, and rely on the CIS for
128 * almost everything. In most clients, many details (i.e., 123 * almost everything. In most clients, many details (i.e.,
@@ -144,8 +139,7 @@ static void orinoco_cs_detach(struct pcmcia_device *link)
144{ 139{
145 struct orinoco_private *priv = link->priv; 140 struct orinoco_private *priv = link->priv;
146 141
147 if (link->dev_node) 142 orinoco_if_del(priv);
148 orinoco_if_del(priv);
149 143
150 orinoco_cs_release(link); 144 orinoco_cs_release(link);
151 145
@@ -230,7 +224,6 @@ static int
230orinoco_cs_config(struct pcmcia_device *link) 224orinoco_cs_config(struct pcmcia_device *link)
231{ 225{
232 struct orinoco_private *priv = link->priv; 226 struct orinoco_private *priv = link->priv;
233 struct orinoco_pccard *card = priv->card;
234 hermes_t *hw = &priv->hw; 227 hermes_t *hw = &priv->hw;
235 int ret; 228 int ret;
236 void __iomem *mem; 229 void __iomem *mem;
@@ -258,12 +251,7 @@ orinoco_cs_config(struct pcmcia_device *link)
258 goto failed; 251 goto failed;
259 } 252 }
260 253
261 /* 254 ret = pcmcia_request_irq(link, orinoco_interrupt);
262 * Allocate an interrupt line. Note that this does not assign
263 * a handler to the interrupt, unless the 'Handler' member of
264 * the irq structure is initialized.
265 */
266 ret = pcmcia_request_irq(link, &link->irq);
267 if (ret) 255 if (ret)
268 goto failed; 256 goto failed;
269 257
@@ -285,9 +273,6 @@ orinoco_cs_config(struct pcmcia_device *link)
285 if (ret) 273 if (ret)
286 goto failed; 274 goto failed;
287 275
288 /* Ok, we have the configuration, prepare to register the netdev */
289 card->node.major = card->node.minor = 0;
290
291 /* Initialise the main driver */ 276 /* Initialise the main driver */
292 if (orinoco_init(priv) != 0) { 277 if (orinoco_init(priv) != 0) {
293 printk(KERN_ERR PFX "orinoco_init() failed\n"); 278 printk(KERN_ERR PFX "orinoco_init() failed\n");
@@ -296,17 +281,11 @@ orinoco_cs_config(struct pcmcia_device *link)
296 281
297 /* Register an interface with the stack */ 282 /* Register an interface with the stack */
298 if (orinoco_if_add(priv, link->io.BasePort1, 283 if (orinoco_if_add(priv, link->io.BasePort1,
299 link->irq.AssignedIRQ) != 0) { 284 link->irq, NULL) != 0) {
300 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 285 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
301 goto failed; 286 goto failed;
302 } 287 }
303 288
304 /* At this point, the dev_node_t structure(s) needs to be
305 * initialized and arranged in a linked list at link->dev_node. */
306 strcpy(card->node.dev_name, priv->ndev->name);
307 link->dev_node = &card->node; /* link->dev_node being non-NULL is also
308 * used to indicate that the
309 * net_device has been registered */
310 return 0; 289 return 0;
311 290
312 failed: 291 failed:
@@ -327,9 +306,9 @@ orinoco_cs_release(struct pcmcia_device *link)
327 306
328 /* We're committed to taking the device away now, so mark the 307 /* We're committed to taking the device away now, so mark the
329 * hardware as unavailable */ 308 * hardware as unavailable */
330 spin_lock_irqsave(&priv->lock, flags); 309 priv->hw.ops->lock_irqsave(&priv->lock, &flags);
331 priv->hw_unavailable++; 310 priv->hw_unavailable++;
332 spin_unlock_irqrestore(&priv->lock, flags); 311 priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
333 312
334 pcmcia_disable_device(link); 313 pcmcia_disable_device(link);
335 if (priv->hw.iobase) 314 if (priv->hw.iobase)
@@ -374,87 +353,90 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
374 "Pavel Roskin <proski@gnu.org>, et al)"; 353 "Pavel Roskin <proski@gnu.org>, et al)";
375 354
376static struct pcmcia_device_id orinoco_cs_ids[] = { 355static struct pcmcia_device_id orinoco_cs_ids[] = {
377 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
378 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
379 PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
380 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */ 356 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
381 PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
382 PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
383 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */ 357 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
384 PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */ 358 PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
385 PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */ 359 PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
386 PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
387 PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
388 PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */ 360 PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
389 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */ 361 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
390 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */ 362 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */
391 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */ 363 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
392 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
393 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
394 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
395 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
396 PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */ 364 PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
397 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */ 365 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
398 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */ 366 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */
399 PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */ 367 PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
368 PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
369 PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
370 PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
371 PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
372 PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
373 PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
374 PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
375 PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
376 PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
377 PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
378 PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
379 PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
380 PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
381 PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
382 PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
383 PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
384 PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
385 PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
386 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
387 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
388 PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
389 PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
390#ifdef CONFIG_HERMES_PRISM
391 /* Only entries that certainly identify Prism chipset */
392 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
393 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
394 PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
395 PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
396 PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
397 PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
398 PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
399 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
400 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
401 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
402 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
400 PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */ 403 PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
401 PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */ 404 PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */
402 PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */ 405 PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */
403 PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */ 406 PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */
404 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */ 407 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */
405 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */ 408 PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */
406 PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9), 409 PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
407 PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
408 PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5), 410 PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
409 PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2), 411 PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
410 PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
411 PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
412 PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
413 PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
414 PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
415 PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
416 PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18), 412 PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
417 PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90), 413 PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
418 PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b), 414 PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
419 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584), 415 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
420 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9), 416 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
421 PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae), 417 PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
418 PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
422 PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac), 419 PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac),
423 PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab), 420 PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab),
424 PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916), 421 PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
425 PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
426 PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
427 PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
428 PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
429 PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
430 PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18), 422 PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
431 PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77), 423 PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
432 PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf), 424 PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
433 PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
434 PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395), 425 PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
435 PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
436 PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
437 PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
438 PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01), 426 PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01),
439 PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
440 PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
441 PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1), 427 PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1),
442 PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767), 428 PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
443 PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6), 429 PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
444 PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
445 PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264), 430 PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
446 PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178), 431 PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
447 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
448 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
449 PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
450 PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757), 432 PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757),
451 PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a), 433 PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
452 PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
453 PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee), 434 PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
454 PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092), 435 PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
455 PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2), 436 PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
456 PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b), 437 PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
457 PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39), 438 PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
439#endif
458 PCMCIA_DEVICE_NULL, 440 PCMCIA_DEVICE_NULL,
459}; 441};
460MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids); 442MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index 075f446b3139..bc3ea0b67a4f 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -220,7 +220,7 @@ static int orinoco_nortel_init_one(struct pci_dev *pdev,
220 goto fail; 220 goto fail;
221 } 221 }
222 222
223 err = orinoco_if_add(priv, 0, 0); 223 err = orinoco_if_add(priv, 0, 0, NULL);
224 if (err) { 224 if (err) {
225 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 225 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
226 goto fail; 226 goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index bda5317cc596..468197f86673 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -170,7 +170,7 @@ static int orinoco_pci_init_one(struct pci_dev *pdev,
170 goto fail; 170 goto fail;
171 } 171 }
172 172
173 err = orinoco_if_add(priv, 0, 0); 173 err = orinoco_if_add(priv, 0, 0, NULL);
174 if (err) { 174 if (err) {
175 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 175 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
176 goto fail; 176 goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index e0d5874ab42f..9358f4d2307b 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -259,7 +259,7 @@ static int orinoco_plx_init_one(struct pci_dev *pdev,
259 goto fail; 259 goto fail;
260 } 260 }
261 261
262 err = orinoco_if_add(priv, 0, 0); 262 err = orinoco_if_add(priv, 0, 0, NULL);
263 if (err) { 263 if (err) {
264 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 264 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
265 goto fail; 265 goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index 88cbc7902aa0..784605f0af15 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -156,7 +156,7 @@ static int orinoco_tmd_init_one(struct pci_dev *pdev,
156 goto fail; 156 goto fail;
157 } 157 }
158 158
159 err = orinoco_if_add(priv, 0, 0); 159 err = orinoco_if_add(priv, 0, 0, NULL);
160 if (err) { 160 if (err) {
161 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 161 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
162 goto fail; 162 goto fail;
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
new file mode 100644
index 000000000000..78f089baa8c9
--- /dev/null
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -0,0 +1,1795 @@
1/*
2 * USB Orinoco driver
3 *
4 * Copyright (c) 2003 Manuel Estrada Sainz
5 *
6 * The contents of this file are subject to the Mozilla Public License
7 * Version 1.1 (the "License"); you may not use this file except in
8 * compliance with the License. You may obtain a copy of the License
9 * at http://www.mozilla.org/MPL/
10 *
11 * Software distributed under the License is distributed on an "AS IS"
12 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
13 * the License for the specific language governing rights and
14 * limitations under the License.
15 *
16 * Alternatively, the contents of this file may be used under the
17 * terms of the GNU General Public License version 2 (the "GPL"), in
18 * which case the provisions of the GPL are applicable instead of the
19 * above. If you wish to allow the use of your version of this file
20 * only under the terms of the GPL and not to allow others to use your
21 * version of this file under the MPL, indicate your decision by
22 * deleting the provisions above and replace them with the notice and
23 * other provisions required by the GPL. If you do not delete the
24 * provisions above, a recipient may use your version of this file
25 * under either the MPL or the GPL.
26 *
27 * Queueing code based on linux-wlan-ng 0.2.1-pre5
28 *
29 * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved.
30 *
31 * The license is the same as above.
32 *
33 * Initialy based on USB Skeleton driver - 0.7
34 *
35 * Copyright (c) 2001 Greg Kroah-Hartman (greg@kroah.com)
36 *
37 * This program is free software; you can redistribute it and/or
38 * modify it under the terms of the GNU General Public License as
39 * published by the Free Software Foundation; either version 2 of
40 * the License, or (at your option) any later version.
41 *
42 * NOTE: The original USB Skeleton driver is GPL, but all that code is
43 * gone so MPL/GPL applies.
44 */
45
46#define DRIVER_NAME "orinoco_usb"
47#define PFX DRIVER_NAME ": "
48
49#include <linux/module.h>
50#include <linux/kernel.h>
51#include <linux/sched.h>
52#include <linux/signal.h>
53#include <linux/errno.h>
54#include <linux/poll.h>
55#include <linux/init.h>
56#include <linux/slab.h>
57#include <linux/fcntl.h>
58#include <linux/spinlock.h>
59#include <linux/list.h>
60#include <linux/smp_lock.h>
61#include <linux/usb.h>
62#include <linux/timer.h>
63
64#include <linux/netdevice.h>
65#include <linux/if_arp.h>
66#include <linux/etherdevice.h>
67#include <linux/wireless.h>
68#include <linux/firmware.h>
69
70#include "mic.h"
71#include "orinoco.h"
72
73#ifndef URB_ASYNC_UNLINK
74#define URB_ASYNC_UNLINK 0
75#endif
76
77/* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */
78static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
79#define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2)
80
81struct header_struct {
82 /* 802.3 */
83 u8 dest[ETH_ALEN];
84 u8 src[ETH_ALEN];
85 __be16 len;
86 /* 802.2 */
87 u8 dsap;
88 u8 ssap;
89 u8 ctrl;
90 /* SNAP */
91 u8 oui[3];
92 __be16 ethertype;
93} __attribute__ ((packed));
94
95struct ez_usb_fw {
96 u16 size;
97 const u8 *code;
98};
99
100static struct ez_usb_fw firmware = {
101 .size = 0,
102 .code = NULL,
103};
104
105#ifdef CONFIG_USB_DEBUG
106static int debug = 1;
107#else
108static int debug;
109#endif
110
111/* Debugging macros */
112#undef dbg
113#define dbg(format, arg...) \
114 do { if (debug) printk(KERN_DEBUG PFX "%s: " format "\n", \
115 __func__ , ## arg); } while (0)
116#undef err
117#define err(format, arg...) \
118 do { printk(KERN_ERR PFX format "\n", ## arg); } while (0)
119
120/* Module paramaters */
121module_param(debug, int, 0644);
122MODULE_PARM_DESC(debug, "Debug enabled or not");
123
124MODULE_FIRMWARE("orinoco_ezusb_fw");
125
126/*
127 * Under some conditions, the card gets stuck and stops paying attention
128 * to the world (i.e. data communication stalls) until we do something to
129 * it. Sending an INQ_TALLIES command seems to be enough and should be
130 * harmless otherwise. This behaviour has been observed when using the
131 * driver on a systemimager client during installation. In the past a
132 * timer was used to send INQ_TALLIES commands when there was no other
133 * activity, but it was troublesome and was removed.
134 */
135
136#define USB_COMPAQ_VENDOR_ID 0x049f /* Compaq Computer Corp. */
137#define USB_COMPAQ_WL215_ID 0x001f /* Compaq WL215 USB Adapter */
138#define USB_COMPAQ_W200_ID 0x0076 /* Compaq W200 USB Adapter */
139#define USB_HP_WL215_ID 0x0082 /* Compaq WL215 USB Adapter */
140
141#define USB_MELCO_VENDOR_ID 0x0411
142#define USB_BUFFALO_L11_ID 0x0006 /* BUFFALO WLI-USB-L11 */
143#define USB_BUFFALO_L11G_WR_ID 0x000B /* BUFFALO WLI-USB-L11G-WR */
144#define USB_BUFFALO_L11G_ID 0x000D /* BUFFALO WLI-USB-L11G */
145
146#define USB_LUCENT_VENDOR_ID 0x047E /* Lucent Technologies */
147#define USB_LUCENT_ORINOCO_ID 0x0300 /* Lucent/Agere Orinoco USB Client */
148
149#define USB_AVAYA8_VENDOR_ID 0x0D98
150#define USB_AVAYAE_VENDOR_ID 0x0D9E
151#define USB_AVAYA_WIRELESS_ID 0x0300 /* Avaya Wireless USB Card */
152
153#define USB_AGERE_VENDOR_ID 0x0D4E /* Agere Systems */
154#define USB_AGERE_MODEL0801_ID 0x1000 /* Wireless USB Card Model 0801 */
155#define USB_AGERE_MODEL0802_ID 0x1001 /* Wireless USB Card Model 0802 */
156#define USB_AGERE_REBRANDED_ID 0x047A /* WLAN USB Card */
157
158#define USB_ELSA_VENDOR_ID 0x05CC
159#define USB_ELSA_AIRLANCER_ID 0x3100 /* ELSA AirLancer USB-11 */
160
161#define USB_LEGEND_VENDOR_ID 0x0E7C
162#define USB_LEGEND_JOYNET_ID 0x0300 /* Joynet WLAN USB Card */
163
164#define USB_SAMSUNG_VENDOR_ID 0x04E8
165#define USB_SAMSUNG_SEW2001U1_ID 0x5002 /* Samsung SEW-2001u Card */
166#define USB_SAMSUNG_SEW2001U2_ID 0x5B11 /* Samsung SEW-2001u Card */
167#define USB_SAMSUNG_SEW2003U_ID 0x7011 /* Samsung SEW-2003U Card */
168
169#define USB_IGATE_VENDOR_ID 0x0681
170#define USB_IGATE_IGATE_11M_ID 0x0012 /* I-GATE 11M USB Card */
171
172#define USB_FUJITSU_VENDOR_ID 0x0BF8
173#define USB_FUJITSU_E1100_ID 0x1002 /* connect2AIR WLAN E-1100 USB */
174
175#define USB_2WIRE_VENDOR_ID 0x1630
176#define USB_2WIRE_WIRELESS_ID 0xff81 /* 2Wire Wireless USB adapter */
177
178
179#define EZUSB_REQUEST_FW_TRANS 0xA0
180#define EZUSB_REQUEST_TRIGER 0xAA
181#define EZUSB_REQUEST_TRIG_AC 0xAC
182#define EZUSB_CPUCS_REG 0x7F92
183
184#define EZUSB_RID_TX 0x0700
185#define EZUSB_RID_RX 0x0701
186#define EZUSB_RID_INIT1 0x0702
187#define EZUSB_RID_ACK 0x0710
188#define EZUSB_RID_READ_PDA 0x0800
189#define EZUSB_RID_PROG_INIT 0x0852
190#define EZUSB_RID_PROG_SET_ADDR 0x0853
191#define EZUSB_RID_PROG_BYTES 0x0854
192#define EZUSB_RID_PROG_END 0x0855
193#define EZUSB_RID_DOCMD 0x0860
194
195/* Recognize info frames */
196#define EZUSB_IS_INFO(id) ((id >= 0xF000) && (id <= 0xF2FF))
197
198#define EZUSB_MAGIC 0x0210
199
200#define EZUSB_FRAME_DATA 1
201#define EZUSB_FRAME_CONTROL 2
202
203#define DEF_TIMEOUT (3*HZ)
204
205#define BULK_BUF_SIZE 2048
206
207#define MAX_DL_SIZE (BULK_BUF_SIZE - sizeof(struct ezusb_packet))
208
209#define FW_BUF_SIZE 64
210#define FW_VAR_OFFSET_PTR 0x359
211#define FW_VAR_VALUE 0
212#define FW_HOLE_START 0x100
213#define FW_HOLE_END 0x300
214
215struct ezusb_packet {
216 __le16 magic; /* 0x0210 */
217 u8 req_reply_count;
218 u8 ans_reply_count;
219 __le16 frame_type; /* 0x01 for data frames, 0x02 otherwise */
220 __le16 size; /* transport size */
221 __le16 crc; /* CRC up to here */
222 __le16 hermes_len;
223 __le16 hermes_rid;
224 u8 data[0];
225} __attribute__ ((packed));
226
227/* Table of devices that work or may work with this driver */
228static struct usb_device_id ezusb_table[] = {
229 {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_WL215_ID)},
230 {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_HP_WL215_ID)},
231 {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_W200_ID)},
232 {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11_ID)},
233 {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_WR_ID)},
234 {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_ID)},
235 {USB_DEVICE(USB_LUCENT_VENDOR_ID, USB_LUCENT_ORINOCO_ID)},
236 {USB_DEVICE(USB_AVAYA8_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
237 {USB_DEVICE(USB_AVAYAE_VENDOR_ID, USB_AVAYA_WIRELESS_ID)},
238 {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0801_ID)},
239 {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0802_ID)},
240 {USB_DEVICE(USB_ELSA_VENDOR_ID, USB_ELSA_AIRLANCER_ID)},
241 {USB_DEVICE(USB_LEGEND_VENDOR_ID, USB_LEGEND_JOYNET_ID)},
242 {USB_DEVICE_VER(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U1_ID,
243 0, 0)},
244 {USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U2_ID)},
245 {USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2003U_ID)},
246 {USB_DEVICE(USB_IGATE_VENDOR_ID, USB_IGATE_IGATE_11M_ID)},
247 {USB_DEVICE(USB_FUJITSU_VENDOR_ID, USB_FUJITSU_E1100_ID)},
248 {USB_DEVICE(USB_2WIRE_VENDOR_ID, USB_2WIRE_WIRELESS_ID)},
249 {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_REBRANDED_ID)},
250 {} /* Terminating entry */
251};
252
253MODULE_DEVICE_TABLE(usb, ezusb_table);
254
255/* Structure to hold all of our device specific stuff */
256struct ezusb_priv {
257 struct usb_device *udev;
258 struct net_device *dev;
259 struct mutex mtx;
260 spinlock_t req_lock;
261 struct list_head req_pending;
262 struct list_head req_active;
263 spinlock_t reply_count_lock;
264 u16 hermes_reg_fake[0x40];
265 u8 *bap_buf;
266 struct urb *read_urb;
267 int read_pipe;
268 int write_pipe;
269 u8 reply_count;
270};
271
272enum ezusb_state {
273 EZUSB_CTX_START,
274 EZUSB_CTX_QUEUED,
275 EZUSB_CTX_REQ_SUBMITTED,
276 EZUSB_CTX_REQ_COMPLETE,
277 EZUSB_CTX_RESP_RECEIVED,
278 EZUSB_CTX_REQ_TIMEOUT,
279 EZUSB_CTX_REQ_FAILED,
280 EZUSB_CTX_RESP_TIMEOUT,
281 EZUSB_CTX_REQSUBMIT_FAIL,
282 EZUSB_CTX_COMPLETE,
283};
284
285struct request_context {
286 struct list_head list;
287 atomic_t refcount;
288 struct completion done; /* Signals that CTX is dead */
289 int killed;
290 struct urb *outurb; /* OUT for req pkt */
291 struct ezusb_priv *upriv;
292 struct ezusb_packet *buf;
293 int buf_length;
294 struct timer_list timer; /* Timeout handling */
295 enum ezusb_state state; /* Current state */
296 /* the RID that we will wait for */
297 u16 out_rid;
298 u16 in_rid;
299};
300
301
302/* Forward declarations */
303static void ezusb_ctx_complete(struct request_context *ctx);
304static void ezusb_req_queue_run(struct ezusb_priv *upriv);
305static void ezusb_bulk_in_callback(struct urb *urb);
306
307static inline u8 ezusb_reply_inc(u8 count)
308{
309 if (count < 0x7F)
310 return count + 1;
311 else
312 return 1;
313}
314
315static void ezusb_request_context_put(struct request_context *ctx)
316{
317 if (!atomic_dec_and_test(&ctx->refcount))
318 return;
319
320 WARN_ON(!ctx->done.done);
321 BUG_ON(ctx->outurb->status == -EINPROGRESS);
322 BUG_ON(timer_pending(&ctx->timer));
323 usb_free_urb(ctx->outurb);
324 kfree(ctx->buf);
325 kfree(ctx);
326}
327
328static inline void ezusb_mod_timer(struct ezusb_priv *upriv,
329 struct timer_list *timer,
330 unsigned long expire)
331{
332 if (!upriv->udev)
333 return;
334 mod_timer(timer, expire);
335}
336
337static void ezusb_request_timerfn(u_long _ctx)
338{
339 struct request_context *ctx = (void *) _ctx;
340
341 ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
342 if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
343 ctx->state = EZUSB_CTX_REQ_TIMEOUT;
344 } else {
345 ctx->state = EZUSB_CTX_RESP_TIMEOUT;
346 dbg("couldn't unlink");
347 atomic_inc(&ctx->refcount);
348 ctx->killed = 1;
349 ezusb_ctx_complete(ctx);
350 ezusb_request_context_put(ctx);
351 }
352};
353
354static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv,
355 u16 out_rid, u16 in_rid)
356{
357 struct request_context *ctx;
358
359 ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC);
360 if (!ctx)
361 return NULL;
362
363 memset(ctx, 0, sizeof(*ctx));
364
365 ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC);
366 if (!ctx->buf) {
367 kfree(ctx);
368 return NULL;
369 }
370 ctx->outurb = usb_alloc_urb(0, GFP_ATOMIC);
371 if (!ctx->outurb) {
372 kfree(ctx->buf);
373 kfree(ctx);
374 return NULL;
375 }
376
377 ctx->upriv = upriv;
378 ctx->state = EZUSB_CTX_START;
379 ctx->out_rid = out_rid;
380 ctx->in_rid = in_rid;
381
382 atomic_set(&ctx->refcount, 1);
383 init_completion(&ctx->done);
384
385 init_timer(&ctx->timer);
386 ctx->timer.function = ezusb_request_timerfn;
387 ctx->timer.data = (u_long) ctx;
388 return ctx;
389}
390
391
/* Hopefully the real complete_all will soon be exported, in the mean
 * while this should work. */
static inline void ezusb_complete_all(struct completion *comp)
{
	int i;

	/* Wake up to four waiters, matching the original open-coded
	 * sequence of complete() calls. */
	for (i = 0; i < 4; i++)
		complete(comp);
}
401
/*
 * Unlink @ctx from its queue and finish it.
 *
 * In a terminal state: account TX statistics for EZUSB_RID_TX requests
 * and restart the netif queue, wake all waiters on ctx->done and drop
 * the queue's reference.  In a non-terminal state this is only normal
 * when the device has gone away; otherwise the context is deliberately
 * leaked because its OUT urb may still be active.
 */
static void ezusb_ctx_complete(struct request_context *ctx)
{
	struct ezusb_priv *upriv = ctx->upriv;
	unsigned long flags;

	spin_lock_irqsave(&upriv->req_lock, flags);

	list_del_init(&ctx->list);
	if (upriv->udev) {
		/* Kick the queue runner; it takes req_lock itself, so
		 * drop and retake the lock around the call. */
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		ezusb_req_queue_run(upriv);
		spin_lock_irqsave(&upriv->req_lock, flags);
	}

	switch (ctx->state) {
	case EZUSB_CTX_COMPLETE:
	case EZUSB_CTX_REQSUBMIT_FAIL:
	case EZUSB_CTX_REQ_FAILED:
	case EZUSB_CTX_REQ_TIMEOUT:
	case EZUSB_CTX_RESP_TIMEOUT:
		spin_unlock_irqrestore(&upriv->req_lock, flags);

		if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) {
			struct net_device *dev = upriv->dev;
			struct orinoco_private *priv = ndev_priv(dev);
			struct net_device_stats *stats = &priv->stats;

			/* Account the TX outcome and let the stack
			 * hand us the next packet. */
			if (ctx->state != EZUSB_CTX_COMPLETE)
				stats->tx_errors++;
			else
				stats->tx_packets++;

			netif_wake_queue(dev);
		}
		ezusb_complete_all(&ctx->done);
		ezusb_request_context_put(ctx);
		break;

	default:
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		if (!upriv->udev) {
			/* This is normal, as all request contexts get flushed
			 * when the device is disconnected */
			err("Called, CTX not terminating, but device gone");
			ezusb_complete_all(&ctx->done);
			ezusb_request_context_put(ctx);
			break;
		}

		err("Called, CTX not in terminating state.");
		/* Things are really bad if this happens. Just leak
		 * the CTX because it may still be linked to the
		 * queue or the OUT urb may still be active.
		 * Just leaking at least prevents an Oops or Panic.
		 */
		break;
	}
}
460
/**
 * ezusb_req_queue_run: move the head of req_pending to req_active and
 * submit its OUT urb, if nothing is currently in flight.
 * Description:
 *	Note: Only one active CTX at any one time, because there's no
 *	other (reliable) way to match the response URB to the correct
 *	CTX.
 **/
static void ezusb_req_queue_run(struct ezusb_priv *upriv)
{
	unsigned long flags;
	struct request_context *ctx;
	int result;

	spin_lock_irqsave(&upriv->req_lock, flags);

	/* Nothing to do if a request is already active or none is
	 * waiting. */
	if (!list_empty(&upriv->req_active))
		goto unlock;

	if (list_empty(&upriv->req_pending))
		goto unlock;

	ctx =
	    list_entry(upriv->req_pending.next, struct request_context,
		       list);

	if (!ctx->upriv->udev)
		goto unlock;

	/* We need to split this off to avoid a race condition */
	list_move_tail(&ctx->list, &upriv->req_active);

	if (ctx->state == EZUSB_CTX_QUEUED) {
		/* Reference held on behalf of the in-flight urb */
		atomic_inc(&ctx->refcount);
		result = usb_submit_urb(ctx->outurb, GFP_ATOMIC);
		if (result) {
			ctx->state = EZUSB_CTX_REQSUBMIT_FAIL;

			spin_unlock_irqrestore(&upriv->req_lock, flags);

			err("Fatal, failed to submit command urb."
			    " error=%d\n", result);

			ezusb_ctx_complete(ctx);
			ezusb_request_context_put(ctx);
			goto done;
		}

		ctx->state = EZUSB_CTX_REQ_SUBMITTED;
		/* Arm the request timeout */
		ezusb_mod_timer(ctx->upriv, &ctx->timer,
				jiffies + DEF_TIMEOUT);
	}

 unlock:
	spin_unlock_irqrestore(&upriv->req_lock, flags);

 done:
	return;
}
519
520static void ezusb_req_enqueue_run(struct ezusb_priv *upriv,
521 struct request_context *ctx)
522{
523 unsigned long flags;
524
525 spin_lock_irqsave(&upriv->req_lock, flags);
526
527 if (!ctx->upriv->udev) {
528 spin_unlock_irqrestore(&upriv->req_lock, flags);
529 goto done;
530 }
531 atomic_inc(&ctx->refcount);
532 list_add_tail(&ctx->list, &upriv->req_pending);
533 spin_unlock_irqrestore(&upriv->req_lock, flags);
534
535 ctx->state = EZUSB_CTX_QUEUED;
536 ezusb_req_queue_run(upriv);
537
538 done:
539 return;
540}
541
/*
 * Completion handler for the OUT (request) urb.
 *
 * On success: either a reply is still expected (rearm the timeout and
 * let the IN path finish the exchange), or the IN reply already
 * arrived first (EZUSB_CTX_RESP_RECEIVED) and the context completes
 * here.  On error/unlink the context is failed.  Always drops the
 * submit-time reference on exit.
 */
static void ezusb_request_out_callback(struct urb *urb)
{
	unsigned long flags;
	enum ezusb_state state;
	struct request_context *ctx = urb->context;
	struct ezusb_priv *upriv = ctx->upriv;

	spin_lock_irqsave(&upriv->req_lock, flags);

	del_timer(&ctx->timer);

	if (ctx->killed) {
		/* The timeout path already completed this context */
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		pr_warning("interrupt called with dead ctx");
		goto out;
	}

	state = ctx->state;

	if (urb->status == 0) {
		switch (state) {
		case EZUSB_CTX_REQ_SUBMITTED:
			if (ctx->in_rid) {
				ctx->state = EZUSB_CTX_REQ_COMPLETE;
				/* reply URB still pending */
				ezusb_mod_timer(upriv, &ctx->timer,
						jiffies + DEF_TIMEOUT);
				spin_unlock_irqrestore(&upriv->req_lock,
						       flags);
				break;
			}
			/* fall through */
		case EZUSB_CTX_RESP_RECEIVED:
			/* IN already received before this OUT-ACK */
			ctx->state = EZUSB_CTX_COMPLETE;
			spin_unlock_irqrestore(&upriv->req_lock, flags);
			ezusb_ctx_complete(ctx);
			break;

		default:
			spin_unlock_irqrestore(&upriv->req_lock, flags);
			err("Unexpected state(0x%x, %d) in OUT URB",
			    state, urb->status);
			break;
		}
	} else {
		/* If someone cancels the OUT URB then its status
		 * should be either -ECONNRESET or -ENOENT.
		 */
		switch (state) {
		case EZUSB_CTX_REQ_SUBMITTED:
		case EZUSB_CTX_RESP_RECEIVED:
			ctx->state = EZUSB_CTX_REQ_FAILED;
			/* fall through */

		case EZUSB_CTX_REQ_FAILED:
		case EZUSB_CTX_REQ_TIMEOUT:
			spin_unlock_irqrestore(&upriv->req_lock, flags);

			ezusb_ctx_complete(ctx);
			break;

		default:
			spin_unlock_irqrestore(&upriv->req_lock, flags);

			err("Unexpected state(0x%x, %d) in OUT URB",
			    state, urb->status);
			break;
		}
	}
 out:
	ezusb_request_context_put(ctx);
}
615
/*
 * Dispatch a received bulk-IN packet that carries a command reply.
 *
 * The matching context is looked up on req_active by comparing the
 * reply sequence number and the expected in_rid.  On a match the
 * reply buffer is swapped into the context (the context's old buffer
 * becomes the next IN transfer buffer) and the state machine advances:
 * either the OUT urb is still outstanding (RESP_RECEIVED) or the
 * exchange is complete.
 */
static void ezusb_request_in_callback(struct ezusb_priv *upriv,
				      struct urb *urb)
{
	struct ezusb_packet *ans = urb->transfer_buffer;
	struct request_context *ctx = NULL;
	enum ezusb_state state;
	unsigned long flags;

	/* Find the CTX on the active queue that requested this URB */
	spin_lock_irqsave(&upriv->req_lock, flags);
	if (upriv->udev) {
		struct list_head *item;

		list_for_each(item, &upriv->req_active) {
			struct request_context *c;
			int reply_count;

			c = list_entry(item, struct request_context, list);
			/* The expected reply count is the request's
			 * count advanced by one step */
			reply_count =
			    ezusb_reply_inc(c->buf->req_reply_count);
			if ((ans->ans_reply_count == reply_count)
			    && (le16_to_cpu(ans->hermes_rid) == c->in_rid)) {
				ctx = c;
				break;
			}
			dbg("Skipped (0x%x/0x%x) (%d/%d)",
			    le16_to_cpu(ans->hermes_rid),
			    c->in_rid, ans->ans_reply_count, reply_count);
		}
	}

	if (ctx == NULL) {
		spin_unlock_irqrestore(&upriv->req_lock, flags);
		err("%s: got unexpected RID: 0x%04X", __func__,
		    le16_to_cpu(ans->hermes_rid));
		ezusb_req_queue_run(upriv);
		return;
	}

	/* The data we want is in the in buffer, exchange */
	urb->transfer_buffer = ctx->buf;
	ctx->buf = (void *) ans;
	ctx->buf_length = urb->actual_length;

	state = ctx->state;
	switch (state) {
	case EZUSB_CTX_REQ_SUBMITTED:
		/* We have received our response URB before
		 * our request has been acknowledged. Do NOT
		 * destroy our CTX yet, because our OUT URB
		 * is still alive ...
		 */
		ctx->state = EZUSB_CTX_RESP_RECEIVED;
		spin_unlock_irqrestore(&upriv->req_lock, flags);

		/* Let the machine continue running. */
		break;

	case EZUSB_CTX_REQ_COMPLETE:
		/* This is the usual path: our request
		 * has already been acknowledged, and
		 * we have now received the reply.
		 */
		ctx->state = EZUSB_CTX_COMPLETE;

		/* Stop the intimer */
		del_timer(&ctx->timer);
		spin_unlock_irqrestore(&upriv->req_lock, flags);

		/* Call the completion handler */
		ezusb_ctx_complete(ctx);
		break;

	default:
		spin_unlock_irqrestore(&upriv->req_lock, flags);

		pr_warning("Matched IN URB, unexpected context state(0x%x)",
			   state);
		/* Throw this CTX away and try submitting another */
		del_timer(&ctx->timer);
		ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
		usb_unlink_urb(ctx->outurb);
		ezusb_req_queue_run(upriv);
		break;
	}			/* switch */
}
702
703
/*
 * Wait for @ctx to leave its in-flight states.
 *
 * In softirq context we cannot sleep — and sleeping would also starve
 * the timeout timers that would otherwise end the wait — so poll
 * ctx->done.done with udelay() for at most DEF_TIMEOUT.  Otherwise
 * sleep interruptibly on the completion's waitqueue.
 *
 * NOTE(review): this pokes at struct completion internals
 * (done.done / done.wait) rather than using wait_for_completion();
 * relies on the current layout of struct completion.
 */
static void ezusb_req_ctx_wait(struct ezusb_priv *upriv,
			       struct request_context *ctx)
{
	switch (ctx->state) {
	case EZUSB_CTX_QUEUED:
	case EZUSB_CTX_REQ_SUBMITTED:
	case EZUSB_CTX_REQ_COMPLETE:
	case EZUSB_CTX_RESP_RECEIVED:
		if (in_softirq()) {
			/* If we get called from a timer, timeout timers don't
			 * get the chance to run themselves. So we make sure
			 * that we don't sleep for ever */
			int msecs = DEF_TIMEOUT * (1000 / HZ);
			while (!ctx->done.done && msecs--)
				udelay(1000);
		} else {
			wait_event_interruptible(ctx->done.wait,
						 ctx->done.done);
		}
		break;
	default:
		/* Done or failed - nothing to wait for */
		break;
	}
}
729
730static inline u16 build_crc(struct ezusb_packet *data)
731{
732 u16 crc = 0;
733 u8 *bytes = (u8 *)data;
734 int i;
735
736 for (i = 0; i < 8; i++)
737 crc = (crc << 1) + bytes[i];
738
739 return crc;
740}
741
/**
 * ezusb_fill_req: build an ezusb request packet header (and optionally
 * copy in the payload); returns the total number of bytes to send.
 *
 * if data == NULL and length > 0 the data is assumed to be already in
 * the target buffer and only the header is filled.
 *
 */
static int ezusb_fill_req(struct ezusb_packet *req, u16 length, u16 rid,
			  const void *data, u16 frame_type, u8 reply_count)
{
	int total_size = sizeof(*req) + length;

	BUG_ON(total_size > BULK_BUF_SIZE);

	/* build_crc() sums only the first 8 bytes of the packet, so
	 * every field it covers must be (and is) written before
	 * req->crc is assigned below — keep this ordering. */
	req->magic = cpu_to_le16(EZUSB_MAGIC);
	req->req_reply_count = reply_count;
	req->ans_reply_count = 0;
	req->frame_type = cpu_to_le16(frame_type);
	req->size = cpu_to_le16(length + 4);
	req->crc = cpu_to_le16(build_crc(req));
	req->hermes_len = cpu_to_le16(HERMES_BYTES_TO_RECLEN(length));
	req->hermes_rid = cpu_to_le16(rid);
	if (data)
		memcpy(req->data, data, length);
	return total_size;
}
768
769static int ezusb_submit_in_urb(struct ezusb_priv *upriv)
770{
771 int retval = 0;
772 void *cur_buf = upriv->read_urb->transfer_buffer;
773
774 if (upriv->read_urb->status == -EINPROGRESS) {
775 dbg("urb busy, not resubmiting");
776 retval = -EBUSY;
777 goto exit;
778 }
779 usb_fill_bulk_urb(upriv->read_urb, upriv->udev, upriv->read_pipe,
780 cur_buf, BULK_BUF_SIZE,
781 ezusb_bulk_in_callback, upriv);
782 upriv->read_urb->transfer_flags = 0;
783 retval = usb_submit_urb(upriv->read_urb, GFP_ATOMIC);
784 if (retval)
785 err("%s submit failed %d", __func__, retval);
786
787 exit:
788 return retval;
789}
790
/*
 * Hold (reset != 0) or release (reset == 0) the EZ-USB's 8051 core by
 * writing its CPUCS register through a vendor control request.
 * Returns the usb_control_msg() result, or -EFAULT when the device is
 * already gone.
 */
static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset)
{
	u8 res_val = reset;	/* avoid argument promotion */

	if (!upriv->udev) {
		err("%s: !upriv->udev", __func__);
		return -EFAULT;
	}
	return usb_control_msg(upriv->udev,
			       usb_sndctrlpipe(upriv->udev, 0),
			       EZUSB_REQUEST_FW_TRANS,
			       USB_TYPE_VENDOR | USB_RECIP_DEVICE |
			       USB_DIR_OUT, EZUSB_CPUCS_REG, 0, &res_val,
			       sizeof(res_val), DEF_TIMEOUT);
}
806
/*
 * Download the bridge firmware while the 8051 is held in reset, in
 * FW_BUF_SIZE-sized control transfers, skipping the card-specific hole
 * at FW_HOLE_START..FW_HOLE_END and patching the card-variant byte on
 * the way through.  Releases the CPU on success.
 *
 * NOTE(review): the memcpy below always reads FW_BUF_SIZE bytes, so it
 * assumes fw->code extends to fw->size rounded up to a FW_BUF_SIZE
 * multiple — confirm against the firmware blob definition.
 */
static int ezusb_firmware_download(struct ezusb_priv *upriv,
				   struct ez_usb_fw *fw)
{
	u8 fw_buffer[FW_BUF_SIZE];
	int retval, addr;
	int variant_offset;

	/*
	 * This byte is 1 and should be replaced with 0. The offset is
	 * 0x10AD in version 0.0.6. The byte in question should follow
	 * the end of the code pointed to by the jump in the beginning
	 * of the firmware. Also, it is read by code located at 0x358.
	 */
	variant_offset = be16_to_cpup((__be16 *) &fw->code[FW_VAR_OFFSET_PTR]);
	if (variant_offset >= fw->size) {
		printk(KERN_ERR PFX "Invalid firmware variant offset: "
		       "0x%04x\n", variant_offset);
		retval = -EINVAL;
		goto fail;
	}

	/* Hold the 8051 in reset while its code memory is rewritten */
	retval = ezusb_8051_cpucs(upriv, 1);
	if (retval < 0)
		goto fail;
	for (addr = 0; addr < fw->size; addr += FW_BUF_SIZE) {
		/* 0x100-0x300 should be left alone, it contains card
		 * specific data, like USB enumeration information */
		if ((addr >= FW_HOLE_START) && (addr < FW_HOLE_END))
			continue;

		memcpy(fw_buffer, &fw->code[addr], FW_BUF_SIZE);
		if (variant_offset >= addr &&
		    variant_offset < addr + FW_BUF_SIZE) {
			dbg("Patching card_variant byte at 0x%04X",
			    variant_offset);
			fw_buffer[variant_offset - addr] = FW_VAR_VALUE;
		}
		retval = usb_control_msg(upriv->udev,
					 usb_sndctrlpipe(upriv->udev, 0),
					 EZUSB_REQUEST_FW_TRANS,
					 USB_TYPE_VENDOR | USB_RECIP_DEVICE
					 | USB_DIR_OUT,
					 addr, 0x0,
					 fw_buffer, FW_BUF_SIZE,
					 DEF_TIMEOUT);

		if (retval < 0)
			goto fail;
	}
	/* Let the 8051 run the freshly loaded code */
	retval = ezusb_8051_cpucs(upriv, 0);
	if (retval < 0)
		goto fail;

	goto exit;
 fail:
	printk(KERN_ERR PFX "Firmware download failed, error %d\n",
	       retval);
 exit:
	return retval;
}
867
868static int ezusb_access_ltv(struct ezusb_priv *upriv,
869 struct request_context *ctx,
870 u16 length, const void *data, u16 frame_type,
871 void *ans_buff, int ans_size, u16 *ans_length)
872{
873 int req_size;
874 int retval = 0;
875 enum ezusb_state state;
876
877 BUG_ON(in_irq());
878
879 if (!upriv->udev) {
880 dbg("Device disconnected");
881 return -ENODEV;
882 }
883
884 if (upriv->read_urb->status != -EINPROGRESS)
885 err("%s: in urb not pending", __func__);
886
887 /* protect upriv->reply_count, guarantee sequential numbers */
888 spin_lock_bh(&upriv->reply_count_lock);
889 req_size = ezusb_fill_req(ctx->buf, length, ctx->out_rid, data,
890 frame_type, upriv->reply_count);
891 usb_fill_bulk_urb(ctx->outurb, upriv->udev, upriv->write_pipe,
892 ctx->buf, req_size,
893 ezusb_request_out_callback, ctx);
894
895 if (ctx->in_rid)
896 upriv->reply_count = ezusb_reply_inc(upriv->reply_count);
897
898 ezusb_req_enqueue_run(upriv, ctx);
899
900 spin_unlock_bh(&upriv->reply_count_lock);
901
902 if (ctx->in_rid)
903 ezusb_req_ctx_wait(upriv, ctx);
904
905 state = ctx->state;
906 switch (state) {
907 case EZUSB_CTX_COMPLETE:
908 retval = ctx->outurb->status;
909 break;
910
911 case EZUSB_CTX_QUEUED:
912 case EZUSB_CTX_REQ_SUBMITTED:
913 if (!ctx->in_rid)
914 break;
915 default:
916 err("%s: Unexpected context state %d", __func__,
917 state);
918 /* fall though */
919 case EZUSB_CTX_REQ_TIMEOUT:
920 case EZUSB_CTX_REQ_FAILED:
921 case EZUSB_CTX_RESP_TIMEOUT:
922 case EZUSB_CTX_REQSUBMIT_FAIL:
923 printk(KERN_ERR PFX "Access failed, resetting (state %d,"
924 " reply_count %d)\n", state, upriv->reply_count);
925 upriv->reply_count = 0;
926 if (state == EZUSB_CTX_REQ_TIMEOUT
927 || state == EZUSB_CTX_RESP_TIMEOUT) {
928 printk(KERN_ERR PFX "ctx timed out\n");
929 retval = -ETIMEDOUT;
930 } else {
931 printk(KERN_ERR PFX "ctx failed\n");
932 retval = -EFAULT;
933 }
934 goto exit;
935 break;
936 }
937 if (ctx->in_rid) {
938 struct ezusb_packet *ans = ctx->buf;
939 int exp_len;
940
941 if (ans->hermes_len != 0)
942 exp_len = le16_to_cpu(ans->hermes_len) * 2 + 12;
943 else
944 exp_len = 14;
945
946 if (exp_len != ctx->buf_length) {
947 err("%s: length mismatch for RID 0x%04x: "
948 "expected %d, got %d", __func__,
949 ctx->in_rid, exp_len, ctx->buf_length);
950 retval = -EIO;
951 goto exit;
952 }
953
954 if (ans_buff)
955 memcpy(ans_buff, ans->data,
956 min_t(int, exp_len, ans_size));
957 if (ans_length)
958 *ans_length = le16_to_cpu(ans->hermes_len);
959 }
960 exit:
961 ezusb_request_context_put(ctx);
962 return retval;
963}
964
965static int ezusb_write_ltv(hermes_t *hw, int bap, u16 rid,
966 u16 length, const void *data)
967{
968 struct ezusb_priv *upriv = hw->priv;
969 u16 frame_type;
970 struct request_context *ctx;
971
972 if (length == 0)
973 return -EINVAL;
974
975 length = HERMES_RECLEN_TO_BYTES(length);
976
977 /* On memory mapped devices HERMES_RID_CNFGROUPADDRESSES can be
978 * set to be empty, but the USB bridge doesn't like it */
979 if (length == 0)
980 return 0;
981
982 ctx = ezusb_alloc_ctx(upriv, rid, EZUSB_RID_ACK);
983 if (!ctx)
984 return -ENOMEM;
985
986 if (rid == EZUSB_RID_TX)
987 frame_type = EZUSB_FRAME_DATA;
988 else
989 frame_type = EZUSB_FRAME_CONTROL;
990
991 return ezusb_access_ltv(upriv, ctx, length, data, frame_type,
992 NULL, 0, NULL);
993}
994
995static int ezusb_read_ltv(hermes_t *hw, int bap, u16 rid,
996 unsigned bufsize, u16 *length, void *buf)
997{
998 struct ezusb_priv *upriv = hw->priv;
999 struct request_context *ctx;
1000
1001 if ((bufsize < 0) || (bufsize % 2))
1002 return -EINVAL;
1003
1004 ctx = ezusb_alloc_ctx(upriv, rid, rid);
1005 if (!ctx)
1006 return -ENOMEM;
1007
1008 return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL,
1009 buf, bufsize, length);
1010}
1011
1012static int ezusb_doicmd_wait(hermes_t *hw, u16 cmd, u16 parm0, u16 parm1,
1013 u16 parm2, struct hermes_response *resp)
1014{
1015 struct ezusb_priv *upriv = hw->priv;
1016 struct request_context *ctx;
1017
1018 __le16 data[4] = {
1019 cpu_to_le16(cmd),
1020 cpu_to_le16(parm0),
1021 cpu_to_le16(parm1),
1022 cpu_to_le16(parm2),
1023 };
1024 dbg("0x%04X, parm0 0x%04X, parm1 0x%04X, parm2 0x%04X",
1025 cmd, parm0, parm1, parm2);
1026 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK);
1027 if (!ctx)
1028 return -ENOMEM;
1029
1030 return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
1031 EZUSB_FRAME_CONTROL, NULL, 0, NULL);
1032}
1033
1034static int ezusb_docmd_wait(hermes_t *hw, u16 cmd, u16 parm0,
1035 struct hermes_response *resp)
1036{
1037 struct ezusb_priv *upriv = hw->priv;
1038 struct request_context *ctx;
1039
1040 __le16 data[4] = {
1041 cpu_to_le16(cmd),
1042 cpu_to_le16(parm0),
1043 0,
1044 0,
1045 };
1046 dbg("0x%04X, parm0 0x%04X", cmd, parm0);
1047 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK);
1048 if (!ctx)
1049 return -ENOMEM;
1050
1051 return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
1052 EZUSB_FRAME_CONTROL, NULL, 0, NULL);
1053}
1054
/*
 * Pseudo-BAP read: copy @len bytes at @offset out of the most recently
 * received bulk-IN buffer.  RX frame data starts right after the ezusb
 * header; for info frames the layout is shifted by the 4 length/type
 * bytes.  Bounds are checked against the urb's actual_length.
 *
 * NOTE(review): the out-of-bounds error code differs between the two
 * paths (-EINVAL for rx, -EFAULT for info) — looks unintentional, but
 * callers may only test for non-zero; confirm before unifying.
 */
static int ezusb_bap_pread(struct hermes *hw, int bap,
			   void *buf, int len, u16 id, u16 offset)
{
	struct ezusb_priv *upriv = hw->priv;
	struct ezusb_packet *ans = (void *) upriv->read_urb->transfer_buffer;
	int actual_length = upriv->read_urb->actual_length;

	if (id == EZUSB_RID_RX) {
		if ((sizeof(*ans) + offset + len) > actual_length) {
			printk(KERN_ERR PFX "BAP read beyond buffer end "
			       "in rx frame\n");
			return -EINVAL;
		}
		memcpy(buf, ans->data + offset, len);
		return 0;
	}

	if (EZUSB_IS_INFO(id)) {
		/* Include 4 bytes for length/type */
		if ((sizeof(*ans) + offset + len - 4) > actual_length) {
			printk(KERN_ERR PFX "BAP read beyond buffer end "
			       "in info frame\n");
			return -EFAULT;
		}
		memcpy(buf, ans->data + offset - 4, len);
	} else {
		printk(KERN_ERR PFX "Unexpected fid 0x%04x\n", id);
		return -EINVAL;
	}

	return 0;
}
1087
/*
 * Read @pda_len bytes of production data from device address @pda_addr
 * into @pda.  The first two words (length and CFG_PROD_DATA id) are
 * synthesised here, and the device's data lands at &pda[2], because
 * the wl_lkm firmware format omits that header.
 */
static int ezusb_read_pda(struct hermes *hw, __le16 *pda,
			  u32 pda_addr, u16 pda_len)
{
	struct ezusb_priv *upriv = hw->priv;
	struct request_context *ctx;
	__le16 data[] = {
		cpu_to_le16(pda_addr & 0xffff),
		cpu_to_le16(pda_len - 4)
	};
	ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_READ_PDA, EZUSB_RID_READ_PDA);
	if (!ctx)
		return -ENOMEM;

	/* wl_lkm does not include PDA size in the PDA area.
	 * We will pad the information into pda, so other routines
	 * don't have to be modified */
	pda[0] = cpu_to_le16(pda_len - 2);
	/* Includes CFG_PROD_DATA but not itself */
	pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */

	return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
				EZUSB_FRAME_CONTROL, &pda[2], pda_len - 4,
				NULL);
}
1112
1113static int ezusb_program_init(struct hermes *hw, u32 entry_point)
1114{
1115 struct ezusb_priv *upriv = hw->priv;
1116 struct request_context *ctx;
1117 __le32 data = cpu_to_le32(entry_point);
1118
1119 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_INIT, EZUSB_RID_ACK);
1120 if (!ctx)
1121 return -ENOMEM;
1122
1123 return ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
1124 EZUSB_FRAME_CONTROL, NULL, 0, NULL);
1125}
1126
1127static int ezusb_program_end(struct hermes *hw)
1128{
1129 struct ezusb_priv *upriv = hw->priv;
1130 struct request_context *ctx;
1131
1132 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_END, EZUSB_RID_ACK);
1133 if (!ctx)
1134 return -ENOMEM;
1135
1136 return ezusb_access_ltv(upriv, ctx, 0, NULL,
1137 EZUSB_FRAME_CONTROL, NULL, 0, NULL);
1138}
1139
1140static int ezusb_program_bytes(struct hermes *hw, const char *buf,
1141 u32 addr, u32 len)
1142{
1143 struct ezusb_priv *upriv = hw->priv;
1144 struct request_context *ctx;
1145 __le32 data = cpu_to_le32(addr);
1146 int err;
1147
1148 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_SET_ADDR, EZUSB_RID_ACK);
1149 if (!ctx)
1150 return -ENOMEM;
1151
1152 err = ezusb_access_ltv(upriv, ctx, sizeof(data), &data,
1153 EZUSB_FRAME_CONTROL, NULL, 0, NULL);
1154 if (err)
1155 return err;
1156
1157 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_BYTES, EZUSB_RID_ACK);
1158 if (!ctx)
1159 return -ENOMEM;
1160
1161 return ezusb_access_ltv(upriv, ctx, len, buf,
1162 EZUSB_FRAME_CONTROL, NULL, 0, NULL);
1163}
1164
1165static int ezusb_program(struct hermes *hw, const char *buf,
1166 u32 addr, u32 len)
1167{
1168 u32 ch_addr;
1169 u32 ch_len;
1170 int err = 0;
1171
1172 /* We can only send 2048 bytes out of the bulk xmit at a time,
1173 * so we have to split any programming into chunks of <2048
1174 * bytes. */
1175
1176 ch_len = (len < MAX_DL_SIZE) ? len : MAX_DL_SIZE;
1177 ch_addr = addr;
1178
1179 while (ch_addr < (addr + len)) {
1180 pr_debug("Programming subblock of length %d "
1181 "to address 0x%08x. Data @ %p\n",
1182 ch_len, ch_addr, &buf[ch_addr - addr]);
1183
1184 err = ezusb_program_bytes(hw, &buf[ch_addr - addr],
1185 ch_addr, ch_len);
1186 if (err)
1187 break;
1188
1189 ch_addr += ch_len;
1190 ch_len = ((addr + len - ch_addr) < MAX_DL_SIZE) ?
1191 (addr + len - ch_addr) : MAX_DL_SIZE;
1192 }
1193
1194 return err;
1195}
1196
/*
 * TX entry point.  Builds an EZUSB_RID_TX request in a fresh context
 * (tx_control word, the frame from @skb, optional Michael MIC), stops
 * the netif queue and hands the packet to ezusb_access_ltv(); the
 * queue is woken again from ezusb_ctx_complete() when the urb
 * finishes.
 */
static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct orinoco_private *priv = ndev_priv(dev);
	struct net_device_stats *stats = &priv->stats;
	struct ezusb_priv *upriv = priv->card;
	u8 mic[MICHAEL_MIC_LEN+1];
	int err = 0;
	int tx_control;
	unsigned long flags;
	struct request_context *ctx;
	u8 *buf;
	int tx_size;

	if (!netif_running(dev)) {
		printk(KERN_ERR "%s: Tx on stopped device!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	if (netif_queue_stopped(dev)) {
		printk(KERN_DEBUG "%s: Tx while transmitter busy!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	if (orinoco_lock(priv, &flags) != 0) {
		printk(KERN_ERR
		       "%s: ezusb_xmit() called while hw_unavailable\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	if (!netif_carrier_ok(dev) ||
	    (priv->iw_mode == NL80211_IFTYPE_MONITOR)) {
		/* Oops, the firmware hasn't established a connection,
		   silently drop the packet (this seems to be the
		   safest approach). */
		goto drop;
	}

	/* Check packet length */
	if (skb->len < ETH_HLEN)
		goto drop;

	ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0);
	if (!ctx)
		goto busy;

	memset(ctx->buf, 0, BULK_BUF_SIZE);
	buf = ctx->buf->data;

	tx_control = 0;

	/* Let the orinoco core encapsulate the frame and compute the
	 * MIC if needed */
	err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control,
				       &mic[0]);
	if (err)
		goto drop;

	{
		/* The payload starts with the 16-bit tx_control word */
		__le16 *tx_cntl = (__le16 *)buf;
		*tx_cntl = cpu_to_le16(tx_control);
		buf += sizeof(*tx_cntl);
	}

	memcpy(buf, skb->data, skb->len);
	buf += skb->len;

	if (tx_control & HERMES_TXCTRL_MIC) {
		u8 *m = mic;
		/* Mic has been offset so it can be copied to an even
		 * address. We're copying eveything anyway, so we
		 * don't need to copy that first byte. */
		if (skb->len % 2)
			m++;
		memcpy(buf, m, MICHAEL_MIC_LEN);
		buf += MICHAEL_MIC_LEN;
	}

	/* Finally, we actually initiate the send */
	netif_stop_queue(dev);

	/* The card may behave better if we send evenly sized usb transfers */
	tx_size = ALIGN(buf - ctx->buf->data, 2);

	err = ezusb_access_ltv(upriv, ctx, tx_size, NULL,
			       EZUSB_FRAME_DATA, NULL, 0, NULL);

	if (err) {
		netif_start_queue(dev);
		if (net_ratelimit())
			printk(KERN_ERR "%s: Error %d transmitting packet\n",
			       dev->name, err);
		goto busy;
	}

	dev->trans_start = jiffies;
	stats->tx_bytes += skb->len;
	goto ok;

 drop:
	stats->tx_errors++;
	stats->tx_dropped++;

 ok:
	orinoco_unlock(priv, &flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

 busy:
	orinoco_unlock(priv, &flags);
	return NETDEV_TX_BUSY;
}
1309
/* hermes_ops allocate hook: the USB bridge has no real FID
 * allocation — TX always uses the fixed pseudo-FID EZUSB_RID_TX, so
 * just report that and succeed. */
static int ezusb_allocate(struct hermes *hw, u16 size, u16 *fid)
{
	*fid = EZUSB_RID_TX;
	return 0;
}
1315
1316
/*
 * Hard-reset the bridge: cycle the 8051 through reset, then send the
 * vendor TRIGER request.  Returns 0 on success or the first failing
 * call's error code.
 */
static int ezusb_hard_reset(struct orinoco_private *priv)
{
	struct ezusb_priv *upriv = priv->card;
	int retval = ezusb_8051_cpucs(upriv, 1);

	if (retval < 0) {
		err("Failed to reset");
		return retval;
	}

	retval = ezusb_8051_cpucs(upriv, 0);
	if (retval < 0) {
		err("Failed to unreset");
		return retval;
	}

	dbg("sending control message");
	retval = usb_control_msg(upriv->udev,
				 usb_sndctrlpipe(upriv->udev, 0),
				 EZUSB_REQUEST_TRIGER,
				 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
				 USB_DIR_OUT, 0x0, 0x0, NULL, 0,
				 DEF_TIMEOUT);
	if (retval < 0) {
		err("EZUSB_REQUEST_TRIGER failed retval %d", retval);
		return retval;
	}
#if 0
	dbg("Sending EZUSB_REQUEST_TRIG_AC");
	retval = usb_control_msg(upriv->udev,
				 usb_sndctrlpipe(upriv->udev, 0),
				 EZUSB_REQUEST_TRIG_AC,
				 USB_TYPE_VENDOR | USB_RECIP_DEVICE |
				 USB_DIR_OUT, 0x00FA, 0x0, NULL, 0,
				 DEF_TIMEOUT);
	if (retval < 0) {
		err("EZUSB_REQUEST_TRIG_AC failed retval %d", retval);
		return retval;
	}
#endif

	return 0;
}
1360
1361
/*
 * hermes_ops init hook: reset the reply counter, write the simulated
 * registers orinoco.c checks, restart the bulk-IN urb and run the
 * INIT1 write followed by HERMES_CMD_INIT.
 */
static int ezusb_init(hermes_t *hw)
{
	struct ezusb_priv *upriv = hw->priv;
	int retval;

	BUG_ON(in_interrupt());
	BUG_ON(!upriv);

	upriv->reply_count = 0;
	/* Write the MAGIC number on the simulated registers to keep
	 * orinoco.c happy */
	hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC);
	hermes_write_regn(hw, RXFID, EZUSB_RID_RX);

	/* Make sure the IN urb is idle before resubmitting it */
	usb_kill_urb(upriv->read_urb);
	ezusb_submit_in_urb(upriv);

	retval = ezusb_write_ltv(hw, 0, EZUSB_RID_INIT1,
				 HERMES_BYTES_TO_RECLEN(2), "\x10\x00");
	if (retval < 0) {
		printk(KERN_ERR PFX "EZUSB_RID_INIT1 error %d\n", retval);
		return retval;
	}

	retval = ezusb_docmd_wait(hw, HERMES_CMD_INIT, 0, NULL);
	if (retval < 0) {
		printk(KERN_ERR PFX "HERMES_CMD_INIT error %d\n", retval);
		return retval;
	}

	return 0;
}
1394
/*
 * Completion handler for the shared bulk-IN urb.
 *
 * Validates the packet (length, header CRC), then dispatches it:
 * command replies go to ezusb_request_in_callback(); RX data and info
 * frames are handed to the orinoco core.  The urb is resubmitted
 * unless the status indicates the device is going away.
 */
static void ezusb_bulk_in_callback(struct urb *urb)
{
	struct ezusb_priv *upriv = (struct ezusb_priv *) urb->context;
	struct ezusb_packet *ans = urb->transfer_buffer;
	u16 crc;
	u16 hermes_rid;

	if (upriv->udev == NULL) {
		dbg("disconnected");
		return;
	}

	if (urb->status == -ETIMEDOUT) {
		/* When a device gets unplugged we get this every time
		 * we resubmit, flooding the logs. Since we don't use
		 * USB timeouts, it shouldn't happen any other time*/
		pr_warning("%s: urb timed out, not resubmiting", __func__);
		return;
	}
	if (urb->status == -ECONNABORTED) {
		pr_warning("%s: connection abort, resubmiting urb",
			   __func__);
		goto resubmit;
	}
	if ((urb->status == -EILSEQ)
	    || (urb->status == -ENOENT)
	    || (urb->status == -ECONNRESET)) {
		dbg("status %d, not resubmiting", urb->status);
		return;
	}
	if (urb->status)
		dbg("status: %d length: %d",
		    urb->status, urb->actual_length);
	if (urb->actual_length < sizeof(*ans)) {
		err("%s: short read, ignoring", __func__);
		goto resubmit;
	}
	/* Verify the header checksum before trusting any field */
	crc = build_crc(ans);
	if (le16_to_cpu(ans->crc) != crc) {
		err("CRC error, ignoring packet");
		goto resubmit;
	}

	hermes_rid = le16_to_cpu(ans->hermes_rid);
	if ((hermes_rid != EZUSB_RID_RX) && !EZUSB_IS_INFO(hermes_rid)) {
		/* Neither data nor info frame: a command reply */
		ezusb_request_in_callback(upriv, urb);
	} else if (upriv->dev) {
		struct net_device *dev = upriv->dev;
		struct orinoco_private *priv = ndev_priv(dev);
		hermes_t *hw = &priv->hw;

		if (hermes_rid == EZUSB_RID_RX) {
			__orinoco_ev_rx(dev, hw);
		} else {
			hermes_write_regn(hw, INFOFID,
					  le16_to_cpu(ans->hermes_rid));
			__orinoco_ev_info(dev, hw);
		}
	}

 resubmit:
	if (upriv->udev)
		ezusb_submit_in_urb(upriv);
}
1459
1460static inline void ezusb_delete(struct ezusb_priv *upriv)
1461{
1462 struct net_device *dev;
1463 struct list_head *item;
1464 struct list_head *tmp_item;
1465 unsigned long flags;
1466
1467 BUG_ON(in_interrupt());
1468 BUG_ON(!upriv);
1469
1470 dev = upriv->dev;
1471 mutex_lock(&upriv->mtx);
1472
1473 upriv->udev = NULL; /* No timer will be rearmed from here */
1474
1475 usb_kill_urb(upriv->read_urb);
1476
1477 spin_lock_irqsave(&upriv->req_lock, flags);
1478 list_for_each_safe(item, tmp_item, &upriv->req_active) {
1479 struct request_context *ctx;
1480 int err;
1481
1482 ctx = list_entry(item, struct request_context, list);
1483 atomic_inc(&ctx->refcount);
1484
1485 ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
1486 err = usb_unlink_urb(ctx->outurb);
1487
1488 spin_unlock_irqrestore(&upriv->req_lock, flags);
1489 if (err == -EINPROGRESS)
1490 wait_for_completion(&ctx->done);
1491
1492 del_timer_sync(&ctx->timer);
1493 /* FIXME: there is an slight chance for the irq handler to
1494 * be running */
1495 if (!list_empty(&ctx->list))
1496 ezusb_ctx_complete(ctx);
1497
1498 ezusb_request_context_put(ctx);
1499 spin_lock_irqsave(&upriv->req_lock, flags);
1500 }
1501 spin_unlock_irqrestore(&upriv->req_lock, flags);
1502
1503 list_for_each_safe(item, tmp_item, &upriv->req_pending)
1504 ezusb_ctx_complete(list_entry(item,
1505 struct request_context, list));
1506
1507 if (upriv->read_urb->status == -EINPROGRESS)
1508 printk(KERN_ERR PFX "Some URB in progress\n");
1509
1510 mutex_unlock(&upriv->mtx);
1511
1512 kfree(upriv->read_urb->transfer_buffer);
1513 if (upriv->bap_buf != NULL)
1514 kfree(upriv->bap_buf);
1515 if (upriv->read_urb != NULL)
1516 usb_free_urb(upriv->read_urb);
1517 if (upriv->dev) {
1518 struct orinoco_private *priv = ndev_priv(upriv->dev);
1519 orinoco_if_del(priv);
1520 free_orinocodev(priv);
1521 }
1522}
1523
/* hermes_ops lock hook: implemented with BH-level locking —
 * presumably this backend never takes the lock from hard-irq
 * context (TODO confirm); the flags argument is unused. */
static void ezusb_lock_irqsave(spinlock_t *lock,
			       unsigned long *flags) __acquires(lock)
{
	spin_lock_bh(lock);
}
1529
/* hermes_ops unlock hook, paired with ezusb_lock_irqsave(); the
 * flags argument is unused. */
static void ezusb_unlock_irqrestore(spinlock_t *lock,
				    unsigned long *flags) __releases(lock)
{
	spin_unlock_bh(lock);
}
1535
/* hermes_ops lock hook (no flags variant), BH-level like the others */
static void ezusb_lock_irq(spinlock_t *lock) __acquires(lock)
{
	spin_lock_bh(lock);
}
1540
/* hermes_ops unlock hook, paired with ezusb_lock_irq() */
static void ezusb_unlock_irq(spinlock_t *lock) __releases(lock)
{
	spin_unlock_bh(lock);
}
1545
1546static const struct hermes_ops ezusb_ops = {
1547 .init = ezusb_init,
1548 .cmd_wait = ezusb_docmd_wait,
1549 .init_cmd_wait = ezusb_doicmd_wait,
1550 .allocate = ezusb_allocate,
1551 .read_ltv = ezusb_read_ltv,
1552 .write_ltv = ezusb_write_ltv,
1553 .bap_pread = ezusb_bap_pread,
1554 .read_pda = ezusb_read_pda,
1555 .program_init = ezusb_program_init,
1556 .program_end = ezusb_program_end,
1557 .program = ezusb_program,
1558 .lock_irqsave = ezusb_lock_irqsave,
1559 .unlock_irqrestore = ezusb_unlock_irqrestore,
1560 .lock_irq = ezusb_lock_irq,
1561 .unlock_irq = ezusb_unlock_irq,
1562};
1563
1564static const struct net_device_ops ezusb_netdev_ops = {
1565 .ndo_open = orinoco_open,
1566 .ndo_stop = orinoco_stop,
1567 .ndo_start_xmit = ezusb_xmit,
1568 .ndo_set_multicast_list = orinoco_set_multicast_list,
1569 .ndo_change_mtu = orinoco_change_mtu,
1570 .ndo_set_mac_address = eth_mac_addr,
1571 .ndo_validate_addr = eth_validate_addr,
1572 .ndo_tx_timeout = orinoco_tx_timeout,
1573 .ndo_get_stats = orinoco_get_stats,
1574};
1575
1576static int ezusb_probe(struct usb_interface *interface,
1577 const struct usb_device_id *id)
1578{
1579 struct usb_device *udev = interface_to_usbdev(interface);
1580 struct orinoco_private *priv;
1581 hermes_t *hw;
1582 struct ezusb_priv *upriv = NULL;
1583 struct usb_interface_descriptor *iface_desc;
1584 struct usb_endpoint_descriptor *ep;
1585 const struct firmware *fw_entry;
1586 int retval = 0;
1587 int i;
1588
1589 priv = alloc_orinocodev(sizeof(*upriv), &udev->dev,
1590 ezusb_hard_reset, NULL);
1591 if (!priv) {
1592 err("Couldn't allocate orinocodev");
1593 goto exit;
1594 }
1595
1596 hw = &priv->hw;
1597
1598 upriv = priv->card;
1599
1600 mutex_init(&upriv->mtx);
1601 spin_lock_init(&upriv->reply_count_lock);
1602
1603 spin_lock_init(&upriv->req_lock);
1604 INIT_LIST_HEAD(&upriv->req_pending);
1605 INIT_LIST_HEAD(&upriv->req_active);
1606
1607 upriv->udev = udev;
1608
1609 hw->iobase = (void __force __iomem *) &upriv->hermes_reg_fake;
1610 hw->reg_spacing = HERMES_16BIT_REGSPACING;
1611 hw->priv = upriv;
1612 hw->ops = &ezusb_ops;
1613
1614 /* set up the endpoint information */
1615 /* check out the endpoints */
1616
1617 iface_desc = &interface->altsetting[0].desc;
1618 for (i = 0; i < iface_desc->bNumEndpoints; ++i) {
1619 ep = &interface->altsetting[0].endpoint[i].desc;
1620
1621 if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
1622 == USB_DIR_IN) &&
1623 ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
1624 == USB_ENDPOINT_XFER_BULK)) {
1625 /* we found a bulk in endpoint */
1626 if (upriv->read_urb != NULL) {
1627 pr_warning("Found a second bulk in ep, ignored");
1628 continue;
1629 }
1630
1631 upriv->read_urb = usb_alloc_urb(0, GFP_KERNEL);
1632 if (!upriv->read_urb) {
1633 err("No free urbs available");
1634 goto error;
1635 }
1636 if (le16_to_cpu(ep->wMaxPacketSize) != 64)
1637 pr_warning("bulk in: wMaxPacketSize!= 64");
1638 if (ep->bEndpointAddress != (2 | USB_DIR_IN))
1639 pr_warning("bulk in: bEndpointAddress: %d",
1640 ep->bEndpointAddress);
1641 upriv->read_pipe = usb_rcvbulkpipe(udev,
1642 ep->
1643 bEndpointAddress);
1644 upriv->read_urb->transfer_buffer =
1645 kmalloc(BULK_BUF_SIZE, GFP_KERNEL);
1646 if (!upriv->read_urb->transfer_buffer) {
1647 err("Couldn't allocate IN buffer");
1648 goto error;
1649 }
1650 }
1651
1652 if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
1653 == USB_DIR_OUT) &&
1654 ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
1655 == USB_ENDPOINT_XFER_BULK)) {
1656 /* we found a bulk out endpoint */
1657 if (upriv->bap_buf != NULL) {
1658 pr_warning("Found a second bulk out ep, ignored");
1659 continue;
1660 }
1661
1662 if (le16_to_cpu(ep->wMaxPacketSize) != 64)
1663 pr_warning("bulk out: wMaxPacketSize != 64");
1664 if (ep->bEndpointAddress != 2)
1665 pr_warning("bulk out: bEndpointAddress: %d",
1666 ep->bEndpointAddress);
1667 upriv->write_pipe = usb_sndbulkpipe(udev,
1668 ep->
1669 bEndpointAddress);
1670 upriv->bap_buf = kmalloc(BULK_BUF_SIZE, GFP_KERNEL);
1671 if (!upriv->bap_buf) {
1672 err("Couldn't allocate bulk_out_buffer");
1673 goto error;
1674 }
1675 }
1676 }
1677 if (!upriv->bap_buf || !upriv->read_urb) {
1678 err("Didn't find the required bulk endpoints");
1679 goto error;
1680 }
1681
1682 if (request_firmware(&fw_entry, "orinoco_ezusb_fw",
1683 &interface->dev) == 0) {
1684 firmware.size = fw_entry->size;
1685 firmware.code = fw_entry->data;
1686 }
1687 if (firmware.size && firmware.code) {
1688 ezusb_firmware_download(upriv, &firmware);
1689 } else {
1690 err("No firmware to download");
1691 goto error;
1692 }
1693
1694 if (ezusb_hard_reset(priv) < 0) {
1695 err("Cannot reset the device");
1696 goto error;
1697 }
1698
1699 /* If the firmware is already downloaded orinoco.c will call
1700 * ezusb_init but if the firmware is not already there, that will make
1701 * the kernel very unstable, so we try initializing here and quit in
1702 * case of error */
1703 if (ezusb_init(hw) < 0) {
1704 err("Couldn't initialize the device");
1705 err("Firmware may not be downloaded or may be wrong.");
1706 goto error;
1707 }
1708
1709 /* Initialise the main driver */
1710 if (orinoco_init(priv) != 0) {
1711 err("orinoco_init() failed\n");
1712 goto error;
1713 }
1714
1715 if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) {
1716 upriv->dev = NULL;
1717 err("%s: orinoco_if_add() failed", __func__);
1718 goto error;
1719 }
1720 upriv->dev = priv->ndev;
1721
1722 goto exit;
1723
1724 error:
1725 ezusb_delete(upriv);
1726 if (upriv->dev) {
1727 /* upriv->dev was 0, so ezusb_delete() didn't free it */
1728 free_orinocodev(priv);
1729 }
1730 upriv = NULL;
1731 retval = -EFAULT;
1732 exit:
1733 if (fw_entry) {
1734 firmware.code = NULL;
1735 firmware.size = 0;
1736 release_firmware(fw_entry);
1737 }
1738 usb_set_intfdata(interface, upriv);
1739 return retval;
1740}
1741
1742
1743static void ezusb_disconnect(struct usb_interface *intf)
1744{
1745 struct ezusb_priv *upriv = usb_get_intfdata(intf);
1746 usb_set_intfdata(intf, NULL);
1747 ezusb_delete(upriv);
1748 printk(KERN_INFO PFX "Disconnected\n");
1749}
1750
1751
1752/* usb specific object needed to register this driver with the usb subsystem */
1753static struct usb_driver orinoco_driver = {
1754 .name = DRIVER_NAME,
1755 .probe = ezusb_probe,
1756 .disconnect = ezusb_disconnect,
1757 .id_table = ezusb_table,
1758};
1759
1760/* Can't be declared "const" or the whole __initdata section will
1761 * become const */
1762static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
1763 " (Manuel Estrada Sainz)";
1764
1765static int __init ezusb_module_init(void)
1766{
1767 int err;
1768
1769 printk(KERN_DEBUG "%s\n", version);
1770
1771 /* register this driver with the USB subsystem */
1772 err = usb_register(&orinoco_driver);
1773 if (err < 0) {
1774 printk(KERN_ERR PFX "usb_register failed, error %d\n",
1775 err);
1776 return err;
1777 }
1778
1779 return 0;
1780}
1781
1782static void __exit ezusb_module_exit(void)
1783{
1784 /* deregister this driver with the USB subsystem */
1785 usb_deregister(&orinoco_driver);
1786}
1787
1788
1789module_init(ezusb_module_init);
1790module_exit(ezusb_module_exit);
1791
1792MODULE_AUTHOR("Manuel Estrada Sainz");
1793MODULE_DESCRIPTION
1794 ("Driver for Orinoco wireless LAN cards using EZUSB bridge");
1795MODULE_LICENSE("Dual MPL/GPL");
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index 330d42d45333..4300d9db7d8c 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -127,7 +127,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
127{ 127{
128 struct wiphy *wiphy = priv_to_wiphy(priv); 128 struct wiphy *wiphy = priv_to_wiphy(priv);
129 struct ieee80211_channel *channel; 129 struct ieee80211_channel *channel;
130 u8 *ie; 130 const u8 *ie;
131 u64 timestamp; 131 u64 timestamp;
132 s32 signal; 132 s32 signal;
133 u16 capability; 133 u16 capability;
@@ -136,7 +136,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
136 int chan, freq; 136 int chan, freq;
137 137
138 ie_len = len - sizeof(*bss); 138 ie_len = len - sizeof(*bss);
139 ie = orinoco_get_ie(bss->data, ie_len, WLAN_EID_DS_PARAMS); 139 ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
140 chan = ie ? ie[2] : 0; 140 chan = ie ? ie[2] : 0;
141 freq = ieee80211_dsss_chan_to_freq(chan); 141 freq = ieee80211_dsss_chan_to_freq(chan);
142 channel = ieee80211_get_channel(wiphy, freq); 142 channel = ieee80211_get_channel(wiphy, freq);
diff --git a/drivers/net/wireless/orinoco/spectrum_cs.c b/drivers/net/wireless/orinoco/spectrum_cs.c
index 59bda240fdc2..b51a9adc80f6 100644
--- a/drivers/net/wireless/orinoco/spectrum_cs.c
+++ b/drivers/net/wireless/orinoco/spectrum_cs.c
@@ -57,7 +57,6 @@ MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket
57 * struct orinoco_private */ 57 * struct orinoco_private */
58struct orinoco_pccard { 58struct orinoco_pccard {
59 struct pcmcia_device *p_dev; 59 struct pcmcia_device *p_dev;
60 dev_node_t node;
61}; 60};
62 61
63/********************************************************************/ 62/********************************************************************/
@@ -193,10 +192,6 @@ spectrum_cs_probe(struct pcmcia_device *link)
193 card->p_dev = link; 192 card->p_dev = link;
194 link->priv = priv; 193 link->priv = priv;
195 194
196 /* Interrupt setup */
197 link->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
198 link->irq.Handler = orinoco_interrupt;
199
200 /* General socket configuration defaults can go here. In this 195 /* General socket configuration defaults can go here. In this
201 * client, we assume very little, and rely on the CIS for 196 * client, we assume very little, and rely on the CIS for
202 * almost everything. In most clients, many details (i.e., 197 * almost everything. In most clients, many details (i.e.,
@@ -218,8 +213,7 @@ static void spectrum_cs_detach(struct pcmcia_device *link)
218{ 213{
219 struct orinoco_private *priv = link->priv; 214 struct orinoco_private *priv = link->priv;
220 215
221 if (link->dev_node) 216 orinoco_if_del(priv);
222 orinoco_if_del(priv);
223 217
224 spectrum_cs_release(link); 218 spectrum_cs_release(link);
225 219
@@ -304,7 +298,6 @@ static int
304spectrum_cs_config(struct pcmcia_device *link) 298spectrum_cs_config(struct pcmcia_device *link)
305{ 299{
306 struct orinoco_private *priv = link->priv; 300 struct orinoco_private *priv = link->priv;
307 struct orinoco_pccard *card = priv->card;
308 hermes_t *hw = &priv->hw; 301 hermes_t *hw = &priv->hw;
309 int ret; 302 int ret;
310 void __iomem *mem; 303 void __iomem *mem;
@@ -332,12 +325,7 @@ spectrum_cs_config(struct pcmcia_device *link)
332 goto failed; 325 goto failed;
333 } 326 }
334 327
335 /* 328 ret = pcmcia_request_irq(link, orinoco_interrupt);
336 * Allocate an interrupt line. Note that this does not assign
337 * a handler to the interrupt, unless the 'Handler' member of
338 * the irq structure is initialized.
339 */
340 ret = pcmcia_request_irq(link, &link->irq);
341 if (ret) 329 if (ret)
342 goto failed; 330 goto failed;
343 331
@@ -349,6 +337,7 @@ spectrum_cs_config(struct pcmcia_device *link)
349 goto failed; 337 goto failed;
350 338
351 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); 339 hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING);
340 hw->eeprom_pda = true;
352 341
353 /* 342 /*
354 * This actually configures the PCMCIA socket -- setting up 343 * This actually configures the PCMCIA socket -- setting up
@@ -359,9 +348,6 @@ spectrum_cs_config(struct pcmcia_device *link)
359 if (ret) 348 if (ret)
360 goto failed; 349 goto failed;
361 350
362 /* Ok, we have the configuration, prepare to register the netdev */
363 card->node.major = card->node.minor = 0;
364
365 /* Reset card */ 351 /* Reset card */
366 if (spectrum_cs_hard_reset(priv) != 0) 352 if (spectrum_cs_hard_reset(priv) != 0)
367 goto failed; 353 goto failed;
@@ -374,17 +360,11 @@ spectrum_cs_config(struct pcmcia_device *link)
374 360
375 /* Register an interface with the stack */ 361 /* Register an interface with the stack */
376 if (orinoco_if_add(priv, link->io.BasePort1, 362 if (orinoco_if_add(priv, link->io.BasePort1,
377 link->irq.AssignedIRQ) != 0) { 363 link->irq, NULL) != 0) {
378 printk(KERN_ERR PFX "orinoco_if_add() failed\n"); 364 printk(KERN_ERR PFX "orinoco_if_add() failed\n");
379 goto failed; 365 goto failed;
380 } 366 }
381 367
382 /* At this point, the dev_node_t structure(s) needs to be
383 * initialized and arranged in a linked list at link->dev_node. */
384 strcpy(card->node.dev_name, priv->ndev->name);
385 link->dev_node = &card->node; /* link->dev_node being non-NULL is also
386 * used to indicate that the
387 * net_device has been registered */
388 return 0; 368 return 0;
389 369
390 failed: 370 failed:
@@ -405,9 +385,9 @@ spectrum_cs_release(struct pcmcia_device *link)
405 385
406 /* We're committed to taking the device away now, so mark the 386 /* We're committed to taking the device away now, so mark the
407 * hardware as unavailable */ 387 * hardware as unavailable */
408 spin_lock_irqsave(&priv->lock, flags); 388 priv->hw.ops->lock_irqsave(&priv->lock, &flags);
409 priv->hw_unavailable++; 389 priv->hw_unavailable++;
410 spin_unlock_irqrestore(&priv->lock, flags); 390 priv->hw.ops->unlock_irqrestore(&priv->lock, &flags);
411 391
412 pcmcia_disable_device(link); 392 pcmcia_disable_device(link);
413 if (priv->hw.iobase) 393 if (priv->hw.iobase)
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index fbcc6e1a2e1d..5775124e2aee 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -458,7 +458,7 @@ static int orinoco_ioctl_setfreq(struct net_device *dev,
458 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { 458 if (priv->iw_mode == NL80211_IFTYPE_MONITOR) {
459 /* Fast channel change - no commit if successful */ 459 /* Fast channel change - no commit if successful */
460 hermes_t *hw = &priv->hw; 460 hermes_t *hw = &priv->hw;
461 err = hermes_docmd_wait(hw, HERMES_CMD_TEST | 461 err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST |
462 HERMES_TEST_SET_CHANNEL, 462 HERMES_TEST_SET_CHANNEL,
463 chan, NULL); 463 chan, NULL);
464 } 464 }
@@ -538,125 +538,6 @@ static int orinoco_ioctl_setsens(struct net_device *dev,
538 return -EINPROGRESS; /* Call commit handler */ 538 return -EINPROGRESS; /* Call commit handler */
539} 539}
540 540
541static int orinoco_ioctl_setrts(struct net_device *dev,
542 struct iw_request_info *info,
543 struct iw_param *rrq,
544 char *extra)
545{
546 struct orinoco_private *priv = ndev_priv(dev);
547 int val = rrq->value;
548 unsigned long flags;
549
550 if (rrq->disabled)
551 val = 2347;
552
553 if ((val < 0) || (val > 2347))
554 return -EINVAL;
555
556 if (orinoco_lock(priv, &flags) != 0)
557 return -EBUSY;
558
559 priv->rts_thresh = val;
560 orinoco_unlock(priv, &flags);
561
562 return -EINPROGRESS; /* Call commit handler */
563}
564
565static int orinoco_ioctl_getrts(struct net_device *dev,
566 struct iw_request_info *info,
567 struct iw_param *rrq,
568 char *extra)
569{
570 struct orinoco_private *priv = ndev_priv(dev);
571
572 rrq->value = priv->rts_thresh;
573 rrq->disabled = (rrq->value == 2347);
574 rrq->fixed = 1;
575
576 return 0;
577}
578
579static int orinoco_ioctl_setfrag(struct net_device *dev,
580 struct iw_request_info *info,
581 struct iw_param *frq,
582 char *extra)
583{
584 struct orinoco_private *priv = ndev_priv(dev);
585 int err = -EINPROGRESS; /* Call commit handler */
586 unsigned long flags;
587
588 if (orinoco_lock(priv, &flags) != 0)
589 return -EBUSY;
590
591 if (priv->has_mwo) {
592 if (frq->disabled)
593 priv->mwo_robust = 0;
594 else {
595 if (frq->fixed)
596 printk(KERN_WARNING "%s: Fixed fragmentation "
597 "is not supported on this firmware. "
598 "Using MWO robust instead.\n",
599 dev->name);
600 priv->mwo_robust = 1;
601 }
602 } else {
603 if (frq->disabled)
604 priv->frag_thresh = 2346;
605 else {
606 if ((frq->value < 256) || (frq->value > 2346))
607 err = -EINVAL;
608 else
609 /* must be even */
610 priv->frag_thresh = frq->value & ~0x1;
611 }
612 }
613
614 orinoco_unlock(priv, &flags);
615
616 return err;
617}
618
619static int orinoco_ioctl_getfrag(struct net_device *dev,
620 struct iw_request_info *info,
621 struct iw_param *frq,
622 char *extra)
623{
624 struct orinoco_private *priv = ndev_priv(dev);
625 hermes_t *hw = &priv->hw;
626 int err;
627 u16 val;
628 unsigned long flags;
629
630 if (orinoco_lock(priv, &flags) != 0)
631 return -EBUSY;
632
633 if (priv->has_mwo) {
634 err = hermes_read_wordrec(hw, USER_BAP,
635 HERMES_RID_CNFMWOROBUST_AGERE,
636 &val);
637 if (err)
638 val = 0;
639
640 frq->value = val ? 2347 : 0;
641 frq->disabled = !val;
642 frq->fixed = 0;
643 } else {
644 err = hermes_read_wordrec(hw, USER_BAP,
645 HERMES_RID_CNFFRAGMENTATIONTHRESHOLD,
646 &val);
647 if (err)
648 val = 0;
649
650 frq->value = val;
651 frq->disabled = (val >= 2346);
652 frq->fixed = 1;
653 }
654
655 orinoco_unlock(priv, &flags);
656
657 return err;
658}
659
660static int orinoco_ioctl_setrate(struct net_device *dev, 541static int orinoco_ioctl_setrate(struct net_device *dev,
661 struct iw_request_info *info, 542 struct iw_request_info *info,
662 struct iw_param *rrq, 543 struct iw_param *rrq,
@@ -1201,60 +1082,6 @@ static int orinoco_ioctl_set_mlme(struct net_device *dev,
1201 return ret; 1082 return ret;
1202} 1083}
1203 1084
1204static int orinoco_ioctl_getretry(struct net_device *dev,
1205 struct iw_request_info *info,
1206 struct iw_param *rrq,
1207 char *extra)
1208{
1209 struct orinoco_private *priv = ndev_priv(dev);
1210 hermes_t *hw = &priv->hw;
1211 int err = 0;
1212 u16 short_limit, long_limit, lifetime;
1213 unsigned long flags;
1214
1215 if (orinoco_lock(priv, &flags) != 0)
1216 return -EBUSY;
1217
1218 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT,
1219 &short_limit);
1220 if (err)
1221 goto out;
1222
1223 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT,
1224 &long_limit);
1225 if (err)
1226 goto out;
1227
1228 err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME,
1229 &lifetime);
1230 if (err)
1231 goto out;
1232
1233 rrq->disabled = 0; /* Can't be disabled */
1234
1235 /* Note : by default, display the retry number */
1236 if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) {
1237 rrq->flags = IW_RETRY_LIFETIME;
1238 rrq->value = lifetime * 1000; /* ??? */
1239 } else {
1240 /* By default, display the min number */
1241 if ((rrq->flags & IW_RETRY_LONG)) {
1242 rrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
1243 rrq->value = long_limit;
1244 } else {
1245 rrq->flags = IW_RETRY_LIMIT;
1246 rrq->value = short_limit;
1247 if (short_limit != long_limit)
1248 rrq->flags |= IW_RETRY_SHORT;
1249 }
1250 }
1251
1252 out:
1253 orinoco_unlock(priv, &flags);
1254
1255 return err;
1256}
1257
1258static int orinoco_ioctl_reset(struct net_device *dev, 1085static int orinoco_ioctl_reset(struct net_device *dev,
1259 struct iw_request_info *info, 1086 struct iw_request_info *info,
1260 void *wrqu, 1087 void *wrqu,
@@ -1446,8 +1273,8 @@ static int orinoco_ioctl_getrid(struct net_device *dev,
1446 if (orinoco_lock(priv, &flags) != 0) 1273 if (orinoco_lock(priv, &flags) != 0)
1447 return -EBUSY; 1274 return -EBUSY;
1448 1275
1449 err = hermes_read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length, 1276 err = hw->ops->read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length,
1450 extra); 1277 extra);
1451 if (err) 1278 if (err)
1452 goto out; 1279 goto out;
1453 1280
@@ -1506,46 +1333,44 @@ static const struct iw_priv_args orinoco_privtab[] = {
1506 * Structures to export the Wireless Handlers 1333 * Structures to export the Wireless Handlers
1507 */ 1334 */
1508 1335
1509#define STD_IW_HANDLER(id, func) \
1510 [IW_IOCTL_IDX(id)] = (iw_handler) func
1511static const iw_handler orinoco_handler[] = { 1336static const iw_handler orinoco_handler[] = {
1512 STD_IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit), 1337 IW_HANDLER(SIOCSIWCOMMIT, (iw_handler)orinoco_ioctl_commit),
1513 STD_IW_HANDLER(SIOCGIWNAME, cfg80211_wext_giwname), 1338 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
1514 STD_IW_HANDLER(SIOCSIWFREQ, orinoco_ioctl_setfreq), 1339 IW_HANDLER(SIOCSIWFREQ, (iw_handler)orinoco_ioctl_setfreq),
1515 STD_IW_HANDLER(SIOCGIWFREQ, orinoco_ioctl_getfreq), 1340 IW_HANDLER(SIOCGIWFREQ, (iw_handler)orinoco_ioctl_getfreq),
1516 STD_IW_HANDLER(SIOCSIWMODE, cfg80211_wext_siwmode), 1341 IW_HANDLER(SIOCSIWMODE, (iw_handler)cfg80211_wext_siwmode),
1517 STD_IW_HANDLER(SIOCGIWMODE, cfg80211_wext_giwmode), 1342 IW_HANDLER(SIOCGIWMODE, (iw_handler)cfg80211_wext_giwmode),
1518 STD_IW_HANDLER(SIOCSIWSENS, orinoco_ioctl_setsens), 1343 IW_HANDLER(SIOCSIWSENS, (iw_handler)orinoco_ioctl_setsens),
1519 STD_IW_HANDLER(SIOCGIWSENS, orinoco_ioctl_getsens), 1344 IW_HANDLER(SIOCGIWSENS, (iw_handler)orinoco_ioctl_getsens),
1520 STD_IW_HANDLER(SIOCGIWRANGE, cfg80211_wext_giwrange), 1345 IW_HANDLER(SIOCGIWRANGE, (iw_handler)cfg80211_wext_giwrange),
1521 STD_IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy), 1346 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
1522 STD_IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy), 1347 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
1523 STD_IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy), 1348 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
1524 STD_IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy), 1349 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
1525 STD_IW_HANDLER(SIOCSIWAP, orinoco_ioctl_setwap), 1350 IW_HANDLER(SIOCSIWAP, (iw_handler)orinoco_ioctl_setwap),
1526 STD_IW_HANDLER(SIOCGIWAP, orinoco_ioctl_getwap), 1351 IW_HANDLER(SIOCGIWAP, (iw_handler)orinoco_ioctl_getwap),
1527 STD_IW_HANDLER(SIOCSIWSCAN, cfg80211_wext_siwscan), 1352 IW_HANDLER(SIOCSIWSCAN, (iw_handler)cfg80211_wext_siwscan),
1528 STD_IW_HANDLER(SIOCGIWSCAN, cfg80211_wext_giwscan), 1353 IW_HANDLER(SIOCGIWSCAN, (iw_handler)cfg80211_wext_giwscan),
1529 STD_IW_HANDLER(SIOCSIWESSID, orinoco_ioctl_setessid), 1354 IW_HANDLER(SIOCSIWESSID, (iw_handler)orinoco_ioctl_setessid),
1530 STD_IW_HANDLER(SIOCGIWESSID, orinoco_ioctl_getessid), 1355 IW_HANDLER(SIOCGIWESSID, (iw_handler)orinoco_ioctl_getessid),
1531 STD_IW_HANDLER(SIOCSIWRATE, orinoco_ioctl_setrate), 1356 IW_HANDLER(SIOCSIWRATE, (iw_handler)orinoco_ioctl_setrate),
1532 STD_IW_HANDLER(SIOCGIWRATE, orinoco_ioctl_getrate), 1357 IW_HANDLER(SIOCGIWRATE, (iw_handler)orinoco_ioctl_getrate),
1533 STD_IW_HANDLER(SIOCSIWRTS, orinoco_ioctl_setrts), 1358 IW_HANDLER(SIOCSIWRTS, (iw_handler)cfg80211_wext_siwrts),
1534 STD_IW_HANDLER(SIOCGIWRTS, orinoco_ioctl_getrts), 1359 IW_HANDLER(SIOCGIWRTS, (iw_handler)cfg80211_wext_giwrts),
1535 STD_IW_HANDLER(SIOCSIWFRAG, orinoco_ioctl_setfrag), 1360 IW_HANDLER(SIOCSIWFRAG, (iw_handler)cfg80211_wext_siwfrag),
1536 STD_IW_HANDLER(SIOCGIWFRAG, orinoco_ioctl_getfrag), 1361 IW_HANDLER(SIOCGIWFRAG, (iw_handler)cfg80211_wext_giwfrag),
1537 STD_IW_HANDLER(SIOCGIWRETRY, orinoco_ioctl_getretry), 1362 IW_HANDLER(SIOCGIWRETRY, (iw_handler)cfg80211_wext_giwretry),
1538 STD_IW_HANDLER(SIOCSIWENCODE, orinoco_ioctl_setiwencode), 1363 IW_HANDLER(SIOCSIWENCODE, (iw_handler)orinoco_ioctl_setiwencode),
1539 STD_IW_HANDLER(SIOCGIWENCODE, orinoco_ioctl_getiwencode), 1364 IW_HANDLER(SIOCGIWENCODE, (iw_handler)orinoco_ioctl_getiwencode),
1540 STD_IW_HANDLER(SIOCSIWPOWER, orinoco_ioctl_setpower), 1365 IW_HANDLER(SIOCSIWPOWER, (iw_handler)orinoco_ioctl_setpower),
1541 STD_IW_HANDLER(SIOCGIWPOWER, orinoco_ioctl_getpower), 1366 IW_HANDLER(SIOCGIWPOWER, (iw_handler)orinoco_ioctl_getpower),
1542 STD_IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie), 1367 IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie),
1543 STD_IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie), 1368 IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie),
1544 STD_IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme), 1369 IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme),
1545 STD_IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth), 1370 IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth),
1546 STD_IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth), 1371 IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth),
1547 STD_IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext), 1372 IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
1548 STD_IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext), 1373 IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
1549}; 1374};
1550 1375
1551 1376
@@ -1553,15 +1378,15 @@ static const iw_handler orinoco_handler[] = {
1553 Added typecasting since we no longer use iwreq_data -- Moustafa 1378 Added typecasting since we no longer use iwreq_data -- Moustafa
1554 */ 1379 */
1555static const iw_handler orinoco_private_handler[] = { 1380static const iw_handler orinoco_private_handler[] = {
1556 [0] = (iw_handler) orinoco_ioctl_reset, 1381 [0] = (iw_handler)orinoco_ioctl_reset,
1557 [1] = (iw_handler) orinoco_ioctl_reset, 1382 [1] = (iw_handler)orinoco_ioctl_reset,
1558 [2] = (iw_handler) orinoco_ioctl_setport3, 1383 [2] = (iw_handler)orinoco_ioctl_setport3,
1559 [3] = (iw_handler) orinoco_ioctl_getport3, 1384 [3] = (iw_handler)orinoco_ioctl_getport3,
1560 [4] = (iw_handler) orinoco_ioctl_setpreamble, 1385 [4] = (iw_handler)orinoco_ioctl_setpreamble,
1561 [5] = (iw_handler) orinoco_ioctl_getpreamble, 1386 [5] = (iw_handler)orinoco_ioctl_getpreamble,
1562 [6] = (iw_handler) orinoco_ioctl_setibssport, 1387 [6] = (iw_handler)orinoco_ioctl_setibssport,
1563 [7] = (iw_handler) orinoco_ioctl_getibssport, 1388 [7] = (iw_handler)orinoco_ioctl_getibssport,
1564 [9] = (iw_handler) orinoco_ioctl_getrid, 1389 [9] = (iw_handler)orinoco_ioctl_getrid,
1565}; 1390};
1566 1391
1567const struct iw_handler_def orinoco_handler_def = { 1392const struct iw_handler_def orinoco_handler_def = {
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index a7cb9eb759a1..c072f41747ca 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -546,7 +546,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
546 IEEE80211_HW_SUPPORTS_PS | 546 IEEE80211_HW_SUPPORTS_PS |
547 IEEE80211_HW_PS_NULLFUNC_STACK | 547 IEEE80211_HW_PS_NULLFUNC_STACK |
548 IEEE80211_HW_BEACON_FILTER | 548 IEEE80211_HW_BEACON_FILTER |
549 IEEE80211_HW_NOISE_DBM; 549 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
550 550
551 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 551 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
552 BIT(NL80211_IFTYPE_ADHOC) | 552 BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 269fda362836..07c4528f6e6b 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -132,7 +132,7 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev)
132 132
133static void p54p_refill_rx_ring(struct ieee80211_hw *dev, 133static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
134 int ring_index, struct p54p_desc *ring, u32 ring_limit, 134 int ring_index, struct p54p_desc *ring, u32 ring_limit,
135 struct sk_buff **rx_buf) 135 struct sk_buff **rx_buf, u32 index)
136{ 136{
137 struct p54p_priv *priv = dev->priv; 137 struct p54p_priv *priv = dev->priv;
138 struct p54p_ring_control *ring_control = priv->ring_control; 138 struct p54p_ring_control *ring_control = priv->ring_control;
@@ -140,7 +140,7 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
140 140
141 idx = le32_to_cpu(ring_control->host_idx[ring_index]); 141 idx = le32_to_cpu(ring_control->host_idx[ring_index]);
142 limit = idx; 142 limit = idx;
143 limit -= le32_to_cpu(ring_control->device_idx[ring_index]); 143 limit -= index;
144 limit = ring_limit - limit; 144 limit = ring_limit - limit;
145 145
146 i = idx % ring_limit; 146 i = idx % ring_limit;
@@ -232,7 +232,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
232 i %= ring_limit; 232 i %= ring_limit;
233 } 233 }
234 234
235 p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf); 235 p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
236} 236}
237 237
238static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, 238static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
@@ -246,7 +246,7 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
246 u32 idx, i; 246 u32 idx, i;
247 247
248 i = (*index) % ring_limit; 248 i = (*index) % ring_limit;
249 (*index) = idx = le32_to_cpu(ring_control->device_idx[1]); 249 (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
250 idx %= ring_limit; 250 idx %= ring_limit;
251 251
252 while (i != idx) { 252 while (i != idx) {
@@ -445,10 +445,10 @@ static int p54p_open(struct ieee80211_hw *dev)
445 priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0; 445 priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
446 446
447 p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data, 447 p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
448 ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data); 448 ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);
449 449
450 p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt, 450 p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
451 ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt); 451 ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);
452 452
453 P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma)); 453 P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
454 P54P_READ(ring_control_base); 454 P54P_READ(ring_control_base);
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 743a6c68b29d..d5b197b4d5bb 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -875,7 +875,6 @@ static void p54u_stop(struct ieee80211_hw *dev)
875 the hardware is still usable next time we want to start it. 875 the hardware is still usable next time we want to start it.
876 until then, we just stop listening to the hardware.. */ 876 until then, we just stop listening to the hardware.. */
877 p54u_free_urbs(dev); 877 p54u_free_urbs(dev);
878 return;
879} 878}
880 879
881static int __devinit p54u_probe(struct usb_interface *intf, 880static int __devinit p54u_probe(struct usb_interface *intf,
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 66057999a93c..4e6891099d43 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -38,7 +38,7 @@ static void p54_dump_tx_queue(struct p54_common *priv)
38 u32 largest_hole = 0, free; 38 u32 largest_hole = 0, free;
39 39
40 spin_lock_irqsave(&priv->tx_queue.lock, flags); 40 spin_lock_irqsave(&priv->tx_queue.lock, flags);
41 printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) --- \n", 41 printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) ---\n",
42 wiphy_name(priv->hw->wiphy), skb_queue_len(&priv->tx_queue)); 42 wiphy_name(priv->hw->wiphy), skb_queue_len(&priv->tx_queue));
43 43
44 prev_addr = priv->rx_start; 44 prev_addr = priv->rx_start;
@@ -350,7 +350,6 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
350 rx_status->flag |= RX_FLAG_MMIC_ERROR; 350 rx_status->flag |= RX_FLAG_MMIC_ERROR;
351 351
352 rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi); 352 rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
353 rx_status->noise = priv->noise;
354 if (hdr->rate & 0x10) 353 if (hdr->rate & 0x10)
355 rx_status->flag |= RX_FLAG_SHORTPRE; 354 rx_status->flag |= RX_FLAG_SHORTPRE;
356 if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ) 355 if (priv->hw->conf.channel->band == IEEE80211_BAND_5GHZ)
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index a45818ebfdfb..8d1190c0f062 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -210,8 +210,6 @@ prism54_update_stats(struct work_struct *work)
210 priv->local_iwstatistics.discard.retries = r.u; 210 priv->local_iwstatistics.discard.retries = r.u;
211 211
212 mutex_unlock(&priv->stats_lock); 212 mutex_unlock(&priv->stats_lock);
213
214 return;
215} 213}
216 214
217struct iw_statistics * 215struct iw_statistics *
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 689d59a13d5b..2c8cc954d1b6 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -228,14 +228,14 @@ islpci_interrupt(int irq, void *config)
228 228
229#if VERBOSE > SHOW_ERROR_MESSAGES 229#if VERBOSE > SHOW_ERROR_MESSAGES
230 DEBUG(SHOW_FUNCTION_CALLS, 230 DEBUG(SHOW_FUNCTION_CALLS,
231 "IRQ: Identification register 0x%p 0x%x \n", device, reg); 231 "IRQ: Identification register 0x%p 0x%x\n", device, reg);
232#endif 232#endif
233 233
234 /* check for each bit in the register separately */ 234 /* check for each bit in the register separately */
235 if (reg & ISL38XX_INT_IDENT_UPDATE) { 235 if (reg & ISL38XX_INT_IDENT_UPDATE) {
236#if VERBOSE > SHOW_ERROR_MESSAGES 236#if VERBOSE > SHOW_ERROR_MESSAGES
237 /* Queue has been updated */ 237 /* Queue has been updated */
238 DEBUG(SHOW_TRACING, "IRQ: Update flag \n"); 238 DEBUG(SHOW_TRACING, "IRQ: Update flag\n");
239 239
240 DEBUG(SHOW_QUEUE_INDEXES, 240 DEBUG(SHOW_QUEUE_INDEXES,
241 "CB drv Qs: [%i][%i][%i][%i][%i][%i]\n", 241 "CB drv Qs: [%i][%i][%i][%i][%i][%i]\n",
@@ -301,7 +301,7 @@ islpci_interrupt(int irq, void *config)
301 ISL38XX_CB_RX_DATA_LQ) != 0) { 301 ISL38XX_CB_RX_DATA_LQ) != 0) {
302#if VERBOSE > SHOW_ERROR_MESSAGES 302#if VERBOSE > SHOW_ERROR_MESSAGES
303 DEBUG(SHOW_TRACING, 303 DEBUG(SHOW_TRACING,
304 "Received frame in Data Low Queue \n"); 304 "Received frame in Data Low Queue\n");
305#endif 305#endif
306 islpci_eth_receive(priv); 306 islpci_eth_receive(priv);
307 } 307 }
@@ -326,7 +326,7 @@ islpci_interrupt(int irq, void *config)
326 /* Device has been initialized */ 326 /* Device has been initialized */
327#if VERBOSE > SHOW_ERROR_MESSAGES 327#if VERBOSE > SHOW_ERROR_MESSAGES
328 DEBUG(SHOW_TRACING, 328 DEBUG(SHOW_TRACING,
329 "IRQ: Init flag, device initialized \n"); 329 "IRQ: Init flag, device initialized\n");
330#endif 330#endif
331 wake_up(&priv->reset_done); 331 wake_up(&priv->reset_done);
332 } 332 }
@@ -334,7 +334,7 @@ islpci_interrupt(int irq, void *config)
334 if (reg & ISL38XX_INT_IDENT_SLEEP) { 334 if (reg & ISL38XX_INT_IDENT_SLEEP) {
335 /* Device intends to move to powersave state */ 335 /* Device intends to move to powersave state */
336#if VERBOSE > SHOW_ERROR_MESSAGES 336#if VERBOSE > SHOW_ERROR_MESSAGES
337 DEBUG(SHOW_TRACING, "IRQ: Sleep flag \n"); 337 DEBUG(SHOW_TRACING, "IRQ: Sleep flag\n");
338#endif 338#endif
339 isl38xx_handle_sleep_request(priv->control_block, 339 isl38xx_handle_sleep_request(priv->control_block,
340 &powerstate, 340 &powerstate,
@@ -344,7 +344,7 @@ islpci_interrupt(int irq, void *config)
344 if (reg & ISL38XX_INT_IDENT_WAKEUP) { 344 if (reg & ISL38XX_INT_IDENT_WAKEUP) {
345 /* Device has been woken up to active state */ 345 /* Device has been woken up to active state */
346#if VERBOSE > SHOW_ERROR_MESSAGES 346#if VERBOSE > SHOW_ERROR_MESSAGES
347 DEBUG(SHOW_TRACING, "IRQ: Wakeup flag \n"); 347 DEBUG(SHOW_TRACING, "IRQ: Wakeup flag\n");
348#endif 348#endif
349 349
350 isl38xx_handle_wakeup(priv->control_block, 350 isl38xx_handle_wakeup(priv->control_block,
@@ -635,7 +635,7 @@ islpci_alloc_memory(islpci_private *priv)
635 ioremap(pci_resource_start(priv->pdev, 0), 635 ioremap(pci_resource_start(priv->pdev, 0),
636 ISL38XX_PCI_MEM_SIZE))) { 636 ISL38XX_PCI_MEM_SIZE))) {
637 /* error in remapping the PCI device memory address range */ 637 /* error in remapping the PCI device memory address range */
638 printk(KERN_ERR "PCI memory remapping failed \n"); 638 printk(KERN_ERR "PCI memory remapping failed\n");
639 return -1; 639 return -1;
640 } 640 }
641 641
@@ -902,7 +902,7 @@ islpci_setup(struct pci_dev *pdev)
902 902
903 if (register_netdev(ndev)) { 903 if (register_netdev(ndev)) {
904 DEBUG(SHOW_ERROR_MESSAGES, 904 DEBUG(SHOW_ERROR_MESSAGES,
905 "ERROR: register_netdev() failed \n"); 905 "ERROR: register_netdev() failed\n");
906 goto do_islpci_free_memory; 906 goto do_islpci_free_memory;
907 } 907 }
908 908
@@ -946,7 +946,7 @@ islpci_set_state(islpci_private *priv, islpci_state_t new_state)
946 if (!priv->state_off) 946 if (!priv->state_off)
947 priv->state = new_state; 947 priv->state = new_state;
948 break; 948 break;
949 }; 949 }
950#if 0 950#if 0
951 printk(KERN_DEBUG "%s: state transition %d -> %d (off#%d)\n", 951 printk(KERN_DEBUG "%s: state transition %d -> %d (off#%d)\n",
952 priv->ndev->name, old_state, new_state, priv->state_off); 952 priv->ndev->name, old_state, new_state, priv->state_off);
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index ac99eaaeabce..2fc52bc2d7dd 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -90,7 +90,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
90 u32 curr_frag; 90 u32 curr_frag;
91 91
92#if VERBOSE > SHOW_ERROR_MESSAGES 92#if VERBOSE > SHOW_ERROR_MESSAGES
93 DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit \n"); 93 DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
94#endif 94#endif
95 95
96 /* lock the driver code */ 96 /* lock the driver code */
@@ -141,7 +141,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
141 } 141 }
142 142
143#if VERBOSE > SHOW_ERROR_MESSAGES 143#if VERBOSE > SHOW_ERROR_MESSAGES
144 DEBUG(SHOW_TRACING, "memmove %p %p %i \n", skb->data, 144 DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
145 src, skb->len); 145 src, skb->len);
146#endif 146#endif
147 } else { 147 } else {
@@ -224,8 +224,6 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
224 priv->data_low_tx_full = 1; 224 priv->data_low_tx_full = 1;
225 } 225 }
226 226
227 /* set the transmission time */
228 ndev->trans_start = jiffies;
229 ndev->stats.tx_packets++; 227 ndev->stats.tx_packets++;
230 ndev->stats.tx_bytes += skb->len; 228 ndev->stats.tx_bytes += skb->len;
231 229
@@ -320,7 +318,7 @@ islpci_eth_receive(islpci_private *priv)
320 int discard = 0; 318 int discard = 0;
321 319
322#if VERBOSE > SHOW_ERROR_MESSAGES 320#if VERBOSE > SHOW_ERROR_MESSAGES
323 DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive \n"); 321 DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
324#endif 322#endif
325 323
326 /* the device has written an Ethernet frame in the data area 324 /* the device has written an Ethernet frame in the data area
@@ -432,7 +430,7 @@ islpci_eth_receive(islpci_private *priv)
432 skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2); 430 skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
433 if (unlikely(skb == NULL)) { 431 if (unlikely(skb == NULL)) {
434 /* error allocating an sk_buff structure elements */ 432 /* error allocating an sk_buff structure elements */
435 DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb \n"); 433 DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
436 break; 434 break;
437 } 435 }
438 skb_reserve(skb, (4 - (long) skb->data) & 0x03); 436 skb_reserve(skb, (4 - (long) skb->data) & 0x03);
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index adb289723a96..a5224f6160e4 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -114,7 +114,7 @@ islpci_mgmt_rx_fill(struct net_device *ndev)
114 u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]); 114 u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);
115 115
116#if VERBOSE > SHOW_ERROR_MESSAGES 116#if VERBOSE > SHOW_ERROR_MESSAGES
117 DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill \n"); 117 DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
118#endif 118#endif
119 119
120 while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) { 120 while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
@@ -212,7 +212,7 @@ islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
212 { 212 {
213 pimfor_header_t *h = buf.mem; 213 pimfor_header_t *h = buf.mem;
214 DEBUG(SHOW_PIMFOR_FRAMES, 214 DEBUG(SHOW_PIMFOR_FRAMES,
215 "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x \n", 215 "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
216 h->operation, oid, h->device_id, h->flags, length); 216 h->operation, oid, h->device_id, h->flags, length);
217 217
218 /* display the buffer contents for debugging */ 218 /* display the buffer contents for debugging */
@@ -280,7 +280,7 @@ islpci_mgt_receive(struct net_device *ndev)
280 u32 curr_frag; 280 u32 curr_frag;
281 281
282#if VERBOSE > SHOW_ERROR_MESSAGES 282#if VERBOSE > SHOW_ERROR_MESSAGES
283 DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive \n"); 283 DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
284#endif 284#endif
285 285
286 /* Only once per interrupt, determine fragment range to 286 /* Only once per interrupt, determine fragment range to
@@ -339,7 +339,7 @@ islpci_mgt_receive(struct net_device *ndev)
339 339
340#if VERBOSE > SHOW_ERROR_MESSAGES 340#if VERBOSE > SHOW_ERROR_MESSAGES
341 DEBUG(SHOW_PIMFOR_FRAMES, 341 DEBUG(SHOW_PIMFOR_FRAMES,
342 "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n", 342 "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
343 header->operation, header->oid, header->device_id, 343 header->operation, header->oid, header->device_id,
344 header->flags, header->length); 344 header->flags, header->length);
345 345
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index d66933d70fb9..9b796cae4afe 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -820,7 +820,7 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
820 k = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr); 820 k = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr);
821 for (i = 0; i < list->nr; i++) 821 for (i = 0; i < list->nr; i++)
822 k += snprintf(str + k, PRIV_STR_SIZE - k, 822 k += snprintf(str + k, PRIV_STR_SIZE - k,
823 "bss[%u] : \nage=%u\nchannel=%u\n" 823 "bss[%u] :\nage=%u\nchannel=%u\n"
824 "capinfo=0x%X\nrates=0x%X\n" 824 "capinfo=0x%X\nrates=0x%X\n"
825 "basic_rates=0x%X\n", 825 "basic_rates=0x%X\n",
826 i, list->bsslist[i].age, 826 i, list->bsslist[i].age,
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 11865ea21875..abff8934db13 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -51,7 +51,6 @@
51#include <pcmcia/cistpl.h> 51#include <pcmcia/cistpl.h>
52#include <pcmcia/cisreg.h> 52#include <pcmcia/cisreg.h>
53#include <pcmcia/ds.h> 53#include <pcmcia/ds.h>
54#include <pcmcia/mem_op.h>
55 54
56#include <linux/wireless.h> 55#include <linux/wireless.h>
57#include <net/iw_handler.h> 56#include <net/iw_handler.h>
@@ -321,10 +320,6 @@ static int ray_probe(struct pcmcia_device *p_dev)
321 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 320 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
322 p_dev->io.IOAddrLines = 5; 321 p_dev->io.IOAddrLines = 5;
323 322
324 /* Interrupt setup. For PCMCIA, driver takes what's given */
325 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
326 p_dev->irq.Handler = &ray_interrupt;
327
328 /* General socket configuration */ 323 /* General socket configuration */
329 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 324 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
330 p_dev->conf.IntType = INT_MEMORY_AND_IO; 325 p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -383,8 +378,7 @@ static void ray_detach(struct pcmcia_device *link)
383 del_timer(&local->timer); 378 del_timer(&local->timer);
384 379
385 if (link->priv) { 380 if (link->priv) {
386 if (link->dev_node) 381 unregister_netdev(dev);
387 unregister_netdev(dev);
388 free_netdev(dev); 382 free_netdev(dev);
389 } 383 }
390 dev_dbg(&link->dev, "ray_cs ray_detach ending\n"); 384 dev_dbg(&link->dev, "ray_cs ray_detach ending\n");
@@ -417,10 +411,10 @@ static int ray_config(struct pcmcia_device *link)
417 /* Now allocate an interrupt line. Note that this does not 411 /* Now allocate an interrupt line. Note that this does not
418 actually assign a handler to the interrupt. 412 actually assign a handler to the interrupt.
419 */ 413 */
420 ret = pcmcia_request_irq(link, &link->irq); 414 ret = pcmcia_request_irq(link, ray_interrupt);
421 if (ret) 415 if (ret)
422 goto failed; 416 goto failed;
423 dev->irq = link->irq.AssignedIRQ; 417 dev->irq = link->irq;
424 418
425 /* This actually configures the PCMCIA socket -- setting up 419 /* This actually configures the PCMCIA socket -- setting up
426 the I/O windows and the interrupt mapping. 420 the I/O windows and the interrupt mapping.
@@ -493,9 +487,6 @@ static int ray_config(struct pcmcia_device *link)
493 return i; 487 return i;
494 } 488 }
495 489
496 strcpy(local->node.dev_name, dev->name);
497 link->dev_node = &local->node;
498
499 printk(KERN_INFO "%s: RayLink, irq %d, hw_addr %pM\n", 490 printk(KERN_INFO "%s: RayLink, irq %d, hw_addr %pM\n",
500 dev->name, dev->irq, dev->dev_addr); 491 dev->name, dev->irq, dev->dev_addr);
501 492
@@ -555,7 +546,7 @@ static int ray_init(struct net_device *dev)
555 local->fw_ver = local->startup_res.firmware_version[0]; 546 local->fw_ver = local->startup_res.firmware_version[0];
556 local->fw_bld = local->startup_res.firmware_version[1]; 547 local->fw_bld = local->startup_res.firmware_version[1];
557 local->fw_var = local->startup_res.firmware_version[2]; 548 local->fw_var = local->startup_res.firmware_version[2];
558 dev_dbg(&link->dev, "ray_init firmware version %d.%d \n", local->fw_ver, 549 dev_dbg(&link->dev, "ray_init firmware version %d.%d\n", local->fw_ver,
559 local->fw_bld); 550 local->fw_bld);
560 551
561 local->tib_length = 0x20; 552 local->tib_length = 0x20;
@@ -735,8 +726,6 @@ static void verify_dl_startup(u_long data)
735 start_net((u_long) local); 726 start_net((u_long) local);
736 else 727 else
737 join_net((u_long) local); 728 join_net((u_long) local);
738
739 return;
740} /* end verify_dl_startup */ 729} /* end verify_dl_startup */
741 730
742/*===========================================================================*/ 731/*===========================================================================*/
@@ -764,7 +753,6 @@ static void start_net(u_long data)
764 return; 753 return;
765 } 754 }
766 local->card_status = CARD_DOING_ACQ; 755 local->card_status = CARD_DOING_ACQ;
767 return;
768} /* end start_net */ 756} /* end start_net */
769 757
770/*===========================================================================*/ 758/*===========================================================================*/
@@ -795,7 +783,6 @@ static void join_net(u_long data)
795 return; 783 return;
796 } 784 }
797 local->card_status = CARD_DOING_ACQ; 785 local->card_status = CARD_DOING_ACQ;
798 return;
799} 786}
800 787
801/*============================================================================ 788/*============================================================================
@@ -941,7 +928,6 @@ static netdev_tx_t ray_dev_start_xmit(struct sk_buff *skb,
941 case XMIT_MSG_BAD: 928 case XMIT_MSG_BAD:
942 case XMIT_OK: 929 case XMIT_OK:
943 default: 930 default:
944 dev->trans_start = jiffies;
945 dev_kfree_skb(skb); 931 dev_kfree_skb(skb);
946 } 932 }
947 933
@@ -1112,10 +1098,10 @@ static const struct ethtool_ops netdev_ethtool_ops = {
1112/* 1098/*
1113 * Wireless Handler : get protocol name 1099 * Wireless Handler : get protocol name
1114 */ 1100 */
1115static int ray_get_name(struct net_device *dev, 1101static int ray_get_name(struct net_device *dev, struct iw_request_info *info,
1116 struct iw_request_info *info, char *cwrq, char *extra) 1102 union iwreq_data *wrqu, char *extra)
1117{ 1103{
1118 strcpy(cwrq, "IEEE 802.11-FH"); 1104 strcpy(wrqu->name, "IEEE 802.11-FH");
1119 return 0; 1105 return 0;
1120} 1106}
1121 1107
@@ -1123,9 +1109,8 @@ static int ray_get_name(struct net_device *dev,
1123/* 1109/*
1124 * Wireless Handler : set frequency 1110 * Wireless Handler : set frequency
1125 */ 1111 */
1126static int ray_set_freq(struct net_device *dev, 1112static int ray_set_freq(struct net_device *dev, struct iw_request_info *info,
1127 struct iw_request_info *info, 1113 union iwreq_data *wrqu, char *extra)
1128 struct iw_freq *fwrq, char *extra)
1129{ 1114{
1130 ray_dev_t *local = netdev_priv(dev); 1115 ray_dev_t *local = netdev_priv(dev);
1131 int err = -EINPROGRESS; /* Call commit handler */ 1116 int err = -EINPROGRESS; /* Call commit handler */
@@ -1135,10 +1120,10 @@ static int ray_set_freq(struct net_device *dev,
1135 return -EBUSY; 1120 return -EBUSY;
1136 1121
1137 /* Setting by channel number */ 1122 /* Setting by channel number */
1138 if ((fwrq->m > USA_HOP_MOD) || (fwrq->e > 0)) 1123 if ((wrqu->freq.m > USA_HOP_MOD) || (wrqu->freq.e > 0))
1139 err = -EOPNOTSUPP; 1124 err = -EOPNOTSUPP;
1140 else 1125 else
1141 local->sparm.b5.a_hop_pattern = fwrq->m; 1126 local->sparm.b5.a_hop_pattern = wrqu->freq.m;
1142 1127
1143 return err; 1128 return err;
1144} 1129}
@@ -1147,14 +1132,13 @@ static int ray_set_freq(struct net_device *dev,
1147/* 1132/*
1148 * Wireless Handler : get frequency 1133 * Wireless Handler : get frequency
1149 */ 1134 */
1150static int ray_get_freq(struct net_device *dev, 1135static int ray_get_freq(struct net_device *dev, struct iw_request_info *info,
1151 struct iw_request_info *info, 1136 union iwreq_data *wrqu, char *extra)
1152 struct iw_freq *fwrq, char *extra)
1153{ 1137{
1154 ray_dev_t *local = netdev_priv(dev); 1138 ray_dev_t *local = netdev_priv(dev);
1155 1139
1156 fwrq->m = local->sparm.b5.a_hop_pattern; 1140 wrqu->freq.m = local->sparm.b5.a_hop_pattern;
1157 fwrq->e = 0; 1141 wrqu->freq.e = 0;
1158 return 0; 1142 return 0;
1159} 1143}
1160 1144
@@ -1162,9 +1146,8 @@ static int ray_get_freq(struct net_device *dev,
1162/* 1146/*
1163 * Wireless Handler : set ESSID 1147 * Wireless Handler : set ESSID
1164 */ 1148 */
1165static int ray_set_essid(struct net_device *dev, 1149static int ray_set_essid(struct net_device *dev, struct iw_request_info *info,
1166 struct iw_request_info *info, 1150 union iwreq_data *wrqu, char *extra)
1167 struct iw_point *dwrq, char *extra)
1168{ 1151{
1169 ray_dev_t *local = netdev_priv(dev); 1152 ray_dev_t *local = netdev_priv(dev);
1170 1153
@@ -1173,19 +1156,17 @@ static int ray_set_essid(struct net_device *dev,
1173 return -EBUSY; 1156 return -EBUSY;
1174 1157
1175 /* Check if we asked for `any' */ 1158 /* Check if we asked for `any' */
1176 if (dwrq->flags == 0) { 1159 if (wrqu->essid.flags == 0)
1177 /* Corey : can you do that ? */ 1160 /* Corey : can you do that ? */
1178 return -EOPNOTSUPP; 1161 return -EOPNOTSUPP;
1179 } else {
1180 /* Check the size of the string */
1181 if (dwrq->length > IW_ESSID_MAX_SIZE) {
1182 return -E2BIG;
1183 }
1184 1162
1185 /* Set the ESSID in the card */ 1163 /* Check the size of the string */
1186 memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE); 1164 if (wrqu->essid.length > IW_ESSID_MAX_SIZE)
1187 memcpy(local->sparm.b5.a_current_ess_id, extra, dwrq->length); 1165 return -E2BIG;
1188 } 1166
1167 /* Set the ESSID in the card */
1168 memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
1169 memcpy(local->sparm.b5.a_current_ess_id, extra, wrqu->essid.length);
1189 1170
1190 return -EINPROGRESS; /* Call commit handler */ 1171 return -EINPROGRESS; /* Call commit handler */
1191} 1172}
@@ -1194,9 +1175,8 @@ static int ray_set_essid(struct net_device *dev,
1194/* 1175/*
1195 * Wireless Handler : get ESSID 1176 * Wireless Handler : get ESSID
1196 */ 1177 */
1197static int ray_get_essid(struct net_device *dev, 1178static int ray_get_essid(struct net_device *dev, struct iw_request_info *info,
1198 struct iw_request_info *info, 1179 union iwreq_data *wrqu, char *extra)
1199 struct iw_point *dwrq, char *extra)
1200{ 1180{
1201 ray_dev_t *local = netdev_priv(dev); 1181 ray_dev_t *local = netdev_priv(dev);
1202 1182
@@ -1204,8 +1184,8 @@ static int ray_get_essid(struct net_device *dev,
1204 memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE); 1184 memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);
1205 1185
1206 /* Push it out ! */ 1186 /* Push it out ! */
1207 dwrq->length = strlen(extra); 1187 wrqu->essid.length = strlen(extra);
1208 dwrq->flags = 1; /* active */ 1188 wrqu->essid.flags = 1; /* active */
1209 1189
1210 return 0; 1190 return 0;
1211} 1191}
@@ -1214,14 +1194,13 @@ static int ray_get_essid(struct net_device *dev,
1214/* 1194/*
1215 * Wireless Handler : get AP address 1195 * Wireless Handler : get AP address
1216 */ 1196 */
1217static int ray_get_wap(struct net_device *dev, 1197static int ray_get_wap(struct net_device *dev, struct iw_request_info *info,
1218 struct iw_request_info *info, 1198 union iwreq_data *wrqu, char *extra)
1219 struct sockaddr *awrq, char *extra)
1220{ 1199{
1221 ray_dev_t *local = netdev_priv(dev); 1200 ray_dev_t *local = netdev_priv(dev);
1222 1201
1223 memcpy(awrq->sa_data, local->bss_id, ETH_ALEN); 1202 memcpy(wrqu->ap_addr.sa_data, local->bss_id, ETH_ALEN);
1224 awrq->sa_family = ARPHRD_ETHER; 1203 wrqu->ap_addr.sa_family = ARPHRD_ETHER;
1225 1204
1226 return 0; 1205 return 0;
1227} 1206}
@@ -1230,9 +1209,8 @@ static int ray_get_wap(struct net_device *dev,
1230/* 1209/*
1231 * Wireless Handler : set Bit-Rate 1210 * Wireless Handler : set Bit-Rate
1232 */ 1211 */
1233static int ray_set_rate(struct net_device *dev, 1212static int ray_set_rate(struct net_device *dev, struct iw_request_info *info,
1234 struct iw_request_info *info, 1213 union iwreq_data *wrqu, char *extra)
1235 struct iw_param *vwrq, char *extra)
1236{ 1214{
1237 ray_dev_t *local = netdev_priv(dev); 1215 ray_dev_t *local = netdev_priv(dev);
1238 1216
@@ -1241,15 +1219,15 @@ static int ray_set_rate(struct net_device *dev,
1241 return -EBUSY; 1219 return -EBUSY;
1242 1220
1243 /* Check if rate is in range */ 1221 /* Check if rate is in range */
1244 if ((vwrq->value != 1000000) && (vwrq->value != 2000000)) 1222 if ((wrqu->bitrate.value != 1000000) && (wrqu->bitrate.value != 2000000))
1245 return -EINVAL; 1223 return -EINVAL;
1246 1224
1247 /* Hack for 1.5 Mb/s instead of 2 Mb/s */ 1225 /* Hack for 1.5 Mb/s instead of 2 Mb/s */
1248 if ((local->fw_ver == 0x55) && /* Please check */ 1226 if ((local->fw_ver == 0x55) && /* Please check */
1249 (vwrq->value == 2000000)) 1227 (wrqu->bitrate.value == 2000000))
1250 local->net_default_tx_rate = 3; 1228 local->net_default_tx_rate = 3;
1251 else 1229 else
1252 local->net_default_tx_rate = vwrq->value / 500000; 1230 local->net_default_tx_rate = wrqu->bitrate.value / 500000;
1253 1231
1254 return 0; 1232 return 0;
1255} 1233}
@@ -1258,17 +1236,16 @@ static int ray_set_rate(struct net_device *dev,
1258/* 1236/*
1259 * Wireless Handler : get Bit-Rate 1237 * Wireless Handler : get Bit-Rate
1260 */ 1238 */
1261static int ray_get_rate(struct net_device *dev, 1239static int ray_get_rate(struct net_device *dev, struct iw_request_info *info,
1262 struct iw_request_info *info, 1240 union iwreq_data *wrqu, char *extra)
1263 struct iw_param *vwrq, char *extra)
1264{ 1241{
1265 ray_dev_t *local = netdev_priv(dev); 1242 ray_dev_t *local = netdev_priv(dev);
1266 1243
1267 if (local->net_default_tx_rate == 3) 1244 if (local->net_default_tx_rate == 3)
1268 vwrq->value = 2000000; /* Hum... */ 1245 wrqu->bitrate.value = 2000000; /* Hum... */
1269 else 1246 else
1270 vwrq->value = local->net_default_tx_rate * 500000; 1247 wrqu->bitrate.value = local->net_default_tx_rate * 500000;
1271 vwrq->fixed = 0; /* We are in auto mode */ 1248 wrqu->bitrate.fixed = 0; /* We are in auto mode */
1272 1249
1273 return 0; 1250 return 0;
1274} 1251}
@@ -1277,19 +1254,18 @@ static int ray_get_rate(struct net_device *dev,
1277/* 1254/*
1278 * Wireless Handler : set RTS threshold 1255 * Wireless Handler : set RTS threshold
1279 */ 1256 */
1280static int ray_set_rts(struct net_device *dev, 1257static int ray_set_rts(struct net_device *dev, struct iw_request_info *info,
1281 struct iw_request_info *info, 1258 union iwreq_data *wrqu, char *extra)
1282 struct iw_param *vwrq, char *extra)
1283{ 1259{
1284 ray_dev_t *local = netdev_priv(dev); 1260 ray_dev_t *local = netdev_priv(dev);
1285 int rthr = vwrq->value; 1261 int rthr = wrqu->rts.value;
1286 1262
1287 /* Reject if card is already initialised */ 1263 /* Reject if card is already initialised */
1288 if (local->card_status != CARD_AWAITING_PARAM) 1264 if (local->card_status != CARD_AWAITING_PARAM)
1289 return -EBUSY; 1265 return -EBUSY;
1290 1266
1291 /* if(wrq->u.rts.fixed == 0) we should complain */ 1267 /* if(wrq->u.rts.fixed == 0) we should complain */
1292 if (vwrq->disabled) 1268 if (wrqu->rts.disabled)
1293 rthr = 32767; 1269 rthr = 32767;
1294 else { 1270 else {
1295 if ((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */ 1271 if ((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */
@@ -1305,16 +1281,15 @@ static int ray_set_rts(struct net_device *dev,
1305/* 1281/*
1306 * Wireless Handler : get RTS threshold 1282 * Wireless Handler : get RTS threshold
1307 */ 1283 */
1308static int ray_get_rts(struct net_device *dev, 1284static int ray_get_rts(struct net_device *dev, struct iw_request_info *info,
1309 struct iw_request_info *info, 1285 union iwreq_data *wrqu, char *extra)
1310 struct iw_param *vwrq, char *extra)
1311{ 1286{
1312 ray_dev_t *local = netdev_priv(dev); 1287 ray_dev_t *local = netdev_priv(dev);
1313 1288
1314 vwrq->value = (local->sparm.b5.a_rts_threshold[0] << 8) 1289 wrqu->rts.value = (local->sparm.b5.a_rts_threshold[0] << 8)
1315 + local->sparm.b5.a_rts_threshold[1]; 1290 + local->sparm.b5.a_rts_threshold[1];
1316 vwrq->disabled = (vwrq->value == 32767); 1291 wrqu->rts.disabled = (wrqu->rts.value == 32767);
1317 vwrq->fixed = 1; 1292 wrqu->rts.fixed = 1;
1318 1293
1319 return 0; 1294 return 0;
1320} 1295}
@@ -1323,19 +1298,18 @@ static int ray_get_rts(struct net_device *dev,
1323/* 1298/*
1324 * Wireless Handler : set Fragmentation threshold 1299 * Wireless Handler : set Fragmentation threshold
1325 */ 1300 */
1326static int ray_set_frag(struct net_device *dev, 1301static int ray_set_frag(struct net_device *dev, struct iw_request_info *info,
1327 struct iw_request_info *info, 1302 union iwreq_data *wrqu, char *extra)
1328 struct iw_param *vwrq, char *extra)
1329{ 1303{
1330 ray_dev_t *local = netdev_priv(dev); 1304 ray_dev_t *local = netdev_priv(dev);
1331 int fthr = vwrq->value; 1305 int fthr = wrqu->frag.value;
1332 1306
1333 /* Reject if card is already initialised */ 1307 /* Reject if card is already initialised */
1334 if (local->card_status != CARD_AWAITING_PARAM) 1308 if (local->card_status != CARD_AWAITING_PARAM)
1335 return -EBUSY; 1309 return -EBUSY;
1336 1310
1337 /* if(wrq->u.frag.fixed == 0) should complain */ 1311 /* if(wrq->u.frag.fixed == 0) should complain */
1338 if (vwrq->disabled) 1312 if (wrqu->frag.disabled)
1339 fthr = 32767; 1313 fthr = 32767;
1340 else { 1314 else {
1341 if ((fthr < 256) || (fthr > 2347)) /* To check out ! */ 1315 if ((fthr < 256) || (fthr > 2347)) /* To check out ! */
@@ -1351,16 +1325,15 @@ static int ray_set_frag(struct net_device *dev,
1351/* 1325/*
1352 * Wireless Handler : get Fragmentation threshold 1326 * Wireless Handler : get Fragmentation threshold
1353 */ 1327 */
1354static int ray_get_frag(struct net_device *dev, 1328static int ray_get_frag(struct net_device *dev, struct iw_request_info *info,
1355 struct iw_request_info *info, 1329 union iwreq_data *wrqu, char *extra)
1356 struct iw_param *vwrq, char *extra)
1357{ 1330{
1358 ray_dev_t *local = netdev_priv(dev); 1331 ray_dev_t *local = netdev_priv(dev);
1359 1332
1360 vwrq->value = (local->sparm.b5.a_frag_threshold[0] << 8) 1333 wrqu->frag.value = (local->sparm.b5.a_frag_threshold[0] << 8)
1361 + local->sparm.b5.a_frag_threshold[1]; 1334 + local->sparm.b5.a_frag_threshold[1];
1362 vwrq->disabled = (vwrq->value == 32767); 1335 wrqu->frag.disabled = (wrqu->frag.value == 32767);
1363 vwrq->fixed = 1; 1336 wrqu->frag.fixed = 1;
1364 1337
1365 return 0; 1338 return 0;
1366} 1339}
@@ -1369,8 +1342,8 @@ static int ray_get_frag(struct net_device *dev,
1369/* 1342/*
1370 * Wireless Handler : set Mode of Operation 1343 * Wireless Handler : set Mode of Operation
1371 */ 1344 */
1372static int ray_set_mode(struct net_device *dev, 1345static int ray_set_mode(struct net_device *dev, struct iw_request_info *info,
1373 struct iw_request_info *info, __u32 *uwrq, char *extra) 1346 union iwreq_data *wrqu, char *extra)
1374{ 1347{
1375 ray_dev_t *local = netdev_priv(dev); 1348 ray_dev_t *local = netdev_priv(dev);
1376 int err = -EINPROGRESS; /* Call commit handler */ 1349 int err = -EINPROGRESS; /* Call commit handler */
@@ -1380,7 +1353,7 @@ static int ray_set_mode(struct net_device *dev,
1380 if (local->card_status != CARD_AWAITING_PARAM) 1353 if (local->card_status != CARD_AWAITING_PARAM)
1381 return -EBUSY; 1354 return -EBUSY;
1382 1355
1383 switch (*uwrq) { 1356 switch (wrqu->mode) {
1384 case IW_MODE_ADHOC: 1357 case IW_MODE_ADHOC:
1385 card_mode = 0; 1358 card_mode = 0;
1386 /* Fall through */ 1359 /* Fall through */
@@ -1398,15 +1371,15 @@ static int ray_set_mode(struct net_device *dev,
1398/* 1371/*
1399 * Wireless Handler : get Mode of Operation 1372 * Wireless Handler : get Mode of Operation
1400 */ 1373 */
1401static int ray_get_mode(struct net_device *dev, 1374static int ray_get_mode(struct net_device *dev, struct iw_request_info *info,
1402 struct iw_request_info *info, __u32 *uwrq, char *extra) 1375 union iwreq_data *wrqu, char *extra)
1403{ 1376{
1404 ray_dev_t *local = netdev_priv(dev); 1377 ray_dev_t *local = netdev_priv(dev);
1405 1378
1406 if (local->sparm.b5.a_network_type) 1379 if (local->sparm.b5.a_network_type)
1407 *uwrq = IW_MODE_INFRA; 1380 wrqu->mode = IW_MODE_INFRA;
1408 else 1381 else
1409 *uwrq = IW_MODE_ADHOC; 1382 wrqu->mode = IW_MODE_ADHOC;
1410 1383
1411 return 0; 1384 return 0;
1412} 1385}
@@ -1415,16 +1388,15 @@ static int ray_get_mode(struct net_device *dev,
1415/* 1388/*
1416 * Wireless Handler : get range info 1389 * Wireless Handler : get range info
1417 */ 1390 */
1418static int ray_get_range(struct net_device *dev, 1391static int ray_get_range(struct net_device *dev, struct iw_request_info *info,
1419 struct iw_request_info *info, 1392 union iwreq_data *wrqu, char *extra)
1420 struct iw_point *dwrq, char *extra)
1421{ 1393{
1422 struct iw_range *range = (struct iw_range *)extra; 1394 struct iw_range *range = (struct iw_range *)extra;
1423 1395
1424 memset((char *)range, 0, sizeof(struct iw_range)); 1396 memset(range, 0, sizeof(struct iw_range));
1425 1397
1426 /* Set the length (very important for backward compatibility) */ 1398 /* Set the length (very important for backward compatibility) */
1427 dwrq->length = sizeof(struct iw_range); 1399 wrqu->data.length = sizeof(struct iw_range);
1428 1400
1429 /* Set the Wireless Extension versions */ 1401 /* Set the Wireless Extension versions */
1430 range->we_version_compiled = WIRELESS_EXT; 1402 range->we_version_compiled = WIRELESS_EXT;
@@ -1447,8 +1419,7 @@ static int ray_get_range(struct net_device *dev,
1447/* 1419/*
1448 * Wireless Private Handler : set framing mode 1420 * Wireless Private Handler : set framing mode
1449 */ 1421 */
1450static int ray_set_framing(struct net_device *dev, 1422static int ray_set_framing(struct net_device *dev, struct iw_request_info *info,
1451 struct iw_request_info *info,
1452 union iwreq_data *wrqu, char *extra) 1423 union iwreq_data *wrqu, char *extra)
1453{ 1424{
1454 translate = *(extra); /* Set framing mode */ 1425 translate = *(extra); /* Set framing mode */
@@ -1460,8 +1431,7 @@ static int ray_set_framing(struct net_device *dev,
1460/* 1431/*
1461 * Wireless Private Handler : get framing mode 1432 * Wireless Private Handler : get framing mode
1462 */ 1433 */
1463static int ray_get_framing(struct net_device *dev, 1434static int ray_get_framing(struct net_device *dev, struct iw_request_info *info,
1464 struct iw_request_info *info,
1465 union iwreq_data *wrqu, char *extra) 1435 union iwreq_data *wrqu, char *extra)
1466{ 1436{
1467 *(extra) = translate; 1437 *(extra) = translate;
@@ -1473,8 +1443,7 @@ static int ray_get_framing(struct net_device *dev,
1473/* 1443/*
1474 * Wireless Private Handler : get country 1444 * Wireless Private Handler : get country
1475 */ 1445 */
1476static int ray_get_country(struct net_device *dev, 1446static int ray_get_country(struct net_device *dev, struct iw_request_info *info,
1477 struct iw_request_info *info,
1478 union iwreq_data *wrqu, char *extra) 1447 union iwreq_data *wrqu, char *extra)
1479{ 1448{
1480 *(extra) = country; 1449 *(extra) = country;
@@ -1486,10 +1455,9 @@ static int ray_get_country(struct net_device *dev,
1486/* 1455/*
1487 * Commit handler : called after a bunch of SET operations 1456 * Commit handler : called after a bunch of SET operations
1488 */ 1457 */
1489static int ray_commit(struct net_device *dev, struct iw_request_info *info, /* NULL */ 1458static int ray_commit(struct net_device *dev, struct iw_request_info *info,
1490 void *zwrq, /* NULL */ 1459 union iwreq_data *wrqu, char *extra)
1491 char *extra) 1460{
1492{ /* NULL */
1493 return 0; 1461 return 0;
1494} 1462}
1495 1463
@@ -1530,28 +1498,28 @@ static iw_stats *ray_get_wireless_stats(struct net_device *dev)
1530 */ 1498 */
1531 1499
1532static const iw_handler ray_handler[] = { 1500static const iw_handler ray_handler[] = {
1533 [SIOCSIWCOMMIT - SIOCIWFIRST] = (iw_handler) ray_commit, 1501 IW_HANDLER(SIOCSIWCOMMIT, ray_commit),
1534 [SIOCGIWNAME - SIOCIWFIRST] = (iw_handler) ray_get_name, 1502 IW_HANDLER(SIOCGIWNAME, ray_get_name),
1535 [SIOCSIWFREQ - SIOCIWFIRST] = (iw_handler) ray_set_freq, 1503 IW_HANDLER(SIOCSIWFREQ, ray_set_freq),
1536 [SIOCGIWFREQ - SIOCIWFIRST] = (iw_handler) ray_get_freq, 1504 IW_HANDLER(SIOCGIWFREQ, ray_get_freq),
1537 [SIOCSIWMODE - SIOCIWFIRST] = (iw_handler) ray_set_mode, 1505 IW_HANDLER(SIOCSIWMODE, ray_set_mode),
1538 [SIOCGIWMODE - SIOCIWFIRST] = (iw_handler) ray_get_mode, 1506 IW_HANDLER(SIOCGIWMODE, ray_get_mode),
1539 [SIOCGIWRANGE - SIOCIWFIRST] = (iw_handler) ray_get_range, 1507 IW_HANDLER(SIOCGIWRANGE, ray_get_range),
1540#ifdef WIRELESS_SPY 1508#ifdef WIRELESS_SPY
1541 [SIOCSIWSPY - SIOCIWFIRST] = (iw_handler) iw_handler_set_spy, 1509 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
1542 [SIOCGIWSPY - SIOCIWFIRST] = (iw_handler) iw_handler_get_spy, 1510 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
1543 [SIOCSIWTHRSPY - SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy, 1511 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
1544 [SIOCGIWTHRSPY - SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy, 1512 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
1545#endif /* WIRELESS_SPY */ 1513#endif /* WIRELESS_SPY */
1546 [SIOCGIWAP - SIOCIWFIRST] = (iw_handler) ray_get_wap, 1514 IW_HANDLER(SIOCGIWAP, ray_get_wap),
1547 [SIOCSIWESSID - SIOCIWFIRST] = (iw_handler) ray_set_essid, 1515 IW_HANDLER(SIOCSIWESSID, ray_set_essid),
1548 [SIOCGIWESSID - SIOCIWFIRST] = (iw_handler) ray_get_essid, 1516 IW_HANDLER(SIOCGIWESSID, ray_get_essid),
1549 [SIOCSIWRATE - SIOCIWFIRST] = (iw_handler) ray_set_rate, 1517 IW_HANDLER(SIOCSIWRATE, ray_set_rate),
1550 [SIOCGIWRATE - SIOCIWFIRST] = (iw_handler) ray_get_rate, 1518 IW_HANDLER(SIOCGIWRATE, ray_get_rate),
1551 [SIOCSIWRTS - SIOCIWFIRST] = (iw_handler) ray_set_rts, 1519 IW_HANDLER(SIOCSIWRTS, ray_set_rts),
1552 [SIOCGIWRTS - SIOCIWFIRST] = (iw_handler) ray_get_rts, 1520 IW_HANDLER(SIOCGIWRTS, ray_get_rts),
1553 [SIOCSIWFRAG - SIOCIWFIRST] = (iw_handler) ray_set_frag, 1521 IW_HANDLER(SIOCSIWFRAG, ray_set_frag),
1554 [SIOCGIWFRAG - SIOCIWFIRST] = (iw_handler) ray_get_frag, 1522 IW_HANDLER(SIOCGIWFRAG, ray_get_frag),
1555}; 1523};
1556 1524
1557#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */ 1525#define SIOCSIPFRAMING SIOCIWFIRSTPRIV /* Set framing mode */
@@ -1559,9 +1527,9 @@ static const iw_handler ray_handler[] = {
1559#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */ 1527#define SIOCGIPCOUNTRY SIOCIWFIRSTPRIV + 3 /* Get country code */
1560 1528
1561static const iw_handler ray_private_handler[] = { 1529static const iw_handler ray_private_handler[] = {
1562 [0] = (iw_handler) ray_set_framing, 1530 [0] = ray_set_framing,
1563 [1] = (iw_handler) ray_get_framing, 1531 [1] = ray_get_framing,
1564 [3] = (iw_handler) ray_get_country, 1532 [3] = ray_get_country,
1565}; 1533};
1566 1534
1567static const struct iw_priv_args ray_private_args[] = { 1535static const struct iw_priv_args ray_private_args[] = {
@@ -1645,7 +1613,6 @@ static int ray_dev_close(struct net_device *dev)
1645static void ray_reset(struct net_device *dev) 1613static void ray_reset(struct net_device *dev)
1646{ 1614{
1647 pr_debug("ray_reset entered\n"); 1615 pr_debug("ray_reset entered\n");
1648 return;
1649} 1616}
1650 1617
1651/*===========================================================================*/ 1618/*===========================================================================*/
@@ -1892,17 +1859,17 @@ static void ray_update_multi_list(struct net_device *dev, int all)
1892 writeb(0xff, &pccs->var); 1859 writeb(0xff, &pccs->var);
1893 local->num_multi = 0xff; 1860 local->num_multi = 0xff;
1894 } else { 1861 } else {
1895 struct dev_mc_list *dmi; 1862 struct netdev_hw_addr *ha;
1896 int i = 0; 1863 int i = 0;
1897 1864
1898 /* Copy the kernel's list of MC addresses to card */ 1865 /* Copy the kernel's list of MC addresses to card */
1899 netdev_for_each_mc_addr(dmi, dev) { 1866 netdev_for_each_mc_addr(ha, dev) {
1900 memcpy_toio(p, dmi->dmi_addr, ETH_ALEN); 1867 memcpy_toio(p, ha->addr, ETH_ALEN);
1901 dev_dbg(&link->dev, 1868 dev_dbg(&link->dev,
1902 "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n", 1869 "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n",
1903 dmi->dmi_addr[0], dmi->dmi_addr[1], 1870 ha->addr[0], ha->addr[1],
1904 dmi->dmi_addr[2], dmi->dmi_addr[3], 1871 ha->addr[2], ha->addr[3],
1905 dmi->dmi_addr[4], dmi->dmi_addr[5]); 1872 ha->addr[4], ha->addr[5]);
1906 p += ETH_ALEN; 1873 p += ETH_ALEN;
1907 i++; 1874 i++;
1908 } 1875 }
@@ -2251,7 +2218,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2251 (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN + 2218 (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
2252 FCS_LEN)) { 2219 FCS_LEN)) {
2253 pr_debug( 2220 pr_debug(
2254 "ray_cs invalid packet length %d received \n", 2221 "ray_cs invalid packet length %d received\n",
2255 rx_len); 2222 rx_len);
2256 return; 2223 return;
2257 } 2224 }
@@ -2262,7 +2229,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
2262 (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN + 2229 (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
2263 FCS_LEN)) { 2230 FCS_LEN)) {
2264 pr_debug( 2231 pr_debug(
2265 "ray_cs invalid packet length %d received \n", 2232 "ray_cs invalid packet length %d received\n",
2266 rx_len); 2233 rx_len);
2267 return; 2234 return;
2268 } 2235 }
@@ -2770,11 +2737,11 @@ static int ray_cs_proc_show(struct seq_file *m, void *v)
2770 seq_printf(m, "Hop dwell = %d Kus\n", 2737 seq_printf(m, "Hop dwell = %d Kus\n",
2771 pfh->dwell_time[0] + 2738 pfh->dwell_time[0] +
2772 256 * pfh->dwell_time[1]); 2739 256 * pfh->dwell_time[1]);
2773 seq_printf(m, "Hop set = %d \n", 2740 seq_printf(m, "Hop set = %d\n",
2774 pfh->hop_set); 2741 pfh->hop_set);
2775 seq_printf(m, "Hop pattern = %d \n", 2742 seq_printf(m, "Hop pattern = %d\n",
2776 pfh->hop_pattern); 2743 pfh->hop_pattern);
2777 seq_printf(m, "Hop index = %d \n", 2744 seq_printf(m, "Hop index = %d\n",
2778 pfh->hop_index); 2745 pfh->hop_index);
2779 p += p[1] + 2; 2746 p += p[1] + 2;
2780 } else { 2747 } else {
diff --git a/drivers/net/wireless/ray_cs.h b/drivers/net/wireless/ray_cs.h
index 1e23b7f4cca7..9f01ddb19748 100644
--- a/drivers/net/wireless/ray_cs.h
+++ b/drivers/net/wireless/ray_cs.h
@@ -25,7 +25,6 @@ struct beacon_rx {
25typedef struct ray_dev_t { 25typedef struct ray_dev_t {
26 int card_status; 26 int card_status;
27 int authentication_state; 27 int authentication_state;
28 dev_node_t node;
29 window_handle_t amem_handle; /* handle to window for attribute memory */ 28 window_handle_t amem_handle; /* handle to window for attribute memory */
30 window_handle_t rmem_handle; /* handle to window for rx buffer on card */ 29 window_handle_t rmem_handle; /* handle to window for rx buffer on card */
31 void __iomem *sram; /* pointer to beginning of shared RAM */ 30 void __iomem *sram; /* pointer to beginning of shared RAM */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 1de5b22d3efe..2d2890878dea 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -118,6 +118,7 @@ MODULE_PARM_DESC(workaround_interval,
118#define OID_802_11_ADD_KEY cpu_to_le32(0x0d01011d) 118#define OID_802_11_ADD_KEY cpu_to_le32(0x0d01011d)
119#define OID_802_11_REMOVE_KEY cpu_to_le32(0x0d01011e) 119#define OID_802_11_REMOVE_KEY cpu_to_le32(0x0d01011e)
120#define OID_802_11_ASSOCIATION_INFORMATION cpu_to_le32(0x0d01011f) 120#define OID_802_11_ASSOCIATION_INFORMATION cpu_to_le32(0x0d01011f)
121#define OID_802_11_CAPABILITY cpu_to_le32(0x0d010122)
121#define OID_802_11_PMKID cpu_to_le32(0x0d010123) 122#define OID_802_11_PMKID cpu_to_le32(0x0d010123)
122#define OID_802_11_NETWORK_TYPES_SUPPORTED cpu_to_le32(0x0d010203) 123#define OID_802_11_NETWORK_TYPES_SUPPORTED cpu_to_le32(0x0d010203)
123#define OID_802_11_NETWORK_TYPE_IN_USE cpu_to_le32(0x0d010204) 124#define OID_802_11_NETWORK_TYPE_IN_USE cpu_to_le32(0x0d010204)
@@ -359,6 +360,30 @@ struct ndis_80211_assoc_info {
359 __le32 offset_resp_ies; 360 __le32 offset_resp_ies;
360} __attribute__((packed)); 361} __attribute__((packed));
361 362
363struct ndis_80211_auth_encr_pair {
364 __le32 auth_mode;
365 __le32 encr_mode;
366} __attribute__((packed));
367
368struct ndis_80211_capability {
369 __le32 length;
370 __le32 version;
371 __le32 num_pmkids;
372 __le32 num_auth_encr_pair;
373 struct ndis_80211_auth_encr_pair auth_encr_pair[0];
374} __attribute__((packed));
375
376struct ndis_80211_bssid_info {
377 u8 bssid[6];
378 u8 pmkid[16];
379};
380
381struct ndis_80211_pmkid {
382 __le32 length;
383 __le32 bssid_info_count;
384 struct ndis_80211_bssid_info bssid_info[0];
385};
386
362/* 387/*
363 * private data 388 * private data
364 */ 389 */
@@ -477,13 +502,7 @@ struct rndis_wlan_private {
477 /* encryption stuff */ 502 /* encryption stuff */
478 int encr_tx_key_index; 503 int encr_tx_key_index;
479 struct rndis_wlan_encr_key encr_keys[4]; 504 struct rndis_wlan_encr_key encr_keys[4];
480 enum nl80211_auth_type wpa_auth_type;
481 int wpa_version; 505 int wpa_version;
482 int wpa_keymgmt;
483 int wpa_ie_len;
484 u8 *wpa_ie;
485 int wpa_cipher_pair;
486 int wpa_cipher_group;
487 506
488 u8 command_buffer[COMMAND_BUFFER_SIZE]; 507 u8 command_buffer[COMMAND_BUFFER_SIZE];
489}; 508};
@@ -516,7 +535,7 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
516 535
517static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev); 536static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev);
518 537
519static int rndis_set_channel(struct wiphy *wiphy, 538static int rndis_set_channel(struct wiphy *wiphy, struct net_device *dev,
520 struct ieee80211_channel *chan, enum nl80211_channel_type channel_type); 539 struct ieee80211_channel *chan, enum nl80211_channel_type channel_type);
521 540
522static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev, 541static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
@@ -535,6 +554,14 @@ static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
535static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev, 554static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
536 int idx, u8 *mac, struct station_info *sinfo); 555 int idx, u8 *mac, struct station_info *sinfo);
537 556
557static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
558 struct cfg80211_pmksa *pmksa);
559
560static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
561 struct cfg80211_pmksa *pmksa);
562
563static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev);
564
538static struct cfg80211_ops rndis_config_ops = { 565static struct cfg80211_ops rndis_config_ops = {
539 .change_virtual_intf = rndis_change_virtual_intf, 566 .change_virtual_intf = rndis_change_virtual_intf,
540 .scan = rndis_scan, 567 .scan = rndis_scan,
@@ -551,6 +578,9 @@ static struct cfg80211_ops rndis_config_ops = {
551 .set_default_key = rndis_set_default_key, 578 .set_default_key = rndis_set_default_key,
552 .get_station = rndis_get_station, 579 .get_station = rndis_get_station,
553 .dump_station = rndis_dump_station, 580 .dump_station = rndis_dump_station,
581 .set_pmksa = rndis_set_pmksa,
582 .del_pmksa = rndis_del_pmksa,
583 .flush_pmksa = rndis_flush_pmksa,
554}; 584};
555 585
556static void *rndis_wiphy_privid = &rndis_wiphy_privid; 586static void *rndis_wiphy_privid = &rndis_wiphy_privid;
@@ -705,6 +735,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
705 struct rndis_query_c *get_c; 735 struct rndis_query_c *get_c;
706 } u; 736 } u;
707 int ret, buflen; 737 int ret, buflen;
738 int resplen, respoffs, copylen;
708 739
709 buflen = *len + sizeof(*u.get); 740 buflen = *len + sizeof(*u.get);
710 if (buflen < CONTROL_BUFFER_SIZE) 741 if (buflen < CONTROL_BUFFER_SIZE)
@@ -734,11 +765,34 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
734 le32_to_cpu(u.get_c->status)); 765 le32_to_cpu(u.get_c->status));
735 766
736 if (ret == 0) { 767 if (ret == 0) {
737 memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len); 768 resplen = le32_to_cpu(u.get_c->len);
769 respoffs = le32_to_cpu(u.get_c->offset) + 8;
738 770
739 ret = le32_to_cpu(u.get_c->len); 771 if (respoffs > buflen) {
740 if (ret > *len) 772 /* Device returned data offset outside buffer, error. */
741 *len = ret; 773 netdev_dbg(dev->net, "%s(%s): received invalid "
774 "data offset: %d > %d\n", __func__,
775 oid_to_string(oid), respoffs, buflen);
776
777 ret = -EINVAL;
778 goto exit_unlock;
779 }
780
781 if ((resplen + respoffs) > buflen) {
782 /* Device would have returned more data if buffer would
783 * have been big enough. Copy just the bits that we got.
784 */
785 copylen = buflen - respoffs;
786 } else {
787 copylen = resplen;
788 }
789
790 if (copylen > *len)
791 copylen = *len;
792
793 memcpy(data, u.buf + respoffs, copylen);
794
795 *len = resplen;
742 796
743 ret = rndis_error_status(u.get_c->status); 797 ret = rndis_error_status(u.get_c->status);
744 if (ret < 0) 798 if (ret < 0)
@@ -747,6 +801,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
747 le32_to_cpu(u.get_c->status), ret); 801 le32_to_cpu(u.get_c->status), ret);
748 } 802 }
749 803
804exit_unlock:
750 mutex_unlock(&priv->command_lock); 805 mutex_unlock(&priv->command_lock);
751 806
752 if (u.buf != priv->command_buffer) 807 if (u.buf != priv->command_buffer)
@@ -1092,8 +1147,6 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
1092 } 1147 }
1093 1148
1094 priv->wpa_version = wpa_version; 1149 priv->wpa_version = wpa_version;
1095 priv->wpa_auth_type = auth_type;
1096 priv->wpa_keymgmt = keymgmt;
1097 1150
1098 return 0; 1151 return 0;
1099} 1152}
@@ -1118,7 +1171,6 @@ static int set_priv_filter(struct usbnet *usbdev)
1118 1171
1119static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise) 1172static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
1120{ 1173{
1121 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1122 __le32 tmp; 1174 __le32 tmp;
1123 int encr_mode, ret; 1175 int encr_mode, ret;
1124 1176
@@ -1147,8 +1199,6 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
1147 return ret; 1199 return ret;
1148 } 1200 }
1149 1201
1150 priv->wpa_cipher_pair = pairwise;
1151 priv->wpa_cipher_group = groupwise;
1152 return 0; 1202 return 0;
1153} 1203}
1154 1204
@@ -1496,7 +1546,7 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
1496static void set_multicast_list(struct usbnet *usbdev) 1546static void set_multicast_list(struct usbnet *usbdev)
1497{ 1547{
1498 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 1548 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1499 struct dev_mc_list *mclist; 1549 struct netdev_hw_addr *ha;
1500 __le32 filter, basefilter; 1550 __le32 filter, basefilter;
1501 int ret; 1551 int ret;
1502 char *mc_addrs = NULL; 1552 char *mc_addrs = NULL;
@@ -1535,9 +1585,9 @@ static void set_multicast_list(struct usbnet *usbdev)
1535 return; 1585 return;
1536 } 1586 }
1537 1587
1538 netdev_for_each_mc_addr(mclist, usbdev->net) 1588 netdev_for_each_mc_addr(ha, usbdev->net)
1539 memcpy(mc_addrs + i++ * ETH_ALEN, 1589 memcpy(mc_addrs + i++ * ETH_ALEN,
1540 mclist->dmi_addr, ETH_ALEN); 1590 ha->addr, ETH_ALEN);
1541 } 1591 }
1542 netif_addr_unlock_bh(usbdev->net); 1592 netif_addr_unlock_bh(usbdev->net);
1543 1593
@@ -1569,6 +1619,194 @@ set_filter:
1569 le32_to_cpu(filter), ret); 1619 le32_to_cpu(filter), ret);
1570} 1620}
1571 1621
1622#ifdef DEBUG
1623static void debug_print_pmkids(struct usbnet *usbdev,
1624 struct ndis_80211_pmkid *pmkids,
1625 const char *func_str)
1626{
1627 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1628 int i, len, count, max_pmkids, entry_len;
1629
1630 max_pmkids = priv->wdev.wiphy->max_num_pmkids;
1631 len = le32_to_cpu(pmkids->length);
1632 count = le32_to_cpu(pmkids->bssid_info_count);
1633
1634 entry_len = (count > 0) ? (len - sizeof(*pmkids)) / count : -1;
1635
1636 netdev_dbg(usbdev->net, "%s(): %d PMKIDs (data len: %d, entry len: "
1637 "%d)\n", func_str, count, len, entry_len);
1638
1639 if (count > max_pmkids)
1640 count = max_pmkids;
1641
1642 for (i = 0; i < count; i++) {
1643 u32 *tmp = (u32 *)pmkids->bssid_info[i].pmkid;
1644
1645 netdev_dbg(usbdev->net, "%s(): bssid: %pM, "
1646 "pmkid: %08X:%08X:%08X:%08X\n",
1647 func_str, pmkids->bssid_info[i].bssid,
1648 cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
1649 cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
1650 }
1651}
1652#else
1653static void debug_print_pmkids(struct usbnet *usbdev,
1654 struct ndis_80211_pmkid *pmkids,
1655 const char *func_str)
1656{
1657 return;
1658}
1659#endif
1660
1661static struct ndis_80211_pmkid *get_device_pmkids(struct usbnet *usbdev)
1662{
1663 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1664 struct ndis_80211_pmkid *pmkids;
1665 int len, ret, max_pmkids;
1666
1667 max_pmkids = priv->wdev.wiphy->max_num_pmkids;
1668 len = sizeof(*pmkids) + max_pmkids * sizeof(pmkids->bssid_info[0]);
1669
1670 pmkids = kzalloc(len, GFP_KERNEL);
1671 if (!pmkids)
1672 return ERR_PTR(-ENOMEM);
1673
1674 pmkids->length = cpu_to_le32(len);
1675 pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
1676
1677 ret = rndis_query_oid(usbdev, OID_802_11_PMKID, pmkids, &len);
1678 if (ret < 0) {
1679 netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d)"
1680 " -> %d\n", __func__, len, max_pmkids, ret);
1681
1682 kfree(pmkids);
1683 return ERR_PTR(ret);
1684 }
1685
1686 if (le32_to_cpu(pmkids->bssid_info_count) > max_pmkids)
1687 pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
1688
1689 debug_print_pmkids(usbdev, pmkids, __func__);
1690
1691 return pmkids;
1692}
1693
1694static int set_device_pmkids(struct usbnet *usbdev,
1695 struct ndis_80211_pmkid *pmkids)
1696{
1697 int ret, len, num_pmkids;
1698
1699 num_pmkids = le32_to_cpu(pmkids->bssid_info_count);
1700 len = sizeof(*pmkids) + num_pmkids * sizeof(pmkids->bssid_info[0]);
1701 pmkids->length = cpu_to_le32(len);
1702
1703 debug_print_pmkids(usbdev, pmkids, __func__);
1704
1705 ret = rndis_set_oid(usbdev, OID_802_11_PMKID, pmkids,
1706 le32_to_cpu(pmkids->length));
1707 if (ret < 0) {
1708 netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d) -> %d"
1709 "\n", __func__, len, num_pmkids, ret);
1710 }
1711
1712 kfree(pmkids);
1713 return ret;
1714}
1715
1716static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
1717 struct ndis_80211_pmkid *pmkids,
1718 struct cfg80211_pmksa *pmksa,
1719 int max_pmkids)
1720{
1721 int i, len, count, newlen, err;
1722
1723 len = le32_to_cpu(pmkids->length);
1724 count = le32_to_cpu(pmkids->bssid_info_count);
1725
1726 if (count > max_pmkids)
1727 count = max_pmkids;
1728
1729 for (i = 0; i < count; i++)
1730 if (!compare_ether_addr(pmkids->bssid_info[i].bssid,
1731 pmksa->bssid))
1732 break;
1733
1734 /* pmkid not found */
1735 if (i == count) {
1736 netdev_dbg(usbdev->net, "%s(): bssid not found (%pM)\n",
1737 __func__, pmksa->bssid);
1738 err = -ENOENT;
1739 goto error;
1740 }
1741
1742 for (; i + 1 < count; i++)
1743 pmkids->bssid_info[i] = pmkids->bssid_info[i + 1];
1744
1745 count--;
1746 newlen = sizeof(*pmkids) + count * sizeof(pmkids->bssid_info[0]);
1747
1748 pmkids->length = cpu_to_le32(newlen);
1749 pmkids->bssid_info_count = cpu_to_le32(count);
1750
1751 return pmkids;
1752error:
1753 kfree(pmkids);
1754 return ERR_PTR(err);
1755}
1756
1757static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
1758 struct ndis_80211_pmkid *pmkids,
1759 struct cfg80211_pmksa *pmksa,
1760 int max_pmkids)
1761{
1762 int i, err, len, count, newlen;
1763
1764 len = le32_to_cpu(pmkids->length);
1765 count = le32_to_cpu(pmkids->bssid_info_count);
1766
1767 if (count > max_pmkids)
1768 count = max_pmkids;
1769
1770 /* update with new pmkid */
1771 for (i = 0; i < count; i++) {
1772 if (compare_ether_addr(pmkids->bssid_info[i].bssid,
1773 pmksa->bssid))
1774 continue;
1775
1776 memcpy(pmkids->bssid_info[i].pmkid, pmksa->pmkid,
1777 WLAN_PMKID_LEN);
1778
1779 return pmkids;
1780 }
1781
1782 /* out of space, return error */
1783 if (i == max_pmkids) {
1784 netdev_dbg(usbdev->net, "%s(): out of space\n", __func__);
1785 err = -ENOSPC;
1786 goto error;
1787 }
1788
1789 /* add new pmkid */
1790 newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]);
1791
1792 pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
1793 if (!pmkids) {
1794 err = -ENOMEM;
1795 goto error;
1796 }
1797
1798 pmkids->length = cpu_to_le32(newlen);
1799 pmkids->bssid_info_count = cpu_to_le32(count + 1);
1800
1801 memcpy(pmkids->bssid_info[count].bssid, pmksa->bssid, ETH_ALEN);
1802 memcpy(pmkids->bssid_info[count].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
1803
1804 return pmkids;
1805error:
1806 kfree(pmkids);
1807 return ERR_PTR(err);
1808}
1809
1572/* 1810/*
1573 * cfg80211 ops 1811 * cfg80211 ops
1574 */ 1812 */
@@ -2053,7 +2291,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
2053 return deauthenticate(usbdev); 2291 return deauthenticate(usbdev);
2054} 2292}
2055 2293
2056static int rndis_set_channel(struct wiphy *wiphy, 2294static int rndis_set_channel(struct wiphy *wiphy, struct net_device *netdev,
2057 struct ieee80211_channel *chan, enum nl80211_channel_type channel_type) 2295 struct ieee80211_channel *chan, enum nl80211_channel_type channel_type)
2058{ 2296{
2059 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 2297 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
@@ -2179,6 +2417,78 @@ static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
2179 return 0; 2417 return 0;
2180} 2418}
2181 2419
2420static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
2421 struct cfg80211_pmksa *pmksa)
2422{
2423 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2424 struct usbnet *usbdev = priv->usbdev;
2425 struct ndis_80211_pmkid *pmkids;
2426 u32 *tmp = (u32 *)pmksa->pmkid;
2427
2428 netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n", __func__,
2429 pmksa->bssid,
2430 cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
2431 cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
2432
2433 pmkids = get_device_pmkids(usbdev);
2434 if (IS_ERR(pmkids)) {
2435 /* couldn't read PMKID cache from device */
2436 return PTR_ERR(pmkids);
2437 }
2438
2439 pmkids = update_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
2440 if (IS_ERR(pmkids)) {
2441 /* not found, list full, etc */
2442 return PTR_ERR(pmkids);
2443 }
2444
2445 return set_device_pmkids(usbdev, pmkids);
2446}
2447
2448static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
2449 struct cfg80211_pmksa *pmksa)
2450{
2451 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2452 struct usbnet *usbdev = priv->usbdev;
2453 struct ndis_80211_pmkid *pmkids;
2454 u32 *tmp = (u32 *)pmksa->pmkid;
2455
2456 netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n", __func__,
2457 pmksa->bssid,
2458 cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
2459 cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
2460
2461 pmkids = get_device_pmkids(usbdev);
2462 if (IS_ERR(pmkids)) {
2463 /* Couldn't read PMKID cache from device */
2464 return PTR_ERR(pmkids);
2465 }
2466
2467 pmkids = remove_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
2468 if (IS_ERR(pmkids)) {
2469 /* not found, etc */
2470 return PTR_ERR(pmkids);
2471 }
2472
2473 return set_device_pmkids(usbdev, pmkids);
2474}
2475
2476static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
2477{
2478 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2479 struct usbnet *usbdev = priv->usbdev;
2480 struct ndis_80211_pmkid pmkid;
2481
2482 netdev_dbg(usbdev->net, "%s()\n", __func__);
2483
2484 memset(&pmkid, 0, sizeof(pmkid));
2485
2486 pmkid.length = cpu_to_le32(sizeof(pmkid));
2487 pmkid.bssid_info_count = cpu_to_le32(0);
2488
2489 return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid));
2490}
2491
2182/* 2492/*
2183 * workers, indication handlers, device poller 2493 * workers, indication handlers, device poller
2184 */ 2494 */
@@ -2523,12 +2833,14 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
2523 } 2833 }
2524} 2834}
2525 2835
2526static int rndis_wlan_get_caps(struct usbnet *usbdev) 2836static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
2527{ 2837{
2528 struct { 2838 struct {
2529 __le32 num_items; 2839 __le32 num_items;
2530 __le32 items[8]; 2840 __le32 items[8];
2531 } networks_supported; 2841 } networks_supported;
2842 struct ndis_80211_capability *caps;
2843 u8 caps_buf[sizeof(*caps) + sizeof(caps->auth_encr_pair) * 16];
2532 int len, retval, i, n; 2844 int len, retval, i, n;
2533 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 2845 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2534 2846
@@ -2556,6 +2868,21 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev)
2556 } 2868 }
2557 } 2869 }
2558 2870
2871 /* get device 802.11 capabilities, number of PMKIDs */
2872 caps = (struct ndis_80211_capability *)caps_buf;
2873 len = sizeof(caps_buf);
2874 retval = rndis_query_oid(usbdev, OID_802_11_CAPABILITY, caps, &len);
2875 if (retval >= 0) {
2876 netdev_dbg(usbdev->net, "OID_802_11_CAPABILITY -> len %d, "
2877 "ver %d, pmkids %d, auth-encr-pairs %d\n",
2878 le32_to_cpu(caps->length),
2879 le32_to_cpu(caps->version),
2880 le32_to_cpu(caps->num_pmkids),
2881 le32_to_cpu(caps->num_auth_encr_pair));
2882 wiphy->max_num_pmkids = le32_to_cpu(caps->num_pmkids);
2883 } else
2884 wiphy->max_num_pmkids = 0;
2885
2559 return retval; 2886 return retval;
2560} 2887}
2561 2888
@@ -2803,7 +3130,7 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
2803 wiphy->max_scan_ssids = 1; 3130 wiphy->max_scan_ssids = 1;
2804 3131
2805 /* TODO: fill-out band/encr information based on priv->caps */ 3132 /* TODO: fill-out band/encr information based on priv->caps */
2806 rndis_wlan_get_caps(usbdev); 3133 rndis_wlan_get_caps(usbdev, wiphy);
2807 3134
2808 memcpy(priv->channels, rndis_channels, sizeof(rndis_channels)); 3135 memcpy(priv->channels, rndis_channels, sizeof(rndis_channels));
2809 memcpy(priv->rates, rndis_rates, sizeof(rndis_rates)); 3136 memcpy(priv->rates, rndis_rates, sizeof(rndis_rates));
@@ -2863,9 +3190,6 @@ static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf)
2863 flush_workqueue(priv->workqueue); 3190 flush_workqueue(priv->workqueue);
2864 destroy_workqueue(priv->workqueue); 3191 destroy_workqueue(priv->workqueue);
2865 3192
2866 if (priv && priv->wpa_ie_len)
2867 kfree(priv->wpa_ie);
2868
2869 rndis_unbind(usbdev, intf); 3193 rndis_unbind(usbdev, intf);
2870 3194
2871 wiphy_unregister(priv->wdev.wiphy); 3195 wiphy_unregister(priv->wdev.wiphy);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 5239e082cd0f..eea1ef2f502b 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -87,7 +87,7 @@ if RT2800PCI
87 87
88config RT2800PCI_RT30XX 88config RT2800PCI_RT30XX
89 bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices" 89 bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices"
90 default n 90 default y
91 ---help--- 91 ---help---
92 This adds support for rt30xx wireless chipset family to the 92 This adds support for rt30xx wireless chipset family to the
93 rt2800pci driver. 93 rt2800pci driver.
@@ -156,7 +156,7 @@ if RT2800USB
156 156
157config RT2800USB_RT30XX 157config RT2800USB_RT30XX
158 bool "rt2800usb - Include support for rt30xx (USB) devices" 158 bool "rt2800usb - Include support for rt30xx (USB) devices"
159 default n 159 default y
160 ---help--- 160 ---help---
161 This adds support for rt30xx wireless chipset family to the 161 This adds support for rt30xx wireless chipset family to the
162 rt2800usb driver. 162 rt2800usb driver.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 5f5204b82891..4ba7b038928f 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -526,6 +526,10 @@ static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev,
526 526
527 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1); 527 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
528 rt2x00pci_register_write(rt2x00dev, CSR20, reg); 528 rt2x00pci_register_write(rt2x00dev, CSR20, reg);
529 } else {
530 rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
531 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
532 rt2x00pci_register_write(rt2x00dev, CSR20, reg);
529 } 533 }
530 534
531 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); 535 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -1003,19 +1007,19 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1003{ 1007{
1004 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1008 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1005 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data; 1009 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
1006 __le32 *txd = skbdesc->desc; 1010 __le32 *txd = entry_priv->desc;
1007 u32 word; 1011 u32 word;
1008 1012
1009 /* 1013 /*
1010 * Start writing the descriptor words. 1014 * Start writing the descriptor words.
1011 */ 1015 */
1012 rt2x00_desc_read(entry_priv->desc, 1, &word); 1016 rt2x00_desc_read(txd, 1, &word);
1013 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); 1017 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
1014 rt2x00_desc_write(entry_priv->desc, 1, word); 1018 rt2x00_desc_write(txd, 1, word);
1015 1019
1016 rt2x00_desc_read(txd, 2, &word); 1020 rt2x00_desc_read(txd, 2, &word);
1017 rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, skb->len); 1021 rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, txdesc->length);
1018 rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, skb->len); 1022 rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, txdesc->length);
1019 rt2x00_desc_write(txd, 2, word); 1023 rt2x00_desc_write(txd, 2, word);
1020 1024
1021 rt2x00_desc_read(txd, 3, &word); 1025 rt2x00_desc_read(txd, 3, &word);
@@ -1036,6 +1040,11 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1036 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1); 1040 rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1);
1037 rt2x00_desc_write(txd, 4, word); 1041 rt2x00_desc_write(txd, 4, word);
1038 1042
1043 /*
1044 * Writing TXD word 0 must the last to prevent a race condition with
1045 * the device, whereby the device may take hold of the TXD before we
1046 * finished updating it.
1047 */
1039 rt2x00_desc_read(txd, 0, &word); 1048 rt2x00_desc_read(txd, 0, &word);
1040 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); 1049 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
1041 rt2x00_set_field32(&word, TXD_W0_VALID, 1); 1050 rt2x00_set_field32(&word, TXD_W0_VALID, 1);
@@ -1051,12 +1060,19 @@ static void rt2400pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1051 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1060 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1052 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1061 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1053 rt2x00_desc_write(txd, 0, word); 1062 rt2x00_desc_write(txd, 0, word);
1063
1064 /*
1065 * Register descriptor details in skb frame descriptor.
1066 */
1067 skbdesc->desc = txd;
1068 skbdesc->desc_len = TXD_DESC_SIZE;
1054} 1069}
1055 1070
1056/* 1071/*
1057 * TX data initialization 1072 * TX data initialization
1058 */ 1073 */
1059static void rt2400pci_write_beacon(struct queue_entry *entry) 1074static void rt2400pci_write_beacon(struct queue_entry *entry,
1075 struct txentry_desc *txdesc)
1060{ 1076{
1061 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1077 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1062 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1078 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
@@ -1072,20 +1088,19 @@ static void rt2400pci_write_beacon(struct queue_entry *entry)
1072 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 1088 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
1073 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1089 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1074 1090
1075 /*
1076 * Replace rt2x00lib allocated descriptor with the
1077 * pointer to the _real_ hardware descriptor.
1078 * After that, map the beacon to DMA and update the
1079 * descriptor.
1080 */
1081 memcpy(entry_priv->desc, skbdesc->desc, skbdesc->desc_len);
1082 skbdesc->desc = entry_priv->desc;
1083
1084 rt2x00queue_map_txskb(rt2x00dev, entry->skb); 1091 rt2x00queue_map_txskb(rt2x00dev, entry->skb);
1085 1092
1086 rt2x00_desc_read(entry_priv->desc, 1, &word); 1093 rt2x00_desc_read(entry_priv->desc, 1, &word);
1087 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); 1094 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
1088 rt2x00_desc_write(entry_priv->desc, 1, word); 1095 rt2x00_desc_write(entry_priv->desc, 1, word);
1096
1097 /*
1098 * Enable beaconing again.
1099 */
1100 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
1101 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
1102 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
1103 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1089} 1104}
1090 1105
1091static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1106static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
@@ -1093,17 +1108,6 @@ static void rt2400pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1093{ 1108{
1094 u32 reg; 1109 u32 reg;
1095 1110
1096 if (queue == QID_BEACON) {
1097 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
1098 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
1099 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
1100 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
1101 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
1102 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1103 }
1104 return;
1105 }
1106
1107 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1111 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1108 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE)); 1112 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
1109 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK)); 1113 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 2a73f593aab0..89d132d4af12 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -574,6 +574,10 @@ static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev,
574 574
575 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1); 575 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
576 rt2x00pci_register_write(rt2x00dev, CSR20, reg); 576 rt2x00pci_register_write(rt2x00dev, CSR20, reg);
577 } else {
578 rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
579 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
580 rt2x00pci_register_write(rt2x00dev, CSR20, reg);
577 } 581 }
578 582
579 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); 583 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -1161,15 +1165,15 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1161{ 1165{
1162 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1166 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1163 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data; 1167 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
1164 __le32 *txd = skbdesc->desc; 1168 __le32 *txd = entry_priv->desc;
1165 u32 word; 1169 u32 word;
1166 1170
1167 /* 1171 /*
1168 * Start writing the descriptor words. 1172 * Start writing the descriptor words.
1169 */ 1173 */
1170 rt2x00_desc_read(entry_priv->desc, 1, &word); 1174 rt2x00_desc_read(txd, 1, &word);
1171 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); 1175 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
1172 rt2x00_desc_write(entry_priv->desc, 1, word); 1176 rt2x00_desc_write(txd, 1, word);
1173 1177
1174 rt2x00_desc_read(txd, 2, &word); 1178 rt2x00_desc_read(txd, 2, &word);
1175 rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER); 1179 rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER);
@@ -1190,6 +1194,11 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1190 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)); 1194 test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
1191 rt2x00_desc_write(txd, 10, word); 1195 rt2x00_desc_write(txd, 10, word);
1192 1196
1197 /*
1198 * Writing TXD word 0 must the last to prevent a race condition with
1199 * the device, whereby the device may take hold of the TXD before we
1200 * finished updating it.
1201 */
1193 rt2x00_desc_read(txd, 0, &word); 1202 rt2x00_desc_read(txd, 0, &word);
1194 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); 1203 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
1195 rt2x00_set_field32(&word, TXD_W0_VALID, 1); 1204 rt2x00_set_field32(&word, TXD_W0_VALID, 1);
@@ -1205,15 +1214,22 @@ static void rt2500pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1205 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs); 1214 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1206 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, 1215 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1207 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); 1216 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1208 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); 1217 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
1209 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); 1218 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE);
1210 rt2x00_desc_write(txd, 0, word); 1219 rt2x00_desc_write(txd, 0, word);
1220
1221 /*
1222 * Register descriptor details in skb frame descriptor.
1223 */
1224 skbdesc->desc = txd;
1225 skbdesc->desc_len = TXD_DESC_SIZE;
1211} 1226}
1212 1227
1213/* 1228/*
1214 * TX data initialization 1229 * TX data initialization
1215 */ 1230 */
1216static void rt2500pci_write_beacon(struct queue_entry *entry) 1231static void rt2500pci_write_beacon(struct queue_entry *entry,
1232 struct txentry_desc *txdesc)
1217{ 1233{
1218 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1234 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1219 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1235 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
@@ -1229,20 +1245,19 @@ static void rt2500pci_write_beacon(struct queue_entry *entry)
1229 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); 1245 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
1230 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1246 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1231 1247
1232 /*
1233 * Replace rt2x00lib allocated descriptor with the
1234 * pointer to the _real_ hardware descriptor.
1235 * After that, map the beacon to DMA and update the
1236 * descriptor.
1237 */
1238 memcpy(entry_priv->desc, skbdesc->desc, skbdesc->desc_len);
1239 skbdesc->desc = entry_priv->desc;
1240
1241 rt2x00queue_map_txskb(rt2x00dev, entry->skb); 1248 rt2x00queue_map_txskb(rt2x00dev, entry->skb);
1242 1249
1243 rt2x00_desc_read(entry_priv->desc, 1, &word); 1250 rt2x00_desc_read(entry_priv->desc, 1, &word);
1244 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); 1251 rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
1245 rt2x00_desc_write(entry_priv->desc, 1, word); 1252 rt2x00_desc_write(entry_priv->desc, 1, word);
1253
1254 /*
1255 * Enable beaconing again.
1256 */
1257 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
1258 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
1259 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
1260 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1246} 1261}
1247 1262
1248static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev, 1263static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
@@ -1250,17 +1265,6 @@ static void rt2500pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1250{ 1265{
1251 u32 reg; 1266 u32 reg;
1252 1267
1253 if (queue == QID_BEACON) {
1254 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
1255 if (!rt2x00_get_field32(reg, CSR14_BEACON_GEN)) {
1256 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
1257 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
1258 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
1259 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1260 }
1261 return;
1262 }
1263
1264 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg); 1268 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1265 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE)); 1269 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue == QID_AC_BE));
1266 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK)); 1270 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue == QID_AC_BK));
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 8ebb705fe106..9ae96a626e6d 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -649,6 +649,10 @@ static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev,
649 649
650 rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1); 650 rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1);
651 rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); 651 rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
652 } else {
653 rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
654 rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0);
655 rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
652 } 656 }
653 657
654 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); 658 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -1030,12 +1034,30 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1030 struct txentry_desc *txdesc) 1034 struct txentry_desc *txdesc)
1031{ 1035{
1032 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1036 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1033 __le32 *txd = skbdesc->desc; 1037 __le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE);
1034 u32 word; 1038 u32 word;
1035 1039
1036 /* 1040 /*
1037 * Start writing the descriptor words. 1041 * Start writing the descriptor words.
1038 */ 1042 */
1043 rt2x00_desc_read(txd, 0, &word);
1044 rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit);
1045 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
1046 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
1047 rt2x00_set_field32(&word, TXD_W0_ACK,
1048 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
1049 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1050 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1051 rt2x00_set_field32(&word, TXD_W0_OFDM,
1052 (txdesc->rate_mode == RATE_MODE_OFDM));
1053 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
1054 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
1055 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1056 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
1057 rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
1058 rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
1059 rt2x00_desc_write(txd, 0, word);
1060
1039 rt2x00_desc_read(txd, 1, &word); 1061 rt2x00_desc_read(txd, 1, &word);
1040 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); 1062 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
1041 rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs); 1063 rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs);
@@ -1055,23 +1077,11 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1055 _rt2x00_desc_write(txd, 4, skbdesc->iv[1]); 1077 _rt2x00_desc_write(txd, 4, skbdesc->iv[1]);
1056 } 1078 }
1057 1079
1058 rt2x00_desc_read(txd, 0, &word); 1080 /*
1059 rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit); 1081 * Register descriptor details in skb frame descriptor.
1060 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, 1082 */
1061 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); 1083 skbdesc->desc = txd;
1062 rt2x00_set_field32(&word, TXD_W0_ACK, 1084 skbdesc->desc_len = TXD_DESC_SIZE;
1063 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
1064 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1065 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1066 rt2x00_set_field32(&word, TXD_W0_OFDM,
1067 (txdesc->rate_mode == RATE_MODE_OFDM));
1068 rt2x00_set_field32(&word, TXD_W0_NEW_SEQ,
1069 test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags));
1070 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1071 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
1072 rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher);
1073 rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx);
1074 rt2x00_desc_write(txd, 0, word);
1075} 1085}
1076 1086
1077/* 1087/*
@@ -1079,22 +1089,15 @@ static void rt2500usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1079 */ 1089 */
1080static void rt2500usb_beacondone(struct urb *urb); 1090static void rt2500usb_beacondone(struct urb *urb);
1081 1091
1082static void rt2500usb_write_beacon(struct queue_entry *entry) 1092static void rt2500usb_write_beacon(struct queue_entry *entry,
1093 struct txentry_desc *txdesc)
1083{ 1094{
1084 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1095 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1085 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 1096 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
1086 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; 1097 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
1087 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1088 int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint); 1098 int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint);
1089 int length; 1099 int length;
1090 u16 reg; 1100 u16 reg, reg0;
1091
1092 /*
1093 * Add the descriptor in front of the skb.
1094 */
1095 skb_push(entry->skb, entry->queue->desc_size);
1096 memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len);
1097 skbdesc->desc = entry->skb->data;
1098 1101
1099 /* 1102 /*
1100 * Disable beaconing while we are reloading the beacon data, 1103 * Disable beaconing while we are reloading the beacon data,
@@ -1105,6 +1108,11 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1105 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); 1108 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
1106 1109
1107 /* 1110 /*
1111 * Take the descriptor in front of the skb into account.
1112 */
1113 skb_push(entry->skb, TXD_DESC_SIZE);
1114
1115 /*
1108 * USB devices cannot blindly pass the skb->len as the 1116 * USB devices cannot blindly pass the skb->len as the
1109 * length of the data to usb_fill_bulk_urb. Pass the skb 1117 * length of the data to usb_fill_bulk_urb. Pass the skb
1110 * to the driver to determine what the length should be. 1118 * to the driver to determine what the length should be.
@@ -1129,6 +1137,26 @@ static void rt2500usb_write_beacon(struct queue_entry *entry)
1129 * Send out the guardian byte. 1137 * Send out the guardian byte.
1130 */ 1138 */
1131 usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC); 1139 usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC);
1140
1141 /*
1142 * Enable beaconing again.
1143 */
1144 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
1145 rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
1146 reg0 = reg;
1147 rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
1148 /*
1149 * Beacon generation will fail initially.
1150 * To prevent this we need to change the TXRX_CSR19
1151 * register several times (reg0 is the same as reg
1152 * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0
1153 * and 1 in reg).
1154 */
1155 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
1156 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
1157 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
1158 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
1159 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
1132} 1160}
1133 1161
1134static int rt2500usb_get_tx_data_len(struct queue_entry *entry) 1162static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
@@ -1145,37 +1173,6 @@ static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
1145 return length; 1173 return length;
1146} 1174}
1147 1175
1148static void rt2500usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1149 const enum data_queue_qid queue)
1150{
1151 u16 reg, reg0;
1152
1153 if (queue != QID_BEACON) {
1154 rt2x00usb_kick_tx_queue(rt2x00dev, queue);
1155 return;
1156 }
1157
1158 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
1159 if (!rt2x00_get_field16(reg, TXRX_CSR19_BEACON_GEN)) {
1160 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
1161 rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
1162 reg0 = reg;
1163 rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
1164 /*
1165 * Beacon generation will fail initially.
1166 * To prevent this we need to change the TXRX_CSR19
1167 * register several times (reg0 is the same as reg
1168 * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0
1169 * and 1 in reg).
1170 */
1171 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
1172 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
1173 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
1174 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0);
1175 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
1176 }
1177}
1178
1179/* 1176/*
1180 * RX control handlers 1177 * RX control handlers
1181 */ 1178 */
@@ -1210,11 +1207,9 @@ static void rt2500usb_fill_rxdone(struct queue_entry *entry,
1210 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) 1207 if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
1211 rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC; 1208 rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;
1212 1209
1213 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { 1210 rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER);
1214 rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER); 1211 if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR))
1215 if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR)) 1212 rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY;
1216 rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY;
1217 }
1218 1213
1219 if (rxdesc->cipher != CIPHER_NONE) { 1214 if (rxdesc->cipher != CIPHER_NONE) {
1220 _rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]); 1215 _rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
@@ -1644,11 +1639,6 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1644 unsigned int i; 1639 unsigned int i;
1645 1640
1646 /* 1641 /*
1647 * Disable powersaving as default.
1648 */
1649 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1650
1651 /*
1652 * Initialize all hw fields. 1642 * Initialize all hw fields.
1653 */ 1643 */
1654 rt2x00dev->hw->flags = 1644 rt2x00dev->hw->flags =
@@ -1781,7 +1771,7 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1781 .write_tx_data = rt2x00usb_write_tx_data, 1771 .write_tx_data = rt2x00usb_write_tx_data,
1782 .write_beacon = rt2500usb_write_beacon, 1772 .write_beacon = rt2500usb_write_beacon,
1783 .get_tx_data_len = rt2500usb_get_tx_data_len, 1773 .get_tx_data_len = rt2500usb_get_tx_data_len,
1784 .kick_tx_queue = rt2500usb_kick_tx_queue, 1774 .kick_tx_queue = rt2x00usb_kick_tx_queue,
1785 .kill_tx_queue = rt2x00usb_kill_tx_queue, 1775 .kill_tx_queue = rt2x00usb_kill_tx_queue,
1786 .fill_rxdone = rt2500usb_fill_rxdone, 1776 .fill_rxdone = rt2500usb_fill_rxdone,
1787 .config_shared_key = rt2500usb_config_key, 1777 .config_shared_key = rt2500usb_config_key,
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 74c0433dba37..2aa03751c341 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -56,15 +56,20 @@
56#define RF3021 0x0007 56#define RF3021 0x0007
57#define RF3022 0x0008 57#define RF3022 0x0008
58#define RF3052 0x0009 58#define RF3052 0x0009
59#define RF3320 0x000b
59 60
60/* 61/*
61 * Chipset version. 62 * Chipset revisions.
62 */ 63 */
63#define RT2860C_VERSION 0x0100 64#define REV_RT2860C 0x0100
64#define RT2860D_VERSION 0x0101 65#define REV_RT2860D 0x0101
65#define RT2880E_VERSION 0x0200 66#define REV_RT2870D 0x0101
66#define RT2883_VERSION 0x0300 67#define REV_RT2872E 0x0200
67#define RT3070_VERSION 0x0200 68#define REV_RT3070E 0x0200
69#define REV_RT3070F 0x0201
70#define REV_RT3071E 0x0211
71#define REV_RT3090E 0x0211
72#define REV_RT3390E 0x0211
68 73
69/* 74/*
70 * Signal information. 75 * Signal information.
@@ -90,13 +95,19 @@
90#define NUM_TX_QUEUES 4 95#define NUM_TX_QUEUES 4
91 96
92/* 97/*
93 * USB registers. 98 * Registers.
94 */ 99 */
95 100
96/* 101/*
102 * OPT_14: Unknown register used by rt3xxx devices.
103 */
104#define OPT_14_CSR 0x0114
105#define OPT_14_CSR_BIT0 FIELD32(0x00000001)
106
107/*
97 * INT_SOURCE_CSR: Interrupt source register. 108 * INT_SOURCE_CSR: Interrupt source register.
98 * Write one to clear corresponding bit. 109 * Write one to clear corresponding bit.
99 * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c 110 * TX_FIFO_STATUS: FIFO Statistics is full, sw should read TX_STA_FIFO
100 */ 111 */
101#define INT_SOURCE_CSR 0x0200 112#define INT_SOURCE_CSR 0x0200
102#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001) 113#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001)
@@ -398,6 +409,31 @@
398#define EFUSE_DATA3 0x059c 409#define EFUSE_DATA3 0x059c
399 410
400/* 411/*
412 * LDO_CFG0
413 */
414#define LDO_CFG0 0x05d4
415#define LDO_CFG0_DELAY3 FIELD32(0x000000ff)
416#define LDO_CFG0_DELAY2 FIELD32(0x0000ff00)
417#define LDO_CFG0_DELAY1 FIELD32(0x00ff0000)
418#define LDO_CFG0_BGSEL FIELD32(0x03000000)
419#define LDO_CFG0_LDO_CORE_VLEVEL FIELD32(0x1c000000)
420#define LD0_CFG0_LDO25_LEVEL FIELD32(0x60000000)
421#define LDO_CFG0_LDO25_LARGEA FIELD32(0x80000000)
422
423/*
424 * GPIO_SWITCH
425 */
426#define GPIO_SWITCH 0x05dc
427#define GPIO_SWITCH_0 FIELD32(0x00000001)
428#define GPIO_SWITCH_1 FIELD32(0x00000002)
429#define GPIO_SWITCH_2 FIELD32(0x00000004)
430#define GPIO_SWITCH_3 FIELD32(0x00000008)
431#define GPIO_SWITCH_4 FIELD32(0x00000010)
432#define GPIO_SWITCH_5 FIELD32(0x00000020)
433#define GPIO_SWITCH_6 FIELD32(0x00000040)
434#define GPIO_SWITCH_7 FIELD32(0x00000080)
435
436/*
401 * MAC Control/Status Registers(CSR). 437 * MAC Control/Status Registers(CSR).
402 * Some values are set in TU, whereas 1 TU == 1024 us. 438 * Some values are set in TU, whereas 1 TU == 1024 us.
403 */ 439 */
@@ -809,7 +845,7 @@
809 * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz 845 * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
810 */ 846 */
811#define TX_BAND_CFG 0x132c 847#define TX_BAND_CFG 0x132c
812#define TX_BAND_CFG_HT40_PLUS FIELD32(0x00000001) 848#define TX_BAND_CFG_HT40_MINUS FIELD32(0x00000001)
813#define TX_BAND_CFG_A FIELD32(0x00000002) 849#define TX_BAND_CFG_A FIELD32(0x00000002)
814#define TX_BAND_CFG_BG FIELD32(0x00000004) 850#define TX_BAND_CFG_BG FIELD32(0x00000004)
815 851
@@ -1483,7 +1519,7 @@ struct mac_iveiv_entry {
1483 * BBP 3: RX Antenna 1519 * BBP 3: RX Antenna
1484 */ 1520 */
1485#define BBP3_RX_ANTENNA FIELD8(0x18) 1521#define BBP3_RX_ANTENNA FIELD8(0x18)
1486#define BBP3_HT40_PLUS FIELD8(0x20) 1522#define BBP3_HT40_MINUS FIELD8(0x20)
1487 1523
1488/* 1524/*
1489 * BBP 4: Bandwidth 1525 * BBP 4: Bandwidth
@@ -1492,14 +1528,32 @@ struct mac_iveiv_entry {
1492#define BBP4_BANDWIDTH FIELD8(0x18) 1528#define BBP4_BANDWIDTH FIELD8(0x18)
1493 1529
1494/* 1530/*
1531 * BBP 138: Unknown
1532 */
1533#define BBP138_RX_ADC1 FIELD8(0x02)
1534#define BBP138_RX_ADC2 FIELD8(0x04)
1535#define BBP138_TX_DAC1 FIELD8(0x20)
1536#define BBP138_TX_DAC2 FIELD8(0x40)
1537
1538/*
1495 * RFCSR registers 1539 * RFCSR registers
1496 * The wordsize of the RFCSR is 8 bits. 1540 * The wordsize of the RFCSR is 8 bits.
1497 */ 1541 */
1498 1542
1499/* 1543/*
1544 * RFCSR 1:
1545 */
1546#define RFCSR1_RF_BLOCK_EN FIELD8(0x01)
1547#define RFCSR1_RX0_PD FIELD8(0x04)
1548#define RFCSR1_TX0_PD FIELD8(0x08)
1549#define RFCSR1_RX1_PD FIELD8(0x10)
1550#define RFCSR1_TX1_PD FIELD8(0x20)
1551
1552/*
1500 * RFCSR 6: 1553 * RFCSR 6:
1501 */ 1554 */
1502#define RFCSR6_R FIELD8(0x03) 1555#define RFCSR6_R1 FIELD8(0x03)
1556#define RFCSR6_R2 FIELD8(0x40)
1503 1557
1504/* 1558/*
1505 * RFCSR 7: 1559 * RFCSR 7:
@@ -1512,6 +1566,33 @@ struct mac_iveiv_entry {
1512#define RFCSR12_TX_POWER FIELD8(0x1f) 1566#define RFCSR12_TX_POWER FIELD8(0x1f)
1513 1567
1514/* 1568/*
1569 * RFCSR 13:
1570 */
1571#define RFCSR13_TX_POWER FIELD8(0x1f)
1572
1573/*
1574 * RFCSR 15:
1575 */
1576#define RFCSR15_TX_LO2_EN FIELD8(0x08)
1577
1578/*
1579 * RFCSR 17:
1580 */
1581#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
1582#define RFCSR17_TX_LO1_EN FIELD8(0x08)
1583#define RFCSR17_R FIELD8(0x20)
1584
1585/*
1586 * RFCSR 20:
1587 */
1588#define RFCSR20_RX_LO1_EN FIELD8(0x08)
1589
1590/*
1591 * RFCSR 21:
1592 */
1593#define RFCSR21_RX_LO2_EN FIELD8(0x08)
1594
1595/*
1515 * RFCSR 22: 1596 * RFCSR 22:
1516 */ 1597 */
1517#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01) 1598#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01)
@@ -1522,6 +1603,14 @@ struct mac_iveiv_entry {
1522#define RFCSR23_FREQ_OFFSET FIELD8(0x7f) 1603#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)
1523 1604
1524/* 1605/*
1606 * RFCSR 27:
1607 */
1608#define RFCSR27_R1 FIELD8(0x03)
1609#define RFCSR27_R2 FIELD8(0x04)
1610#define RFCSR27_R3 FIELD8(0x30)
1611#define RFCSR27_R4 FIELD8(0x40)
1612
1613/*
1525 * RFCSR 30: 1614 * RFCSR 30:
1526 */ 1615 */
1527#define RFCSR30_RF_CALIBRATION FIELD8(0x80) 1616#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
@@ -1603,6 +1692,8 @@ struct mac_iveiv_entry {
1603#define EEPROM_NIC_WPS_PBC FIELD16(0x0080) 1692#define EEPROM_NIC_WPS_PBC FIELD16(0x0080)
1604#define EEPROM_NIC_BW40M_BG FIELD16(0x0100) 1693#define EEPROM_NIC_BW40M_BG FIELD16(0x0100)
1605#define EEPROM_NIC_BW40M_A FIELD16(0x0200) 1694#define EEPROM_NIC_BW40M_A FIELD16(0x0200)
1695#define EEPROM_NIC_ANT_DIVERSITY FIELD16(0x0800)
1696#define EEPROM_NIC_DAC_TEST FIELD16(0x8000)
1606 1697
1607/* 1698/*
1608 * EEPROM frequency 1699 * EEPROM frequency
@@ -1659,6 +1750,12 @@ struct mac_iveiv_entry {
1659#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00) 1750#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)
1660 1751
1661/* 1752/*
1753 * EEPROM TXMIXER GAIN BG offset (note overlaps with EEPROM RSSI BG2).
1754 */
1755#define EEPROM_TXMIXER_GAIN_BG 0x0024
1756#define EEPROM_TXMIXER_GAIN_BG_VAL FIELD16(0x0007)
1757
1758/*
1662 * EEPROM RSSI A offset 1759 * EEPROM RSSI A offset
1663 */ 1760 */
1664#define EEPROM_RSSI_A 0x0025 1761#define EEPROM_RSSI_A 0x0025
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index c015ce9fdd09..db4250d1c8b3 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -41,9 +41,6 @@
41#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE) 41#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
42#include "rt2x00usb.h" 42#include "rt2x00usb.h"
43#endif 43#endif
44#if defined(CONFIG_RT2X00_LIB_PCI) || defined(CONFIG_RT2X00_LIB_PCI_MODULE)
45#include "rt2x00pci.h"
46#endif
47#include "rt2800lib.h" 44#include "rt2800lib.h"
48#include "rt2800.h" 45#include "rt2800.h"
49#include "rt2800usb.h" 46#include "rt2800usb.h"
@@ -76,6 +73,23 @@ MODULE_LICENSE("GPL");
76 rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \ 73 rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \
77 H2M_MAILBOX_CSR_OWNER, (__reg)) 74 H2M_MAILBOX_CSR_OWNER, (__reg))
78 75
76static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev)
77{
78 /* check for rt2872 on SoC */
79 if (!rt2x00_is_soc(rt2x00dev) ||
80 !rt2x00_rt(rt2x00dev, RT2872))
81 return false;
82
83 /* we know for sure that these rf chipsets are used on rt305x boards */
84 if (rt2x00_rf(rt2x00dev, RF3020) ||
85 rt2x00_rf(rt2x00dev, RF3021) ||
86 rt2x00_rf(rt2x00dev, RF3022))
87 return true;
88
89 NOTICE(rt2x00dev, "Unknown RF chipset on rt305x\n");
90 return false;
91}
92
79static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev, 93static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
80 const unsigned int word, const u8 value) 94 const unsigned int word, const u8 value)
81{ 95{
@@ -268,6 +282,104 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
268} 282}
269EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready); 283EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
270 284
285void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc)
286{
287 __le32 *txwi = (__le32 *)(skb->data - TXWI_DESC_SIZE);
288 u32 word;
289
290 /*
291 * Initialize TX Info descriptor
292 */
293 rt2x00_desc_read(txwi, 0, &word);
294 rt2x00_set_field32(&word, TXWI_W0_FRAG,
295 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
296 rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
297 rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
298 rt2x00_set_field32(&word, TXWI_W0_TS,
299 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
300 rt2x00_set_field32(&word, TXWI_W0_AMPDU,
301 test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
302 rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
303 rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->txop);
304 rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
305 rt2x00_set_field32(&word, TXWI_W0_BW,
306 test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
307 rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
308 test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
309 rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
310 rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
311 rt2x00_desc_write(txwi, 0, word);
312
313 rt2x00_desc_read(txwi, 1, &word);
314 rt2x00_set_field32(&word, TXWI_W1_ACK,
315 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
316 rt2x00_set_field32(&word, TXWI_W1_NSEQ,
317 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
318 rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
319 rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
320 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
321 txdesc->key_idx : 0xff);
322 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
323 txdesc->length);
324 rt2x00_set_field32(&word, TXWI_W1_PACKETID, txdesc->queue + 1);
325 rt2x00_desc_write(txwi, 1, word);
326
327 /*
328 * Always write 0 to IV/EIV fields, hardware will insert the IV
329 * from the IVEIV register when TXD_W3_WIV is set to 0.
330 * When TXD_W3_WIV is set to 1 it will use the IV data
331 * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
332 * crypto entry in the registers should be used to encrypt the frame.
333 */
334 _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
335 _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
336}
337EXPORT_SYMBOL_GPL(rt2800_write_txwi);
338
339void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *rxdesc)
340{
341 __le32 *rxwi = (__le32 *) skb->data;
342 u32 word;
343
344 rt2x00_desc_read(rxwi, 0, &word);
345
346 rxdesc->cipher = rt2x00_get_field32(word, RXWI_W0_UDF);
347 rxdesc->size = rt2x00_get_field32(word, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
348
349 rt2x00_desc_read(rxwi, 1, &word);
350
351 if (rt2x00_get_field32(word, RXWI_W1_SHORT_GI))
352 rxdesc->flags |= RX_FLAG_SHORT_GI;
353
354 if (rt2x00_get_field32(word, RXWI_W1_BW))
355 rxdesc->flags |= RX_FLAG_40MHZ;
356
357 /*
358 * Detect RX rate, always use MCS as signal type.
359 */
360 rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
361 rxdesc->signal = rt2x00_get_field32(word, RXWI_W1_MCS);
362 rxdesc->rate_mode = rt2x00_get_field32(word, RXWI_W1_PHYMODE);
363
364 /*
365 * Mask of 0x8 bit to remove the short preamble flag.
366 */
367 if (rxdesc->rate_mode == RATE_MODE_CCK)
368 rxdesc->signal &= ~0x8;
369
370 rt2x00_desc_read(rxwi, 2, &word);
371
372 rxdesc->rssi =
373 (rt2x00_get_field32(word, RXWI_W2_RSSI0) +
374 rt2x00_get_field32(word, RXWI_W2_RSSI1)) / 2;
375
376 /*
377 * Remove RXWI descriptor from start of buffer.
378 */
379 skb_pull(skb, RXWI_DESC_SIZE);
380}
381EXPORT_SYMBOL_GPL(rt2800_process_rxwi);
382
271#ifdef CONFIG_RT2X00_LIB_DEBUGFS 383#ifdef CONFIG_RT2X00_LIB_DEBUGFS
272const struct rt2x00debug rt2800_rt2x00debug = { 384const struct rt2x00debug rt2800_rt2x00debug = {
273 .owner = THIS_MODULE, 385 .owner = THIS_MODULE,
@@ -360,11 +472,6 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
360 rt2800_register_read(led->rt2x00dev, LED_CFG, &reg); 472 rt2800_register_read(led->rt2x00dev, LED_CFG, &reg);
361 rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on); 473 rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
362 rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off); 474 rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
363 rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
364 rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
365 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
366 rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
367 rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
368 rt2800_register_write(led->rt2x00dev, LED_CFG, reg); 475 rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
369 476
370 return 0; 477 return 0;
@@ -610,10 +717,6 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
610{ 717{
611 u32 reg; 718 u32 reg;
612 719
613 rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
614 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
615 rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
616
617 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg); 720 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
618 rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY, 721 rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
619 !!erp->short_preamble); 722 !!erp->short_preamble);
@@ -632,15 +735,10 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
632 735
633 rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg); 736 rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
634 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time); 737 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
635 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
636 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg); 738 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
637 739
638 rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg); 740 rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
639 rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
640 rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
641 rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
642 rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs); 741 rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
643 rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
644 rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg); 742 rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
645 743
646 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 744 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
@@ -718,10 +816,10 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
718 rt2x00dev->lna_gain = lna_gain; 816 rt2x00dev->lna_gain = lna_gain;
719} 817}
720 818
721static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev, 819static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
722 struct ieee80211_conf *conf, 820 struct ieee80211_conf *conf,
723 struct rf_channel *rf, 821 struct rf_channel *rf,
724 struct channel_info *info) 822 struct channel_info *info)
725{ 823{
726 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); 824 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
727 825
@@ -787,10 +885,10 @@ static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
787 rt2800_rf_write(rt2x00dev, 4, rf->rf4); 885 rt2800_rf_write(rt2x00dev, 4, rf->rf4);
788} 886}
789 887
790static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev, 888static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
791 struct ieee80211_conf *conf, 889 struct ieee80211_conf *conf,
792 struct rf_channel *rf, 890 struct rf_channel *rf,
793 struct channel_info *info) 891 struct channel_info *info)
794{ 892{
795 u8 rfcsr; 893 u8 rfcsr;
796 894
@@ -798,7 +896,7 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
798 rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3); 896 rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);
799 897
800 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr); 898 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
801 rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2); 899 rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
802 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); 900 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
803 901
804 rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr); 902 rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
@@ -806,6 +904,11 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
806 TXPOWER_G_TO_DEV(info->tx_power1)); 904 TXPOWER_G_TO_DEV(info->tx_power1));
807 rt2800_rfcsr_write(rt2x00dev, 12, rfcsr); 905 rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
808 906
907 rt2800_rfcsr_read(rt2x00dev, 13, &rfcsr);
908 rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER,
909 TXPOWER_G_TO_DEV(info->tx_power2));
910 rt2800_rfcsr_write(rt2x00dev, 13, rfcsr);
911
809 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr); 912 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
810 rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset); 913 rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
811 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr); 914 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
@@ -827,15 +930,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
827 unsigned int tx_pin; 930 unsigned int tx_pin;
828 u8 bbp; 931 u8 bbp;
829 932
830 if ((rt2x00_rt(rt2x00dev, RT3070) || 933 if (rt2x00_rf(rt2x00dev, RF2020) ||
831 rt2x00_rt(rt2x00dev, RT3090)) && 934 rt2x00_rf(rt2x00dev, RF3020) ||
832 (rt2x00_rf(rt2x00dev, RF2020) || 935 rt2x00_rf(rt2x00dev, RF3021) ||
833 rt2x00_rf(rt2x00dev, RF3020) || 936 rt2x00_rf(rt2x00dev, RF3022))
834 rt2x00_rf(rt2x00dev, RF3021) || 937 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
835 rt2x00_rf(rt2x00dev, RF3022)))
836 rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
837 else 938 else
838 rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info); 939 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
839 940
840 /* 941 /*
841 * Change BBP settings 942 * Change BBP settings
@@ -863,7 +964,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
863 } 964 }
864 965
865 rt2800_register_read(rt2x00dev, TX_BAND_CFG, &reg); 966 rt2800_register_read(rt2x00dev, TX_BAND_CFG, &reg);
866 rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf)); 967 rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_MINUS, conf_is_ht40_minus(conf));
867 rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14); 968 rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
868 rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14); 969 rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
869 rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg); 970 rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg);
@@ -896,11 +997,10 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
896 rt2800_bbp_write(rt2x00dev, 4, bbp); 997 rt2800_bbp_write(rt2x00dev, 4, bbp);
897 998
898 rt2800_bbp_read(rt2x00dev, 3, &bbp); 999 rt2800_bbp_read(rt2x00dev, 3, &bbp);
899 rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf)); 1000 rt2x00_set_field8(&bbp, BBP3_HT40_MINUS, conf_is_ht40_minus(conf));
900 rt2800_bbp_write(rt2x00dev, 3, bbp); 1001 rt2800_bbp_write(rt2x00dev, 3, bbp);
901 1002
902 if (rt2x00_rt(rt2x00dev, RT2860) && 1003 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
903 (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
904 if (conf_is_ht40(conf)) { 1004 if (conf_is_ht40(conf)) {
905 rt2800_bbp_write(rt2x00dev, 69, 0x1a); 1005 rt2800_bbp_write(rt2x00dev, 69, 0x1a);
906 rt2800_bbp_write(rt2x00dev, 70, 0x0a); 1006 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
@@ -988,10 +1088,6 @@ static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
988 libconf->conf->short_frame_max_tx_count); 1088 libconf->conf->short_frame_max_tx_count);
989 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 1089 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
990 libconf->conf->long_frame_max_tx_count); 1090 libconf->conf->long_frame_max_tx_count);
991 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
992 rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
993 rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
994 rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
995 rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg); 1091 rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
996} 1092}
997 1093
@@ -1015,13 +1111,13 @@ static void rt2800_config_ps(struct rt2x00_dev *rt2x00dev,
1015 1111
1016 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); 1112 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
1017 } else { 1113 } else {
1018 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
1019
1020 rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg); 1114 rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
1021 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0); 1115 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
1022 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0); 1116 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
1023 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0); 1117 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
1024 rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg); 1118 rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
1119
1120 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
1025 } 1121 }
1026} 1122}
1027 1123
@@ -1062,9 +1158,10 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
1062static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev) 1158static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
1063{ 1159{
1064 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { 1160 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
1065 if (rt2x00_is_usb(rt2x00dev) && 1161 if (rt2x00_rt(rt2x00dev, RT3070) ||
1066 rt2x00_rt(rt2x00dev, RT3070) && 1162 rt2x00_rt(rt2x00dev, RT3071) ||
1067 (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) 1163 rt2x00_rt(rt2x00dev, RT3090) ||
1164 rt2x00_rt(rt2x00dev, RT3390))
1068 return 0x1c + (2 * rt2x00dev->lna_gain); 1165 return 0x1c + (2 * rt2x00dev->lna_gain);
1069 else 1166 else
1070 return 0x2e + rt2x00dev->lna_gain; 1167 return 0x2e + rt2x00dev->lna_gain;
@@ -1095,8 +1192,7 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
1095void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, 1192void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
1096 const u32 count) 1193 const u32 count)
1097{ 1194{
1098 if (rt2x00_rt(rt2x00dev, RT2860) && 1195 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
1099 (rt2x00_rev(rt2x00dev) == RT2860C_VERSION))
1100 return; 1196 return;
1101 1197
1102 /* 1198 /*
@@ -1114,8 +1210,17 @@ EXPORT_SYMBOL_GPL(rt2800_link_tuner);
1114int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) 1210int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1115{ 1211{
1116 u32 reg; 1212 u32 reg;
1213 u16 eeprom;
1117 unsigned int i; 1214 unsigned int i;
1118 1215
1216 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
1217 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
1218 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
1219 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
1220 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
1221 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
1222 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
1223
1119 if (rt2x00_is_usb(rt2x00dev)) { 1224 if (rt2x00_is_usb(rt2x00dev)) {
1120 /* 1225 /*
1121 * Wait until BBP and RF are ready. 1226 * Wait until BBP and RF are ready.
@@ -1135,8 +1240,25 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1135 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg); 1240 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
1136 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 1241 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
1137 reg & ~0x00002000); 1242 reg & ~0x00002000);
1138 } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) 1243 } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
1244 /*
1245 * Reset DMA indexes
1246 */
1247 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
1248 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
1249 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
1250 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
1251 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
1252 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
1253 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
1254 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
1255 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
1256
1257 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
1258 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
1259
1139 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); 1260 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
1261 }
1140 1262
1141 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 1263 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
1142 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1); 1264 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
@@ -1181,12 +1303,42 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1181 rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0); 1303 rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
1182 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 1304 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1183 1305
1184 if (rt2x00_is_usb(rt2x00dev) && 1306 rt2800_config_filter(rt2x00dev, FIF_ALLMULTI);
1185 rt2x00_rt(rt2x00dev, RT3070) && 1307
1186 (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) { 1308 rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
1309 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, 9);
1310 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
1311 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
1312
1313 if (rt2x00_rt(rt2x00dev, RT3071) ||
1314 rt2x00_rt(rt2x00dev, RT3090) ||
1315 rt2x00_rt(rt2x00dev, RT3390)) {
1187 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 1316 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1188 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 1317 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
1189 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 1318 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
1319 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
1320 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
1321 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
1322 if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST))
1323 rt2800_register_write(rt2x00dev, TX_SW_CFG2,
1324 0x0000002c);
1325 else
1326 rt2800_register_write(rt2x00dev, TX_SW_CFG2,
1327 0x0000000f);
1328 } else {
1329 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
1330 }
1331 rt2800_register_write(rt2x00dev, TX_SW_CFG2, reg);
1332 } else if (rt2x00_rt(rt2x00dev, RT3070)) {
1333 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1334
1335 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
1336 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
1337 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000002c);
1338 } else {
1339 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
1340 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
1341 }
1190 } else { 1342 } else {
1191 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); 1343 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
1192 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 1344 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -1205,19 +1357,15 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1205 1357
1206 rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg); 1358 rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
1207 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9); 1359 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
1360 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 32);
1208 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10); 1361 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
1209 rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg); 1362 rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
1210 1363
1211 rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg); 1364 rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
1212 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE); 1365 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
1213 if ((rt2x00_rt(rt2x00dev, RT2872) && 1366 if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) ||
1214 (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION)) ||
1215 rt2x00_rt(rt2x00dev, RT2880) ||
1216 rt2x00_rt(rt2x00dev, RT2883) || 1367 rt2x00_rt(rt2x00dev, RT2883) ||
1217 rt2x00_rt(rt2x00dev, RT2890) || 1368 rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E))
1218 rt2x00_rt(rt2x00dev, RT3052) ||
1219 (rt2x00_rt(rt2x00dev, RT3070) &&
1220 (rt2x00_rev(rt2x00dev) < RT3070_VERSION)))
1221 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2); 1369 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
1222 else 1370 else
1223 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1); 1371 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
@@ -1225,38 +1373,61 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1225 rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0); 1373 rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
1226 rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); 1374 rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
1227 1375
1376 rt2800_register_read(rt2x00dev, LED_CFG, &reg);
1377 rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, 70);
1378 rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, 30);
1379 rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
1380 rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
1381 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
1382 rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
1383 rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
1384 rt2800_register_write(rt2x00dev, LED_CFG, reg);
1385
1228 rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f); 1386 rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
1229 1387
1388 rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
1389 rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 15);
1390 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 31);
1391 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
1392 rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
1393 rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
1394 rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
1395 rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
1396
1230 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg); 1397 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
1231 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1); 1398 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
1399 rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY, 1);
1232 rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0); 1400 rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
1233 rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0); 1401 rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
1402 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE, 1);
1234 rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0); 1403 rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
1235 rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0); 1404 rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
1236 rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg); 1405 rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
1237 1406
1238 rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg); 1407 rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
1239 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8); 1408 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3);
1240 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0); 1409 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
1241 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1); 1410 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
1242 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1); 1411 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1243 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 1412 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1244 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1); 1413 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1245 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1); 1414 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 0);
1246 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1); 1415 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1247 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1); 1416 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 0);
1417 rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, 1);
1248 rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg); 1418 rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
1249 1419
1250 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg); 1420 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
1251 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8); 1421 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3);
1252 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0); 1422 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
1253 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1); 1423 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
1254 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1); 1424 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1255 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 1425 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1256 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1); 1426 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1257 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1); 1427 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 0);
1258 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1); 1428 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1259 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1); 1429 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 0);
1430 rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, 1);
1260 rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg); 1431 rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
1261 1432
1262 rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg); 1433 rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
@@ -1269,11 +1440,13 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1269 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0); 1440 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
1270 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1); 1441 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1271 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0); 1442 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
1443 rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, 0);
1272 rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg); 1444 rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
1273 1445
1274 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg); 1446 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
1275 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084); 1447 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
1276 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0); 1448 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL,
1449 !rt2x00_is_usb(rt2x00dev));
1277 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1); 1450 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
1278 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1); 1451 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1279 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 1452 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
@@ -1281,6 +1454,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1281 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1); 1454 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
1282 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1); 1455 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1283 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1); 1456 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1457 rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, 0);
1284 rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg); 1458 rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
1285 1459
1286 rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg); 1460 rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
@@ -1293,6 +1467,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1293 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0); 1467 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
1294 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1); 1468 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1295 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0); 1469 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
1470 rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, 0);
1296 rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg); 1471 rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
1297 1472
1298 rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg); 1473 rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
@@ -1305,6 +1480,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1305 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1); 1480 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
1306 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1); 1481 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1307 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1); 1482 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1483 rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, 0);
1308 rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg); 1484 rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
1309 1485
1310 if (rt2x00_is_usb(rt2x00dev)) { 1486 if (rt2x00_is_usb(rt2x00dev)) {
@@ -1334,6 +1510,22 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1334 rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg); 1510 rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
1335 1511
1336 rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca); 1512 rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
1513
1514 /*
1515 * Usually the CCK SIFS time should be set to 10 and the OFDM SIFS
1516 * time should be set to 16. However, the original Ralink driver uses
1517 * 16 for both and indeed using a value of 10 for CCK SIFS results in
1518 * connection problems with 11g + CTS protection. Hence, use the same
1519 * defaults as the Ralink driver: 16 for both, CCK and OFDM SIFS.
1520 */
1521 rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
1522 rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, 16);
1523 rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, 16);
1524 rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
1525 rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, 314);
1526 rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
1527 rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
1528
1337 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); 1529 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
1338 1530
1339 /* 1531 /*
@@ -1481,45 +1673,79 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
1481 rt2800_wait_bbp_ready(rt2x00dev))) 1673 rt2800_wait_bbp_ready(rt2x00dev)))
1482 return -EACCES; 1674 return -EACCES;
1483 1675
1676 if (rt2800_is_305x_soc(rt2x00dev))
1677 rt2800_bbp_write(rt2x00dev, 31, 0x08);
1678
1484 rt2800_bbp_write(rt2x00dev, 65, 0x2c); 1679 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
1485 rt2800_bbp_write(rt2x00dev, 66, 0x38); 1680 rt2800_bbp_write(rt2x00dev, 66, 0x38);
1486 rt2800_bbp_write(rt2x00dev, 69, 0x12); 1681
1682 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
1683 rt2800_bbp_write(rt2x00dev, 69, 0x16);
1684 rt2800_bbp_write(rt2x00dev, 73, 0x12);
1685 } else {
1686 rt2800_bbp_write(rt2x00dev, 69, 0x12);
1687 rt2800_bbp_write(rt2x00dev, 73, 0x10);
1688 }
1689
1487 rt2800_bbp_write(rt2x00dev, 70, 0x0a); 1690 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
1488 rt2800_bbp_write(rt2x00dev, 73, 0x10); 1691
1489 rt2800_bbp_write(rt2x00dev, 81, 0x37); 1692 if (rt2x00_rt(rt2x00dev, RT3070) ||
1693 rt2x00_rt(rt2x00dev, RT3071) ||
1694 rt2x00_rt(rt2x00dev, RT3090) ||
1695 rt2x00_rt(rt2x00dev, RT3390)) {
1696 rt2800_bbp_write(rt2x00dev, 79, 0x13);
1697 rt2800_bbp_write(rt2x00dev, 80, 0x05);
1698 rt2800_bbp_write(rt2x00dev, 81, 0x33);
1699 } else if (rt2800_is_305x_soc(rt2x00dev)) {
1700 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
1701 rt2800_bbp_write(rt2x00dev, 80, 0x08);
1702 } else {
1703 rt2800_bbp_write(rt2x00dev, 81, 0x37);
1704 }
1705
1490 rt2800_bbp_write(rt2x00dev, 82, 0x62); 1706 rt2800_bbp_write(rt2x00dev, 82, 0x62);
1491 rt2800_bbp_write(rt2x00dev, 83, 0x6a); 1707 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
1492 rt2800_bbp_write(rt2x00dev, 84, 0x99); 1708
1709 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D) ||
1710 rt2x00_rt_rev(rt2x00dev, RT2870, REV_RT2870D))
1711 rt2800_bbp_write(rt2x00dev, 84, 0x19);
1712 else
1713 rt2800_bbp_write(rt2x00dev, 84, 0x99);
1714
1493 rt2800_bbp_write(rt2x00dev, 86, 0x00); 1715 rt2800_bbp_write(rt2x00dev, 86, 0x00);
1494 rt2800_bbp_write(rt2x00dev, 91, 0x04); 1716 rt2800_bbp_write(rt2x00dev, 91, 0x04);
1495 rt2800_bbp_write(rt2x00dev, 92, 0x00); 1717 rt2800_bbp_write(rt2x00dev, 92, 0x00);
1496 rt2800_bbp_write(rt2x00dev, 103, 0x00);
1497 rt2800_bbp_write(rt2x00dev, 105, 0x05);
1498 1718
1499 if (rt2x00_rt(rt2x00dev, RT2860) && 1719 if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
1500 (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) { 1720 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
1501 rt2800_bbp_write(rt2x00dev, 69, 0x16); 1721 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
1502 rt2800_bbp_write(rt2x00dev, 73, 0x12); 1722 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E) ||
1503 } 1723 rt2800_is_305x_soc(rt2x00dev))
1504 1724 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
1505 if (rt2x00_rt(rt2x00dev, RT2860) && 1725 else
1506 (rt2x00_rev(rt2x00dev) > RT2860D_VERSION)) 1726 rt2800_bbp_write(rt2x00dev, 103, 0x00);
1507 rt2800_bbp_write(rt2x00dev, 84, 0x19);
1508 1727
1509 if (rt2x00_is_usb(rt2x00dev) && 1728 if (rt2800_is_305x_soc(rt2x00dev))
1510 rt2x00_rt(rt2x00dev, RT3070) && 1729 rt2800_bbp_write(rt2x00dev, 105, 0x01);
1511 (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) { 1730 else
1512 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
1513 rt2800_bbp_write(rt2x00dev, 84, 0x99);
1514 rt2800_bbp_write(rt2x00dev, 105, 0x05); 1731 rt2800_bbp_write(rt2x00dev, 105, 0x05);
1515 } 1732 rt2800_bbp_write(rt2x00dev, 106, 0x35);
1516 1733
1517 if (rt2x00_rt(rt2x00dev, RT3052)) { 1734 if (rt2x00_rt(rt2x00dev, RT3071) ||
1518 rt2800_bbp_write(rt2x00dev, 31, 0x08); 1735 rt2x00_rt(rt2x00dev, RT3090) ||
1519 rt2800_bbp_write(rt2x00dev, 78, 0x0e); 1736 rt2x00_rt(rt2x00dev, RT3390)) {
1520 rt2800_bbp_write(rt2x00dev, 80, 0x08); 1737 rt2800_bbp_read(rt2x00dev, 138, &value);
1738
1739 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
1740 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
1741 value |= 0x20;
1742 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
1743 value &= ~0x02;
1744
1745 rt2800_bbp_write(rt2x00dev, 138, value);
1521 } 1746 }
1522 1747
1748
1523 for (i = 0; i < EEPROM_BBP_SIZE; i++) { 1749 for (i = 0; i < EEPROM_BBP_SIZE; i++) {
1524 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom); 1750 rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
1525 1751
@@ -1598,19 +1824,16 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1598{ 1824{
1599 u8 rfcsr; 1825 u8 rfcsr;
1600 u8 bbp; 1826 u8 bbp;
1827 u32 reg;
1828 u16 eeprom;
1601 1829
1602 if (rt2x00_is_usb(rt2x00dev) && 1830 if (!rt2x00_rt(rt2x00dev, RT3070) &&
1603 rt2x00_rt(rt2x00dev, RT3070) && 1831 !rt2x00_rt(rt2x00dev, RT3071) &&
1604 (rt2x00_rev(rt2x00dev) != RT3070_VERSION)) 1832 !rt2x00_rt(rt2x00dev, RT3090) &&
1833 !rt2x00_rt(rt2x00dev, RT3390) &&
1834 !rt2800_is_305x_soc(rt2x00dev))
1605 return 0; 1835 return 0;
1606 1836
1607 if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
1608 if (!rt2x00_rf(rt2x00dev, RF3020) &&
1609 !rt2x00_rf(rt2x00dev, RF3021) &&
1610 !rt2x00_rf(rt2x00dev, RF3022))
1611 return 0;
1612 }
1613
1614 /* 1837 /*
1615 * Init RF calibration. 1838 * Init RF calibration.
1616 */ 1839 */
@@ -1621,13 +1844,15 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1621 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); 1844 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
1622 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 1845 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
1623 1846
1624 if (rt2x00_is_usb(rt2x00dev)) { 1847 if (rt2x00_rt(rt2x00dev, RT3070) ||
1848 rt2x00_rt(rt2x00dev, RT3071) ||
1849 rt2x00_rt(rt2x00dev, RT3090)) {
1625 rt2800_rfcsr_write(rt2x00dev, 4, 0x40); 1850 rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
1626 rt2800_rfcsr_write(rt2x00dev, 5, 0x03); 1851 rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
1627 rt2800_rfcsr_write(rt2x00dev, 6, 0x02); 1852 rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
1628 rt2800_rfcsr_write(rt2x00dev, 7, 0x70); 1853 rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
1629 rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); 1854 rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
1630 rt2800_rfcsr_write(rt2x00dev, 10, 0x71); 1855 rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
1631 rt2800_rfcsr_write(rt2x00dev, 11, 0x21); 1856 rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
1632 rt2800_rfcsr_write(rt2x00dev, 12, 0x7b); 1857 rt2800_rfcsr_write(rt2x00dev, 12, 0x7b);
1633 rt2800_rfcsr_write(rt2x00dev, 14, 0x90); 1858 rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
@@ -1640,9 +1865,41 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1640 rt2800_rfcsr_write(rt2x00dev, 21, 0xdb); 1865 rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
1641 rt2800_rfcsr_write(rt2x00dev, 24, 0x16); 1866 rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
1642 rt2800_rfcsr_write(rt2x00dev, 25, 0x01); 1867 rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
1643 rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
1644 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f); 1868 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
1645 } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) { 1869 } else if (rt2x00_rt(rt2x00dev, RT3390)) {
1870 rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
1871 rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
1872 rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
1873 rt2800_rfcsr_write(rt2x00dev, 3, 0x62);
1874 rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
1875 rt2800_rfcsr_write(rt2x00dev, 5, 0x8b);
1876 rt2800_rfcsr_write(rt2x00dev, 6, 0x42);
1877 rt2800_rfcsr_write(rt2x00dev, 7, 0x34);
1878 rt2800_rfcsr_write(rt2x00dev, 8, 0x00);
1879 rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
1880 rt2800_rfcsr_write(rt2x00dev, 10, 0x61);
1881 rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
1882 rt2800_rfcsr_write(rt2x00dev, 12, 0x3b);
1883 rt2800_rfcsr_write(rt2x00dev, 13, 0xe0);
1884 rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
1885 rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
1886 rt2800_rfcsr_write(rt2x00dev, 16, 0xe0);
1887 rt2800_rfcsr_write(rt2x00dev, 17, 0x94);
1888 rt2800_rfcsr_write(rt2x00dev, 18, 0x5c);
1889 rt2800_rfcsr_write(rt2x00dev, 19, 0x4a);
1890 rt2800_rfcsr_write(rt2x00dev, 20, 0xb2);
1891 rt2800_rfcsr_write(rt2x00dev, 21, 0xf6);
1892 rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
1893 rt2800_rfcsr_write(rt2x00dev, 23, 0x14);
1894 rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
1895 rt2800_rfcsr_write(rt2x00dev, 25, 0x3d);
1896 rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
1897 rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
1898 rt2800_rfcsr_write(rt2x00dev, 28, 0x41);
1899 rt2800_rfcsr_write(rt2x00dev, 29, 0x8f);
1900 rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
1901 rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);
1902 } else if (rt2800_is_305x_soc(rt2x00dev)) {
1646 rt2800_rfcsr_write(rt2x00dev, 0, 0x50); 1903 rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
1647 rt2800_rfcsr_write(rt2x00dev, 1, 0x01); 1904 rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
1648 rt2800_rfcsr_write(rt2x00dev, 2, 0xf7); 1905 rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
@@ -1673,15 +1930,57 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1673 rt2800_rfcsr_write(rt2x00dev, 27, 0x23); 1930 rt2800_rfcsr_write(rt2x00dev, 27, 0x23);
1674 rt2800_rfcsr_write(rt2x00dev, 28, 0x13); 1931 rt2800_rfcsr_write(rt2x00dev, 28, 0x13);
1675 rt2800_rfcsr_write(rt2x00dev, 29, 0x83); 1932 rt2800_rfcsr_write(rt2x00dev, 29, 0x83);
1933 rt2800_rfcsr_write(rt2x00dev, 30, 0x00);
1934 rt2800_rfcsr_write(rt2x00dev, 31, 0x00);
1935 return 0;
1936 }
1937
1938 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
1939 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
1940 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
1941 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
1942 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
1943 } else if (rt2x00_rt(rt2x00dev, RT3071) ||
1944 rt2x00_rt(rt2x00dev, RT3090)) {
1945 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
1946 rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
1947 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
1948
1949 rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
1950
1951 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
1952 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
1953 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
1954 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
1955 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
1956 if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST))
1957 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
1958 else
1959 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
1960 }
1961 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
1962 } else if (rt2x00_rt(rt2x00dev, RT3390)) {
1963 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
1964 rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
1965 rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
1676 } 1966 }
1677 1967
1678 /* 1968 /*
1679 * Set RX Filter calibration for 20MHz and 40MHz 1969 * Set RX Filter calibration for 20MHz and 40MHz
1680 */ 1970 */
1681 rt2x00dev->calibration[0] = 1971 if (rt2x00_rt(rt2x00dev, RT3070)) {
1682 rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16); 1972 rt2x00dev->calibration[0] =
1683 rt2x00dev->calibration[1] = 1973 rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
1684 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19); 1974 rt2x00dev->calibration[1] =
1975 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
1976 } else if (rt2x00_rt(rt2x00dev, RT3071) ||
1977 rt2x00_rt(rt2x00dev, RT3090) ||
1978 rt2x00_rt(rt2x00dev, RT3390)) {
1979 rt2x00dev->calibration[0] =
1980 rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x13);
1981 rt2x00dev->calibration[1] =
1982 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
1983 }
1685 1984
1686 /* 1985 /*
1687 * Set back to initial state 1986 * Set back to initial state
@@ -1699,6 +1998,81 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1699 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0); 1998 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
1700 rt2800_bbp_write(rt2x00dev, 4, bbp); 1999 rt2800_bbp_write(rt2x00dev, 4, bbp);
1701 2000
2001 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
2002 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
2003 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
2004 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E))
2005 rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
2006
2007 rt2800_register_read(rt2x00dev, OPT_14_CSR, &reg);
2008 rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
2009 rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
2010
2011 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
2012 rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
2013 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
2014 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
2015 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
2016 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
2017 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
2018 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
2019 }
2020 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
2021 if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
2022 rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
2023 rt2x00_get_field16(eeprom,
2024 EEPROM_TXMIXER_GAIN_BG_VAL));
2025 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
2026
2027 if (rt2x00_rt(rt2x00dev, RT3090)) {
2028 rt2800_bbp_read(rt2x00dev, 138, &bbp);
2029
2030 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
2031 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
2032 rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
2033 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
2034 rt2x00_set_field8(&bbp, BBP138_TX_DAC1, 1);
2035
2036 rt2800_bbp_write(rt2x00dev, 138, bbp);
2037 }
2038
2039 if (rt2x00_rt(rt2x00dev, RT3071) ||
2040 rt2x00_rt(rt2x00dev, RT3090) ||
2041 rt2x00_rt(rt2x00dev, RT3390)) {
2042 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
2043 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
2044 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
2045 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
2046 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
2047 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
2048 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
2049
2050 rt2800_rfcsr_read(rt2x00dev, 15, &rfcsr);
2051 rt2x00_set_field8(&rfcsr, RFCSR15_TX_LO2_EN, 0);
2052 rt2800_rfcsr_write(rt2x00dev, 15, rfcsr);
2053
2054 rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr);
2055 rt2x00_set_field8(&rfcsr, RFCSR20_RX_LO1_EN, 0);
2056 rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
2057
2058 rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
2059 rt2x00_set_field8(&rfcsr, RFCSR21_RX_LO2_EN, 0);
2060 rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
2061 }
2062
2063 if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) {
2064 rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
2065 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
2066 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
2067 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
2068 else
2069 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
2070 rt2x00_set_field8(&rfcsr, RFCSR27_R2, 0);
2071 rt2x00_set_field8(&rfcsr, RFCSR27_R3, 0);
2072 rt2x00_set_field8(&rfcsr, RFCSR27_R4, 0);
2073 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
2074 }
2075
1702 return 0; 2076 return 0;
1703} 2077}
1704EXPORT_SYMBOL_GPL(rt2800_init_rfcsr); 2078EXPORT_SYMBOL_GPL(rt2800_init_rfcsr);
@@ -1774,10 +2148,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1774 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); 2148 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
1775 } else if (rt2x00_rt(rt2x00dev, RT2860) || 2149 } else if (rt2x00_rt(rt2x00dev, RT2860) ||
1776 rt2x00_rt(rt2x00dev, RT2870) || 2150 rt2x00_rt(rt2x00dev, RT2870) ||
1777 rt2x00_rt(rt2x00dev, RT2872) || 2151 rt2x00_rt(rt2x00dev, RT2872)) {
1778 rt2x00_rt(rt2x00dev, RT2880) ||
1779 (rt2x00_rt(rt2x00dev, RT2883) &&
1780 (rt2x00_rev(rt2x00dev) < RT2883_VERSION))) {
1781 /* 2152 /*
1782 * There is a max of 2 RX streams for RT28x0 series 2153 * There is a max of 2 RX streams for RT28x0 series
1783 */ 2154 */
@@ -1882,10 +2253,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
1882 if (!rt2x00_rt(rt2x00dev, RT2860) && 2253 if (!rt2x00_rt(rt2x00dev, RT2860) &&
1883 !rt2x00_rt(rt2x00dev, RT2870) && 2254 !rt2x00_rt(rt2x00dev, RT2870) &&
1884 !rt2x00_rt(rt2x00dev, RT2872) && 2255 !rt2x00_rt(rt2x00dev, RT2872) &&
1885 !rt2x00_rt(rt2x00dev, RT2880) &&
1886 !rt2x00_rt(rt2x00dev, RT2883) && 2256 !rt2x00_rt(rt2x00dev, RT2883) &&
1887 !rt2x00_rt(rt2x00dev, RT2890) &&
1888 !rt2x00_rt(rt2x00dev, RT3052) &&
1889 !rt2x00_rt(rt2x00dev, RT3070) && 2257 !rt2x00_rt(rt2x00dev, RT3070) &&
1890 !rt2x00_rt(rt2x00dev, RT3071) && 2258 !rt2x00_rt(rt2x00dev, RT3071) &&
1891 !rt2x00_rt(rt2x00dev, RT3090) && 2259 !rt2x00_rt(rt2x00dev, RT3090) &&
@@ -1954,7 +2322,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
1954EXPORT_SYMBOL_GPL(rt2800_init_eeprom); 2322EXPORT_SYMBOL_GPL(rt2800_init_eeprom);
1955 2323
1956/* 2324/*
1957 * RF value list for rt28x0 2325 * RF value list for rt28xx
1958 * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750) 2326 * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
1959 */ 2327 */
1960static const struct rf_channel rf_vals[] = { 2328static const struct rf_channel rf_vals[] = {
@@ -2029,10 +2397,10 @@ static const struct rf_channel rf_vals[] = {
2029}; 2397};
2030 2398
2031/* 2399/*
2032 * RF value list for rt3070 2400 * RF value list for rt3xxx
2033 * Supports: 2.4 GHz 2401 * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052)
2034 */ 2402 */
2035static const struct rf_channel rf_vals_302x[] = { 2403static const struct rf_channel rf_vals_3x[] = {
2036 {1, 241, 2, 2 }, 2404 {1, 241, 2, 2 },
2037 {2, 241, 2, 7 }, 2405 {2, 241, 2, 7 },
2038 {3, 242, 2, 2 }, 2406 {3, 242, 2, 2 },
@@ -2047,6 +2415,51 @@ static const struct rf_channel rf_vals_302x[] = {
2047 {12, 246, 2, 7 }, 2415 {12, 246, 2, 7 },
2048 {13, 247, 2, 2 }, 2416 {13, 247, 2, 2 },
2049 {14, 248, 2, 4 }, 2417 {14, 248, 2, 4 },
2418
2419 /* 802.11 UNI / HyperLan 2 */
2420 {36, 0x56, 0, 4},
2421 {38, 0x56, 0, 6},
2422 {40, 0x56, 0, 8},
2423 {44, 0x57, 0, 0},
2424 {46, 0x57, 0, 2},
2425 {48, 0x57, 0, 4},
2426 {52, 0x57, 0, 8},
2427 {54, 0x57, 0, 10},
2428 {56, 0x58, 0, 0},
2429 {60, 0x58, 0, 4},
2430 {62, 0x58, 0, 6},
2431 {64, 0x58, 0, 8},
2432
2433 /* 802.11 HyperLan 2 */
2434 {100, 0x5b, 0, 8},
2435 {102, 0x5b, 0, 10},
2436 {104, 0x5c, 0, 0},
2437 {108, 0x5c, 0, 4},
2438 {110, 0x5c, 0, 6},
2439 {112, 0x5c, 0, 8},
2440 {116, 0x5d, 0, 0},
2441 {118, 0x5d, 0, 2},
2442 {120, 0x5d, 0, 4},
2443 {124, 0x5d, 0, 8},
2444 {126, 0x5d, 0, 10},
2445 {128, 0x5e, 0, 0},
2446 {132, 0x5e, 0, 4},
2447 {134, 0x5e, 0, 6},
2448 {136, 0x5e, 0, 8},
2449 {140, 0x5f, 0, 0},
2450
2451 /* 802.11 UNII */
2452 {149, 0x5f, 0, 9},
2453 {151, 0x5f, 0, 11},
2454 {153, 0x60, 0, 1},
2455 {157, 0x60, 0, 5},
2456 {159, 0x60, 0, 7},
2457 {161, 0x60, 0, 9},
2458 {165, 0x61, 0, 1},
2459 {167, 0x61, 0, 3},
2460 {169, 0x61, 0, 5},
2461 {171, 0x61, 0, 7},
2462 {173, 0x61, 0, 9},
2050}; 2463};
2051 2464
2052int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) 2465int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
@@ -2087,11 +2500,11 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2087 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; 2500 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
2088 2501
2089 if (rt2x00_rf(rt2x00dev, RF2820) || 2502 if (rt2x00_rf(rt2x00dev, RF2820) ||
2090 rt2x00_rf(rt2x00dev, RF2720) || 2503 rt2x00_rf(rt2x00dev, RF2720)) {
2091 rt2x00_rf(rt2x00dev, RF3052)) {
2092 spec->num_channels = 14; 2504 spec->num_channels = 14;
2093 spec->channels = rf_vals; 2505 spec->channels = rf_vals;
2094 } else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) { 2506 } else if (rt2x00_rf(rt2x00dev, RF2850) ||
2507 rt2x00_rf(rt2x00dev, RF2750)) {
2095 spec->supported_bands |= SUPPORT_BAND_5GHZ; 2508 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2096 spec->num_channels = ARRAY_SIZE(rf_vals); 2509 spec->num_channels = ARRAY_SIZE(rf_vals);
2097 spec->channels = rf_vals; 2510 spec->channels = rf_vals;
@@ -2099,8 +2512,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2099 rt2x00_rf(rt2x00dev, RF2020) || 2512 rt2x00_rf(rt2x00dev, RF2020) ||
2100 rt2x00_rf(rt2x00dev, RF3021) || 2513 rt2x00_rf(rt2x00dev, RF3021) ||
2101 rt2x00_rf(rt2x00dev, RF3022)) { 2514 rt2x00_rf(rt2x00dev, RF3022)) {
2102 spec->num_channels = ARRAY_SIZE(rf_vals_302x); 2515 spec->num_channels = 14;
2103 spec->channels = rf_vals_302x; 2516 spec->channels = rf_vals_3x;
2517 } else if (rt2x00_rf(rt2x00dev, RF3052)) {
2518 spec->supported_bands |= SUPPORT_BAND_5GHZ;
2519 spec->num_channels = ARRAY_SIZE(rf_vals_3x);
2520 spec->channels = rf_vals_3x;
2104 } 2521 }
2105 2522
2106 /* 2523 /*
@@ -2111,8 +2528,11 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2111 else 2528 else
2112 spec->ht.ht_supported = false; 2529 spec->ht.ht_supported = false;
2113 2530
2531 /*
2532 * Don't set IEEE80211_HT_CAP_SUP_WIDTH_20_40 for now as it causes
2533 * reception problems with HT40 capable 11n APs
2534 */
2114 spec->ht.cap = 2535 spec->ht.cap =
2115 IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
2116 IEEE80211_HT_CAP_GRN_FLD | 2536 IEEE80211_HT_CAP_GRN_FLD |
2117 IEEE80211_HT_CAP_SGI_20 | 2537 IEEE80211_HT_CAP_SGI_20 |
2118 IEEE80211_HT_CAP_SGI_40 | 2538 IEEE80211_HT_CAP_SGI_40 |
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index ebabeae62d1b..94de999e2290 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -111,6 +111,9 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
111 const u8 command, const u8 token, 111 const u8 command, const u8 token,
112 const u8 arg0, const u8 arg1); 112 const u8 arg0, const u8 arg1);
113 113
114void rt2800_write_txwi(struct sk_buff *skb, struct txentry_desc *txdesc);
115void rt2800_process_rxwi(struct sk_buff *skb, struct rxdone_entry_desc *txdesc);
116
114extern const struct rt2x00debug rt2800_rt2x00debug; 117extern const struct rt2x00debug rt2800_rt2x00debug;
115 118
116int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev); 119int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 91cce2d0f6db..b2f23272c3aa 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -60,6 +60,12 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
60 unsigned int i; 60 unsigned int i;
61 u32 reg; 61 u32 reg;
62 62
63 /*
64 * SOC devices don't support MCU requests.
65 */
66 if (rt2x00_is_soc(rt2x00dev))
67 return;
68
63 for (i = 0; i < 200; i++) { 69 for (i = 0; i < 200; i++) {
64 rt2800_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg); 70 rt2800_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);
65 71
@@ -341,19 +347,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
341 struct queue_entry_priv_pci *entry_priv; 347 struct queue_entry_priv_pci *entry_priv;
342 u32 reg; 348 u32 reg;
343 349
344 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
345 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
346 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
347 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
348 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
349 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
350 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
351 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
352 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
353
354 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
355 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
356
357 /* 350 /*
358 * Initialize registers. 351 * Initialize registers.
359 */ 352 */
@@ -620,64 +613,31 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
620/* 613/*
621 * TX descriptor initialization 614 * TX descriptor initialization
622 */ 615 */
616static int rt2800pci_write_tx_data(struct queue_entry* entry,
617 struct txentry_desc *txdesc)
618{
619 int ret;
620
621 ret = rt2x00pci_write_tx_data(entry, txdesc);
622 if (ret)
623 return ret;
624
625 rt2800_write_txwi(entry->skb, txdesc);
626
627 return 0;
628}
629
630
623static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev, 631static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
624 struct sk_buff *skb, 632 struct sk_buff *skb,
625 struct txentry_desc *txdesc) 633 struct txentry_desc *txdesc)
626{ 634{
627 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 635 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
628 __le32 *txd = skbdesc->desc; 636 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
629 __le32 *txwi = (__le32 *)(skb->data - rt2x00dev->ops->extra_tx_headroom); 637 __le32 *txd = entry_priv->desc;
630 u32 word; 638 u32 word;
631 639
632 /* 640 /*
633 * Initialize TX Info descriptor
634 */
635 rt2x00_desc_read(txwi, 0, &word);
636 rt2x00_set_field32(&word, TXWI_W0_FRAG,
637 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
638 rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
639 rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
640 rt2x00_set_field32(&word, TXWI_W0_TS,
641 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
642 rt2x00_set_field32(&word, TXWI_W0_AMPDU,
643 test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
644 rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
645 rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
646 rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
647 rt2x00_set_field32(&word, TXWI_W0_BW,
648 test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
649 rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
650 test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
651 rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
652 rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
653 rt2x00_desc_write(txwi, 0, word);
654
655 rt2x00_desc_read(txwi, 1, &word);
656 rt2x00_set_field32(&word, TXWI_W1_ACK,
657 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
658 rt2x00_set_field32(&word, TXWI_W1_NSEQ,
659 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
660 rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
661 rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
662 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
663 txdesc->key_idx : 0xff);
664 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
665 skb->len - txdesc->l2pad);
666 rt2x00_set_field32(&word, TXWI_W1_PACKETID,
667 skbdesc->entry->queue->qid + 1);
668 rt2x00_desc_write(txwi, 1, word);
669
670 /*
671 * Always write 0 to IV/EIV fields, hardware will insert the IV
672 * from the IVEIV register when TXD_W3_WIV is set to 0.
673 * When TXD_W3_WIV is set to 1 it will use the IV data
674 * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
675 * crypto entry in the registers should be used to encrypt the frame.
676 */
677 _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
678 _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
679
680 /*
681 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1 641 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
682 * must contains a TXWI structure + 802.11 header + padding + 802.11 642 * must contains a TXWI structure + 802.11 header + padding + 802.11
683 * data. We choose to have SD_PTR0/SD_LEN0 only contains TXWI and 643 * data. We choose to have SD_PTR0/SD_LEN0 only contains TXWI and
@@ -698,15 +658,14 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
698 !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); 658 !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
699 rt2x00_set_field32(&word, TXD_W1_BURST, 659 rt2x00_set_field32(&word, TXD_W1_BURST,
700 test_bit(ENTRY_TXD_BURST, &txdesc->flags)); 660 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
701 rt2x00_set_field32(&word, TXD_W1_SD_LEN0, 661 rt2x00_set_field32(&word, TXD_W1_SD_LEN0, TXWI_DESC_SIZE);
702 rt2x00dev->ops->extra_tx_headroom);
703 rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0); 662 rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
704 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0); 663 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
705 rt2x00_desc_write(txd, 1, word); 664 rt2x00_desc_write(txd, 1, word);
706 665
707 rt2x00_desc_read(txd, 2, &word); 666 rt2x00_desc_read(txd, 2, &word);
708 rt2x00_set_field32(&word, TXD_W2_SD_PTR1, 667 rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
709 skbdesc->skb_dma + rt2x00dev->ops->extra_tx_headroom); 668 skbdesc->skb_dma + TXWI_DESC_SIZE);
710 rt2x00_desc_write(txd, 2, word); 669 rt2x00_desc_write(txd, 2, word);
711 670
712 rt2x00_desc_read(txd, 3, &word); 671 rt2x00_desc_read(txd, 3, &word);
@@ -714,15 +673,21 @@ static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
714 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); 673 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
715 rt2x00_set_field32(&word, TXD_W3_QSEL, 2); 674 rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
716 rt2x00_desc_write(txd, 3, word); 675 rt2x00_desc_write(txd, 3, word);
676
677 /*
678 * Register descriptor details in skb frame descriptor.
679 */
680 skbdesc->desc = txd;
681 skbdesc->desc_len = TXD_DESC_SIZE;
717} 682}
718 683
719/* 684/*
720 * TX data initialization 685 * TX data initialization
721 */ 686 */
722static void rt2800pci_write_beacon(struct queue_entry *entry) 687static void rt2800pci_write_beacon(struct queue_entry *entry,
688 struct txentry_desc *txdesc)
723{ 689{
724 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 690 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
725 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
726 unsigned int beacon_base; 691 unsigned int beacon_base;
727 u32 reg; 692 u32 reg;
728 693
@@ -735,15 +700,25 @@ static void rt2800pci_write_beacon(struct queue_entry *entry)
735 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 700 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
736 701
737 /* 702 /*
738 * Write entire beacon with descriptor to register. 703 * Add the TXWI for the beacon to the skb.
704 */
705 rt2800_write_txwi(entry->skb, txdesc);
706 skb_push(entry->skb, TXWI_DESC_SIZE);
707
708 /*
709 * Write entire beacon with TXWI to register.
739 */ 710 */
740 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 711 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
741 rt2800_register_multiwrite(rt2x00dev, 712 rt2800_register_multiwrite(rt2x00dev, beacon_base,
742 beacon_base, 713 entry->skb->data, entry->skb->len);
743 skbdesc->desc, skbdesc->desc_len); 714
744 rt2800_register_multiwrite(rt2x00dev, 715 /*
745 beacon_base + skbdesc->desc_len, 716 * Enable beaconing again.
746 entry->skb->data, entry->skb->len); 717 */
718 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
719 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
720 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
721 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
747 722
748 /* 723 /*
749 * Clean up beacon skb. 724 * Clean up beacon skb.
@@ -757,18 +732,6 @@ static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
757{ 732{
758 struct data_queue *queue; 733 struct data_queue *queue;
759 unsigned int idx, qidx = 0; 734 unsigned int idx, qidx = 0;
760 u32 reg;
761
762 if (queue_idx == QID_BEACON) {
763 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
764 if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
765 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
766 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
767 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
768 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
769 }
770 return;
771 }
772 735
773 if (queue_idx > QID_HCCA && queue_idx != QID_MGMT) 736 if (queue_idx > QID_HCCA && queue_idx != QID_MGMT)
774 return; 737 return;
@@ -811,34 +774,21 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
811 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 774 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
812 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 775 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
813 __le32 *rxd = entry_priv->desc; 776 __le32 *rxd = entry_priv->desc;
814 __le32 *rxwi = (__le32 *)entry->skb->data; 777 u32 word;
815 u32 rxd3; 778
816 u32 rxwi0; 779 rt2x00_desc_read(rxd, 3, &word);
817 u32 rxwi1; 780
818 u32 rxwi2; 781 if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR))
819 u32 rxwi3;
820
821 rt2x00_desc_read(rxd, 3, &rxd3);
822 rt2x00_desc_read(rxwi, 0, &rxwi0);
823 rt2x00_desc_read(rxwi, 1, &rxwi1);
824 rt2x00_desc_read(rxwi, 2, &rxwi2);
825 rt2x00_desc_read(rxwi, 3, &rxwi3);
826
827 if (rt2x00_get_field32(rxd3, RXD_W3_CRC_ERROR))
828 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 782 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
829 783
830 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { 784 /*
831 /* 785 * Unfortunately we don't know the cipher type used during
832 * Unfortunately we don't know the cipher type used during 786 * decryption. This prevents us from correct providing
833 * decryption. This prevents us from correct providing 787 * correct statistics through debugfs.
834 * correct statistics through debugfs. 788 */
835 */ 789 rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR);
836 rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
837 rxdesc->cipher_status =
838 rt2x00_get_field32(rxd3, RXD_W3_CIPHER_ERROR);
839 }
840 790
841 if (rt2x00_get_field32(rxd3, RXD_W3_DECRYPTED)) { 791 if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) {
842 /* 792 /*
843 * Hardware has stripped IV/EIV data from 802.11 frame during 793 * Hardware has stripped IV/EIV data from 802.11 frame during
844 * decryption. Unfortunately the descriptor doesn't contain 794 * decryption. Unfortunately the descriptor doesn't contain
@@ -853,51 +803,22 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
853 rxdesc->flags |= RX_FLAG_MMIC_ERROR; 803 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
854 } 804 }
855 805
856 if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS)) 806 if (rt2x00_get_field32(word, RXD_W3_MY_BSS))
857 rxdesc->dev_flags |= RXDONE_MY_BSS; 807 rxdesc->dev_flags |= RXDONE_MY_BSS;
858 808
859 if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) 809 if (rt2x00_get_field32(word, RXD_W3_L2PAD))
860 rxdesc->dev_flags |= RXDONE_L2PAD; 810 rxdesc->dev_flags |= RXDONE_L2PAD;
861 811
862 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
863 rxdesc->flags |= RX_FLAG_SHORT_GI;
864
865 if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
866 rxdesc->flags |= RX_FLAG_40MHZ;
867
868 /* 812 /*
869 * Detect RX rate, always use MCS as signal type. 813 * Process the RXWI structure that is at the start of the buffer.
870 */ 814 */
871 rxdesc->dev_flags |= RXDONE_SIGNAL_MCS; 815 rt2800_process_rxwi(entry->skb, rxdesc);
872 rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
873 rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);
874
875 /*
876 * Mask of 0x8 bit to remove the short preamble flag.
877 */
878 if (rxdesc->rate_mode == RATE_MODE_CCK)
879 rxdesc->signal &= ~0x8;
880
881 rxdesc->rssi =
882 (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
883 rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
884
885 rxdesc->noise =
886 (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
887 rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
888
889 rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
890 816
891 /* 817 /*
892 * Set RX IDX in register to inform hardware that we have handled 818 * Set RX IDX in register to inform hardware that we have handled
893 * this entry and it is available for reuse again. 819 * this entry and it is available for reuse again.
894 */ 820 */
895 rt2800_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx); 821 rt2800_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx);
896
897 /*
898 * Remove TXWI descriptor from start of buffer.
899 */
900 skb_pull(entry->skb, RXWI_DESC_SIZE);
901} 822}
902 823
903/* 824/*
@@ -907,14 +828,12 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
907{ 828{
908 struct data_queue *queue; 829 struct data_queue *queue;
909 struct queue_entry *entry; 830 struct queue_entry *entry;
910 struct queue_entry *entry_done; 831 __le32 *txwi;
911 struct queue_entry_priv_pci *entry_priv;
912 struct txdone_entry_desc txdesc; 832 struct txdone_entry_desc txdesc;
913 u32 word; 833 u32 word;
914 u32 reg; 834 u32 reg;
915 u32 old_reg; 835 u32 old_reg;
916 unsigned int type; 836 int wcid, ack, pid, tx_wcid, tx_ack, tx_pid;
917 unsigned int index;
918 u16 mcs, real_mcs; 837 u16 mcs, real_mcs;
919 838
920 /* 839 /*
@@ -936,76 +855,89 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
936 break; 855 break;
937 old_reg = reg; 856 old_reg = reg;
938 857
858 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
859 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
860 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
861
939 /* 862 /*
940 * Skip this entry when it contains an invalid 863 * Skip this entry when it contains an invalid
941 * queue identication number. 864 * queue identication number.
942 */ 865 */
943 type = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1; 866 if (pid <= 0 || pid > QID_RX)
944 if (type >= QID_RX)
945 continue; 867 continue;
946 868
947 queue = rt2x00queue_get_queue(rt2x00dev, type); 869 queue = rt2x00queue_get_queue(rt2x00dev, pid - 1);
948 if (unlikely(!queue)) 870 if (unlikely(!queue))
949 continue; 871 continue;
950 872
951 /* 873 /*
952 * Skip this entry when it contains an invalid 874 * Inside each queue, we process each entry in a chronological
953 * index number. 875 * order. We first check that the queue is not empty.
954 */ 876 */
955 index = rt2x00_get_field32(reg, TX_STA_FIFO_WCID) - 1; 877 if (rt2x00queue_empty(queue))
956 if (unlikely(index >= queue->limit))
957 continue; 878 continue;
879 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
958 880
959 entry = &queue->entries[index]; 881 /* Check if we got a match by looking at WCID/ACK/PID
960 entry_priv = entry->priv_data; 882 * fields */
961 rt2x00_desc_read((__le32 *)entry->skb->data, 0, &word); 883 txwi = (__le32 *)(entry->skb->data -
884 rt2x00dev->ops->extra_tx_headroom);
962 885
963 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 886 rt2x00_desc_read(txwi, 1, &word);
964 while (entry != entry_done) { 887 tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
965 /* 888 tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
966 * Catch up. 889 tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
967 * Just report any entries we missed as failed.
968 */
969 WARNING(rt2x00dev,
970 "TX status report missed for entry %d\n",
971 entry_done->entry_idx);
972 890
973 txdesc.flags = 0; 891 if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid))
974 __set_bit(TXDONE_UNKNOWN, &txdesc.flags); 892 WARNING(rt2x00dev, "invalid TX_STA_FIFO content\n");
975 txdesc.retry = 0;
976
977 rt2x00lib_txdone(entry_done, &txdesc);
978 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
979 }
980 893
981 /* 894 /*
982 * Obtain the status about this packet. 895 * Obtain the status about this packet.
983 */ 896 */
984 txdesc.flags = 0; 897 txdesc.flags = 0;
985 if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS)) 898 rt2x00_desc_read(txwi, 0, &word);
986 __set_bit(TXDONE_SUCCESS, &txdesc.flags); 899 mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
987 else 900 real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
988 __set_bit(TXDONE_FAILURE, &txdesc.flags);
989 901
990 /* 902 /*
991 * Ralink has a retry mechanism using a global fallback 903 * Ralink has a retry mechanism using a global fallback
992 * table. We setup this fallback table to try immediate 904 * table. We setup this fallback table to try the immediate
993 * lower rate for all rates. In the TX_STA_FIFO, 905 * lower rate for all rates. In the TX_STA_FIFO, the MCS field
994 * the MCS field contains the MCS used for the successfull 906 * always contains the MCS used for the last transmission, be
995 * transmission. If the first transmission succeed, 907 * it successful or not.
996 * we have mcs == tx_mcs. On the second transmission,
997 * we have mcs = tx_mcs - 1. So the number of
998 * retry is (tx_mcs - mcs).
999 */ 908 */
1000 mcs = rt2x00_get_field32(word, TXWI_W0_MCS); 909 if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS)) {
1001 real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS); 910 /*
911 * Transmission succeeded. The number of retries is
912 * mcs - real_mcs
913 */
914 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
915 txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0);
916 } else {
917 /*
918 * Transmission failed. The number of retries is
919 * always 7 in this case (for a total number of 8
920 * frames sent).
921 */
922 __set_bit(TXDONE_FAILURE, &txdesc.flags);
923 txdesc.retry = 7;
924 }
925
1002 __set_bit(TXDONE_FALLBACK, &txdesc.flags); 926 __set_bit(TXDONE_FALLBACK, &txdesc.flags);
1003 txdesc.retry = mcs - min(mcs, real_mcs); 927
1004 928
1005 rt2x00lib_txdone(entry, &txdesc); 929 rt2x00lib_txdone(entry, &txdesc);
1006 } 930 }
1007} 931}
1008 932
933static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
934{
935 struct ieee80211_conf conf = { .flags = 0 };
936 struct rt2x00lib_conf libconf = { .conf = &conf };
937
938 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
939}
940
1009static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) 941static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
1010{ 942{
1011 struct rt2x00_dev *rt2x00dev = dev_instance; 943 struct rt2x00_dev *rt2x00dev = dev_instance;
@@ -1030,6 +962,9 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
1030 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) 962 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
1031 rt2800pci_txdone(rt2x00dev); 963 rt2800pci_txdone(rt2x00dev);
1032 964
965 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
966 rt2800pci_wakeup(rt2x00dev);
967
1033 return IRQ_HANDLED; 968 return IRQ_HANDLED;
1034} 969}
1035 970
@@ -1128,7 +1063,7 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
1128 .reset_tuner = rt2800_reset_tuner, 1063 .reset_tuner = rt2800_reset_tuner,
1129 .link_tuner = rt2800_link_tuner, 1064 .link_tuner = rt2800_link_tuner,
1130 .write_tx_desc = rt2800pci_write_tx_desc, 1065 .write_tx_desc = rt2800pci_write_tx_desc,
1131 .write_tx_data = rt2x00pci_write_tx_data, 1066 .write_tx_data = rt2800pci_write_tx_data,
1132 .write_beacon = rt2800pci_write_beacon, 1067 .write_beacon = rt2800pci_write_beacon,
1133 .kick_tx_queue = rt2800pci_kick_tx_queue, 1068 .kick_tx_queue = rt2800pci_kick_tx_queue,
1134 .kill_tx_queue = rt2800pci_kill_tx_queue, 1069 .kill_tx_queue = rt2800pci_kill_tx_queue,
@@ -1184,6 +1119,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
1184/* 1119/*
1185 * RT2800pci module information. 1120 * RT2800pci module information.
1186 */ 1121 */
1122#ifdef CONFIG_RT2800PCI_PCI
1187static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { 1123static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1188 { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1124 { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
1189 { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1125 { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1208,9 +1144,11 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1208 { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1144 { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
1209 { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1145 { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
1210 { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1146 { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
1147 { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
1211#endif 1148#endif
1212 { 0, } 1149 { 0, }
1213}; 1150};
1151#endif /* CONFIG_RT2800PCI_PCI */
1214 1152
1215MODULE_AUTHOR(DRV_PROJECT); 1153MODULE_AUTHOR(DRV_PROJECT);
1216MODULE_VERSION(DRV_VERSION); 1154MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index d27d7d5d850c..0f8b84b7224c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -400,60 +400,16 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
400 struct txentry_desc *txdesc) 400 struct txentry_desc *txdesc)
401{ 401{
402 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 402 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
403 __le32 *txi = skbdesc->desc; 403 __le32 *txi = (__le32 *)(skb->data - TXWI_DESC_SIZE - TXINFO_DESC_SIZE);
404 __le32 *txwi = &txi[TXINFO_DESC_SIZE / sizeof(__le32)];
405 u32 word; 404 u32 word;
406 405
407 /* 406 /*
408 * Initialize TX Info descriptor 407 * Initialize TXWI descriptor
409 */ 408 */
410 rt2x00_desc_read(txwi, 0, &word); 409 rt2800_write_txwi(skb, txdesc);
411 rt2x00_set_field32(&word, TXWI_W0_FRAG,
412 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
413 rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
414 rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
415 rt2x00_set_field32(&word, TXWI_W0_TS,
416 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
417 rt2x00_set_field32(&word, TXWI_W0_AMPDU,
418 test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
419 rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
420 rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
421 rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
422 rt2x00_set_field32(&word, TXWI_W0_BW,
423 test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
424 rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
425 test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
426 rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
427 rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
428 rt2x00_desc_write(txwi, 0, word);
429
430 rt2x00_desc_read(txwi, 1, &word);
431 rt2x00_set_field32(&word, TXWI_W1_ACK,
432 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
433 rt2x00_set_field32(&word, TXWI_W1_NSEQ,
434 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
435 rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
436 rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
437 test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
438 txdesc->key_idx : 0xff);
439 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
440 skb->len - txdesc->l2pad);
441 rt2x00_set_field32(&word, TXWI_W1_PACKETID,
442 skbdesc->entry->queue->qid + 1);
443 rt2x00_desc_write(txwi, 1, word);
444 410
445 /* 411 /*
446 * Always write 0 to IV/EIV fields, hardware will insert the IV 412 * Initialize TXINFO descriptor
447 * from the IVEIV register when TXINFO_W0_WIV is set to 0.
448 * When TXINFO_W0_WIV is set to 1 it will use the IV data
449 * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
450 * crypto entry in the registers should be used to encrypt the frame.
451 */
452 _rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
453 _rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);
454
455 /*
456 * Initialize TX descriptor
457 */ 413 */
458 rt2x00_desc_read(txi, 0, &word); 414 rt2x00_desc_read(txi, 0, &word);
459 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, 415 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
@@ -466,26 +422,25 @@ static void rt2800usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
466 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_BURST, 422 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_BURST,
467 test_bit(ENTRY_TXD_BURST, &txdesc->flags)); 423 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
468 rt2x00_desc_write(txi, 0, word); 424 rt2x00_desc_write(txi, 0, word);
425
426 /*
427 * Register descriptor details in skb frame descriptor.
428 */
429 skbdesc->desc = txi;
430 skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
469} 431}
470 432
471/* 433/*
472 * TX data initialization 434 * TX data initialization
473 */ 435 */
474static void rt2800usb_write_beacon(struct queue_entry *entry) 436static void rt2800usb_write_beacon(struct queue_entry *entry,
437 struct txentry_desc *txdesc)
475{ 438{
476 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 439 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
477 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
478 unsigned int beacon_base; 440 unsigned int beacon_base;
479 u32 reg; 441 u32 reg;
480 442
481 /* 443 /*
482 * Add the descriptor in front of the skb.
483 */
484 skb_push(entry->skb, entry->queue->desc_size);
485 memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len);
486 skbdesc->desc = entry->skb->data;
487
488 /*
489 * Disable beaconing while we are reloading the beacon data, 444 * Disable beaconing while we are reloading the beacon data,
490 * otherwise we might be sending out invalid data. 445 * otherwise we might be sending out invalid data.
491 */ 446 */
@@ -494,6 +449,12 @@ static void rt2800usb_write_beacon(struct queue_entry *entry)
494 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 449 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
495 450
496 /* 451 /*
452 * Add the TXWI for the beacon to the skb.
453 */
454 rt2800_write_txwi(entry->skb, txdesc);
455 skb_push(entry->skb, TXWI_DESC_SIZE);
456
457 /*
497 * Write entire beacon with descriptor to register. 458 * Write entire beacon with descriptor to register.
498 */ 459 */
499 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 460 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
@@ -503,6 +464,14 @@ static void rt2800usb_write_beacon(struct queue_entry *entry)
503 REGISTER_TIMEOUT32(entry->skb->len)); 464 REGISTER_TIMEOUT32(entry->skb->len));
504 465
505 /* 466 /*
467 * Enable beaconing again.
468 */
469 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
470 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
471 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
472 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
473
474 /*
506 * Clean up the beacon skb. 475 * Clean up the beacon skb.
507 */ 476 */
508 dev_kfree_skb(entry->skb); 477 dev_kfree_skb(entry->skb);
@@ -524,84 +493,53 @@ static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
524 return length; 493 return length;
525} 494}
526 495
527static void rt2800usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
528 const enum data_queue_qid queue)
529{
530 u32 reg;
531
532 if (queue != QID_BEACON) {
533 rt2x00usb_kick_tx_queue(rt2x00dev, queue);
534 return;
535 }
536
537 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
538 if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
539 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
540 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
541 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
542 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
543 }
544}
545
546/* 496/*
547 * RX control handlers 497 * RX control handlers
548 */ 498 */
549static void rt2800usb_fill_rxdone(struct queue_entry *entry, 499static void rt2800usb_fill_rxdone(struct queue_entry *entry,
550 struct rxdone_entry_desc *rxdesc) 500 struct rxdone_entry_desc *rxdesc)
551{ 501{
552 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
553 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 502 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
554 __le32 *rxi = (__le32 *)entry->skb->data; 503 __le32 *rxi = (__le32 *)entry->skb->data;
555 __le32 *rxwi;
556 __le32 *rxd; 504 __le32 *rxd;
557 u32 rxi0; 505 u32 word;
558 u32 rxwi0;
559 u32 rxwi1;
560 u32 rxwi2;
561 u32 rxwi3;
562 u32 rxd0;
563 int rx_pkt_len; 506 int rx_pkt_len;
564 507
565 /* 508 /*
509 * Copy descriptor to the skbdesc->desc buffer, making it safe from
510 * moving of frame data in rt2x00usb.
511 */
512 memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
513
514 /*
566 * RX frame format is : 515 * RX frame format is :
567 * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad | 516 * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
568 * |<------------ rx_pkt_len -------------->| 517 * |<------------ rx_pkt_len -------------->|
569 */ 518 */
570 rt2x00_desc_read(rxi, 0, &rxi0); 519 rt2x00_desc_read(rxi, 0, &word);
571 rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN); 520 rx_pkt_len = rt2x00_get_field32(word, RXINFO_W0_USB_DMA_RX_PKT_LEN);
572
573 rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE);
574 521
575 /* 522 /*
576 * FIXME : we need to check for rx_pkt_len validity 523 * Remove the RXINFO structure from the sbk.
577 */ 524 */
578 rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len); 525 skb_pull(entry->skb, RXINFO_DESC_SIZE);
579 526
580 /* 527 /*
581 * Copy descriptor to the skbdesc->desc buffer, making it safe from 528 * FIXME: we need to check for rx_pkt_len validity
582 * moving of frame data in rt2x00usb.
583 */ 529 */
584 memcpy(skbdesc->desc, rxi, skbdesc->desc_len); 530 rxd = (__le32 *)(entry->skb->data + rx_pkt_len);
585 531
586 /* 532 /*
587 * It is now safe to read the descriptor on all architectures. 533 * It is now safe to read the descriptor on all architectures.
588 */ 534 */
589 rt2x00_desc_read(rxwi, 0, &rxwi0); 535 rt2x00_desc_read(rxd, 0, &word);
590 rt2x00_desc_read(rxwi, 1, &rxwi1);
591 rt2x00_desc_read(rxwi, 2, &rxwi2);
592 rt2x00_desc_read(rxwi, 3, &rxwi3);
593 rt2x00_desc_read(rxd, 0, &rxd0);
594 536
595 if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR)) 537 if (rt2x00_get_field32(word, RXD_W0_CRC_ERROR))
596 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 538 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
597 539
598 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { 540 rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W0_CIPHER_ERROR);
599 rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
600 rxdesc->cipher_status =
601 rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
602 }
603 541
604 if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) { 542 if (rt2x00_get_field32(word, RXD_W0_DECRYPTED)) {
605 /* 543 /*
606 * Hardware has stripped IV/EIV data from 802.11 frame during 544 * Hardware has stripped IV/EIV data from 802.11 frame during
607 * decryption. Unfortunately the descriptor doesn't contain 545 * decryption. Unfortunately the descriptor doesn't contain
@@ -616,45 +554,21 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
616 rxdesc->flags |= RX_FLAG_MMIC_ERROR; 554 rxdesc->flags |= RX_FLAG_MMIC_ERROR;
617 } 555 }
618 556
619 if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS)) 557 if (rt2x00_get_field32(word, RXD_W0_MY_BSS))
620 rxdesc->dev_flags |= RXDONE_MY_BSS; 558 rxdesc->dev_flags |= RXDONE_MY_BSS;
621 559
622 if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD)) 560 if (rt2x00_get_field32(word, RXD_W0_L2PAD))
623 rxdesc->dev_flags |= RXDONE_L2PAD; 561 rxdesc->dev_flags |= RXDONE_L2PAD;
624 562
625 if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
626 rxdesc->flags |= RX_FLAG_SHORT_GI;
627
628 if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
629 rxdesc->flags |= RX_FLAG_40MHZ;
630
631 /* 563 /*
632 * Detect RX rate, always use MCS as signal type. 564 * Remove RXD descriptor from end of buffer.
633 */ 565 */
634 rxdesc->dev_flags |= RXDONE_SIGNAL_MCS; 566 skb_trim(entry->skb, rx_pkt_len);
635 rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
636 rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);
637 567
638 /* 568 /*
639 * Mask of 0x8 bit to remove the short preamble flag. 569 * Process the RXWI structure.
640 */ 570 */
641 if (rxdesc->rate_mode == RATE_MODE_CCK) 571 rt2800_process_rxwi(entry->skb, rxdesc);
642 rxdesc->signal &= ~0x8;
643
644 rxdesc->rssi =
645 (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
646 rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;
647
648 rxdesc->noise =
649 (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
650 rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;
651
652 rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);
653
654 /*
655 * Remove RXWI descriptor from start of buffer.
656 */
657 skb_pull(entry->skb, skbdesc->desc_len);
658} 572}
659 573
660/* 574/*
@@ -747,7 +661,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
747 .write_tx_data = rt2x00usb_write_tx_data, 661 .write_tx_data = rt2x00usb_write_tx_data,
748 .write_beacon = rt2800usb_write_beacon, 662 .write_beacon = rt2800usb_write_beacon,
749 .get_tx_data_len = rt2800usb_get_tx_data_len, 663 .get_tx_data_len = rt2800usb_get_tx_data_len,
750 .kick_tx_queue = rt2800usb_kick_tx_queue, 664 .kick_tx_queue = rt2x00usb_kick_tx_queue,
751 .kill_tx_queue = rt2x00usb_kill_tx_queue, 665 .kill_tx_queue = rt2x00usb_kill_tx_queue,
752 .fill_rxdone = rt2800usb_fill_rxdone, 666 .fill_rxdone = rt2800usb_fill_rxdone,
753 .config_shared_key = rt2800_config_shared_key, 667 .config_shared_key = rt2800_config_shared_key,
@@ -806,6 +720,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
806 { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 720 { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
807 { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 721 { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
808 { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 722 { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
723 /* Allwin */
724 { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
725 { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
726 { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
809 /* Amit */ 727 /* Amit */
810 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, 728 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
811 /* Askey */ 729 /* Askey */
@@ -841,13 +759,18 @@ static struct usb_device_id rt2800usb_device_table[] = {
841 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) }, 759 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
842 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) }, 760 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
843 /* EnGenius */ 761 /* EnGenius */
844 { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) }, 762 { USB_DEVICE(0x1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
845 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) }, 763 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
846 /* Gigabyte */ 764 /* Gigabyte */
847 { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) }, 765 { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
848 /* Hawking */ 766 /* Hawking */
849 { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) }, 767 { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
850 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) }, 768 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
769 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
770 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
771 { USB_DEVICE(0x0e66, 0x0013), USB_DEVICE_DATA(&rt2800usb_ops) },
772 { USB_DEVICE(0x0e66, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
773 { USB_DEVICE(0x0e66, 0x0018), USB_DEVICE_DATA(&rt2800usb_ops) },
851 /* Linksys */ 774 /* Linksys */
852 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) }, 775 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
853 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) }, 776 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -876,6 +799,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
876 { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) }, 799 { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
877 { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) }, 800 { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
878 { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) }, 801 { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
802 { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
803 { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
879 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, 804 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
880 /* SMC */ 805 /* SMC */
881 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) }, 806 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -905,8 +830,17 @@ static struct usb_device_id rt2800usb_device_table[] = {
905 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 830 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
906 /* AirTies */ 831 /* AirTies */
907 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) }, 832 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
833 /* Allwin */
834 { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
835 { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
836 { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
837 /* ASUS */
838 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
908 /* AzureWave */ 839 /* AzureWave */
909 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, 840 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
841 { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
842 { USB_DEVICE(0x13d3, 0x3307), USB_DEVICE_DATA(&rt2800usb_ops) },
843 { USB_DEVICE(0x13d3, 0x3321), USB_DEVICE_DATA(&rt2800usb_ops) },
910 /* Conceptronic */ 844 /* Conceptronic */
911 { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) }, 845 { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
912 /* Corega */ 846 /* Corega */
@@ -916,20 +850,46 @@ static struct usb_device_id rt2800usb_device_table[] = {
916 { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) }, 850 { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
917 { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) }, 851 { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
918 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) }, 852 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
853 { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
854 /* Draytek */
855 { USB_DEVICE(0x07fa, 0x7712), USB_DEVICE_DATA(&rt2800usb_ops) },
919 /* Edimax */ 856 /* Edimax */
920 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) }, 857 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
921 /* Encore */ 858 /* Encore */
922 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) }, 859 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
860 { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
923 /* EnGenius */ 861 /* EnGenius */
924 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) }, 862 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
925 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) }, 863 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
926 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) }, 864 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
865 { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
866 { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
867 { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
927 /* Gigabyte */ 868 /* Gigabyte */
928 { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) }, 869 { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
929 /* I-O DATA */ 870 /* I-O DATA */
930 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) }, 871 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
872 { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
873 { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
874 /* Logitec */
875 { USB_DEVICE(0x0789, 0x0166), USB_DEVICE_DATA(&rt2800usb_ops) },
931 /* MSI */ 876 /* MSI */
932 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) }, 877 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
878 { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
879 { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
880 { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
881 { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
882 { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
883 { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
884 { USB_DEVICE(0x0db0, 0x822b), USB_DEVICE_DATA(&rt2800usb_ops) },
885 { USB_DEVICE(0x0db0, 0x822c), USB_DEVICE_DATA(&rt2800usb_ops) },
886 { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
887 { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
888 { USB_DEVICE(0x0db0, 0x871b), USB_DEVICE_DATA(&rt2800usb_ops) },
889 { USB_DEVICE(0x0db0, 0x871c), USB_DEVICE_DATA(&rt2800usb_ops) },
890 { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
891 /* Para */
892 { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
933 /* Pegatron */ 893 /* Pegatron */
934 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) }, 894 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
935 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) }, 895 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -944,14 +904,22 @@ static struct usb_device_id rt2800usb_device_table[] = {
944 { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 904 { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
945 /* Sitecom */ 905 /* Sitecom */
946 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) }, 906 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
907 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
947 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) }, 908 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
909 { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
910 { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
948 /* SMC */ 911 /* SMC */
949 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) }, 912 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
913 { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
914 { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
915 { USB_DEVICE(0x083a, 0xa703), USB_DEVICE_DATA(&rt2800usb_ops) },
950 /* Zinwell */ 916 /* Zinwell */
951 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) }, 917 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
952 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) }, 918 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
953#endif 919#endif
954#ifdef CONFIG_RT2800USB_RT35XX 920#ifdef CONFIG_RT2800USB_RT35XX
921 /* Allwin */
922 { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
955 /* Askey */ 923 /* Askey */
956 { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) }, 924 { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
957 /* Cisco */ 925 /* Cisco */
@@ -966,37 +934,27 @@ static struct usb_device_id rt2800usb_device_table[] = {
966 { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) }, 934 { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
967 /* Sitecom */ 935 /* Sitecom */
968 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, 936 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
937 { USB_DEVICE(0x0df6, 0x0050), USB_DEVICE_DATA(&rt2800usb_ops) },
969 /* Zinwell */ 938 /* Zinwell */
970 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) }, 939 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
971#endif 940#endif
972#ifdef CONFIG_RT2800USB_UNKNOWN 941#ifdef CONFIG_RT2800USB_UNKNOWN
973 /* 942 /*
974 * Unclear what kind of devices these are (they aren't supported by the 943 * Unclear what kind of devices these are (they aren't supported by the
975 * vendor driver). 944 * vendor linux driver).
976 */ 945 */
977 /* Allwin */
978 { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
979 { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
980 { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
981 { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
982 { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
983 { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
984 { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
985 /* Amigo */ 946 /* Amigo */
986 { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, 947 { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
987 { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) }, 948 { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
988 /* Askey */
989 { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
990 /* ASUS */ 949 /* ASUS */
991 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, 950 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
992 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, 951 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
993 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
994 { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, 952 { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
995 { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) }, 953 { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
996 /* AzureWave */ 954 /* AzureWave */
997 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, 955 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
998 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, 956 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
999 { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) }, 957 { USB_DEVICE(0x13d3, 0x3322), USB_DEVICE_DATA(&rt2800usb_ops) },
1000 /* Belkin */ 958 /* Belkin */
1001 { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) }, 959 { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
1002 /* Buffalo */ 960 /* Buffalo */
@@ -1015,24 +973,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
1015 { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) }, 973 { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
1016 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, 974 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
1017 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, 975 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
1018 { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) }, 976 { USB_DEVICE(0x07d1, 0x3c17), USB_DEVICE_DATA(&rt2800usb_ops) },
1019 /* Encore */ 977 /* Encore */
1020 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, 978 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
1021 { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
1022 /* EnGenius */
1023 { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
1024 { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
1025 { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
1026 /* Gemtek */ 979 /* Gemtek */
1027 { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 980 { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
1028 /* Gigabyte */ 981 /* Gigabyte */
1029 { USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) }, 982 { USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) },
1030 /* Hawking */
1031 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
1032 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
1033 /* I-O DATA */
1034 { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
1035 { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
1036 /* LevelOne */ 983 /* LevelOne */
1037 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) }, 984 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
1038 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) }, 985 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -1042,43 +989,23 @@ static struct usb_device_id rt2800usb_device_table[] = {
1042 { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) }, 989 { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
1043 /* Motorola */ 990 /* Motorola */
1044 { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) }, 991 { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
1045 /* MSI */
1046 { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
1047 { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
1048 { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
1049 { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
1050 { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
1051 { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
1052 { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
1053 { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
1054 { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
1055 /* Ovislink */ 992 /* Ovislink */
993 { USB_DEVICE(0x1b75, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
1056 { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 994 { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
1057 /* Para */
1058 { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
1059 /* Pegatron */ 995 /* Pegatron */
1060 { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) }, 996 { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) },
1061 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) }, 997 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
1062 { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 998 { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
999 { USB_DEVICE(0x1d4d, 0x0011), USB_DEVICE_DATA(&rt2800usb_ops) },
1063 /* Planex */ 1000 /* Planex */
1064 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, 1001 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
1065 /* Qcom */ 1002 /* Qcom */
1066 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, 1003 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
1067 /* Sitecom */
1068 { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
1069 { USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
1070 { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
1071 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
1072 { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
1073 { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
1074 { USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) },
1075 { USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) },
1076 /* SMC */ 1004 /* SMC */
1077 { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) }, 1005 { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
1078 { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
1079 { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
1080 { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) }, 1006 { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
1081 { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) }, 1007 { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) },
1008 { USB_DEVICE(0x083a, 0xf511), USB_DEVICE_DATA(&rt2800usb_ops) },
1082 /* Sweex */ 1009 /* Sweex */
1083 { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) }, 1010 { USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
1084 { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) }, 1011 { USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index d1d8ae94b4d4..2bca6a71a7f5 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -79,8 +79,6 @@
79 */ 79 */
80#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 80#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
81#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 81#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
82#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
83#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
84 82
85/* 83/*
86 * TX Info structure 84 * TX Info structure
@@ -113,44 +111,6 @@
113#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff) 111#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff)
114 112
115/* 113/*
116 * RX WI structure
117 */
118
119/*
120 * Word0
121 */
122#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
123#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
124#define RXWI_W0_BSSID FIELD32(0x00001c00)
125#define RXWI_W0_UDF FIELD32(0x0000e000)
126#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
127#define RXWI_W0_TID FIELD32(0xf0000000)
128
129/*
130 * Word1
131 */
132#define RXWI_W1_FRAG FIELD32(0x0000000f)
133#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
134#define RXWI_W1_MCS FIELD32(0x007f0000)
135#define RXWI_W1_BW FIELD32(0x00800000)
136#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
137#define RXWI_W1_STBC FIELD32(0x06000000)
138#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
139
140/*
141 * Word2
142 */
143#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
144#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
145#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
146
147/*
148 * Word3
149 */
150#define RXWI_W3_SNR0 FIELD32(0x000000ff)
151#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
152
153/*
154 * RX descriptor format for RX Ring. 114 * RX descriptor format for RX Ring.
155 */ 115 */
156 116
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index d9daa9c406fa..6c1ff4c15c84 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -177,16 +177,15 @@ struct rt2x00_chip {
177#define RT2573 0x2573 177#define RT2573 0x2573
178#define RT2860 0x2860 /* 2.4GHz PCI/CB */ 178#define RT2860 0x2860 /* 2.4GHz PCI/CB */
179#define RT2870 0x2870 179#define RT2870 0x2870
180#define RT2872 0x2872 180#define RT2872 0x2872 /* WSOC */
181#define RT2880 0x2880 /* WSOC */
182#define RT2883 0x2883 /* WSOC */ 181#define RT2883 0x2883 /* WSOC */
183#define RT2890 0x2890 /* 2.4GHz PCIe */
184#define RT3052 0x3052 /* WSOC */
185#define RT3070 0x3070 182#define RT3070 0x3070
186#define RT3071 0x3071 183#define RT3071 0x3071
187#define RT3090 0x3090 /* 2.4GHz PCIe */ 184#define RT3090 0x3090 /* 2.4GHz PCIe */
188#define RT3390 0x3390 185#define RT3390 0x3390
189#define RT3572 0x3572 186#define RT3572 0x3572
187#define RT3593 0x3593 /* PCIe */
188#define RT3883 0x3883 /* WSOC */
190 189
191 u16 rf; 190 u16 rf;
192 u16 rev; 191 u16 rev;
@@ -550,8 +549,10 @@ struct rt2x00lib_ops {
550 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev, 549 void (*write_tx_desc) (struct rt2x00_dev *rt2x00dev,
551 struct sk_buff *skb, 550 struct sk_buff *skb,
552 struct txentry_desc *txdesc); 551 struct txentry_desc *txdesc);
553 int (*write_tx_data) (struct queue_entry *entry); 552 int (*write_tx_data) (struct queue_entry *entry,
554 void (*write_beacon) (struct queue_entry *entry); 553 struct txentry_desc *txdesc);
554 void (*write_beacon) (struct queue_entry *entry,
555 struct txentry_desc *txdesc);
555 int (*get_tx_data_len) (struct queue_entry *entry); 556 int (*get_tx_data_len) (struct queue_entry *entry);
556 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev, 557 void (*kick_tx_queue) (struct rt2x00_dev *rt2x00dev,
557 const enum data_queue_qid queue); 558 const enum data_queue_qid queue);
@@ -930,12 +931,12 @@ static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev,
930 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev); 931 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
931} 932}
932 933
933static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt) 934static inline bool rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
934{ 935{
935 return (rt2x00dev->chip.rt == rt); 936 return (rt2x00dev->chip.rt == rt);
936} 937}
937 938
938static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf) 939static inline bool rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
939{ 940{
940 return (rt2x00dev->chip.rf == rf); 941 return (rt2x00dev->chip.rf == rf);
941} 942}
@@ -945,6 +946,24 @@ static inline u16 rt2x00_rev(struct rt2x00_dev *rt2x00dev)
945 return rt2x00dev->chip.rev; 946 return rt2x00dev->chip.rev;
946} 947}
947 948
949static inline bool rt2x00_rt_rev(struct rt2x00_dev *rt2x00dev,
950 const u16 rt, const u16 rev)
951{
952 return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) == rev);
953}
954
955static inline bool rt2x00_rt_rev_lt(struct rt2x00_dev *rt2x00dev,
956 const u16 rt, const u16 rev)
957{
958 return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) < rev);
959}
960
961static inline bool rt2x00_rt_rev_gte(struct rt2x00_dev *rt2x00dev,
962 const u16 rt, const u16 rev)
963{
964 return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) >= rev);
965}
966
948static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev, 967static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
949 enum rt2x00_chip_intf intf) 968 enum rt2x00_chip_intf intf)
950{ 969{
diff --git a/drivers/net/wireless/rt2x00/rt2x00crypto.c b/drivers/net/wireless/rt2x00/rt2x00crypto.c
index d291c7862e10..583dacd8d241 100644
--- a/drivers/net/wireless/rt2x00/rt2x00crypto.c
+++ b/drivers/net/wireless/rt2x00/rt2x00crypto.c
@@ -128,6 +128,7 @@ void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, struct txentry_desc *txdesc)
128 128
129 /* Pull buffer to correct size */ 129 /* Pull buffer to correct size */
130 skb_pull(skb, txdesc->iv_len); 130 skb_pull(skb, txdesc->iv_len);
131 txdesc->length -= txdesc->iv_len;
131 132
132 /* IV/EIV data has officially been stripped */ 133 /* IV/EIV data has officially been stripped */
133 skbdesc->flags |= SKBDESC_IV_STRIPPED; 134 skbdesc->flags |= SKBDESC_IV_STRIPPED;
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 9569fb4e5bc5..e9fe93fd8042 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -156,10 +156,11 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
156 enum rt2x00_dump_type type, struct sk_buff *skb) 156 enum rt2x00_dump_type type, struct sk_buff *skb)
157{ 157{
158 struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; 158 struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf;
159 struct skb_frame_desc *desc = get_skb_frame_desc(skb); 159 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
160 struct sk_buff *skbcopy; 160 struct sk_buff *skbcopy;
161 struct rt2x00dump_hdr *dump_hdr; 161 struct rt2x00dump_hdr *dump_hdr;
162 struct timeval timestamp; 162 struct timeval timestamp;
163 u32 data_len;
163 164
164 do_gettimeofday(&timestamp); 165 do_gettimeofday(&timestamp);
165 166
@@ -171,7 +172,11 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
171 return; 172 return;
172 } 173 }
173 174
174 skbcopy = alloc_skb(sizeof(*dump_hdr) + desc->desc_len + skb->len, 175 data_len = skb->len;
176 if (skbdesc->flags & SKBDESC_DESC_IN_SKB)
177 data_len -= skbdesc->desc_len;
178
179 skbcopy = alloc_skb(sizeof(*dump_hdr) + skbdesc->desc_len + data_len,
175 GFP_ATOMIC); 180 GFP_ATOMIC);
176 if (!skbcopy) { 181 if (!skbcopy) {
177 DEBUG(rt2x00dev, "Failed to copy skb for dump.\n"); 182 DEBUG(rt2x00dev, "Failed to copy skb for dump.\n");
@@ -181,18 +186,20 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
181 dump_hdr = (struct rt2x00dump_hdr *)skb_put(skbcopy, sizeof(*dump_hdr)); 186 dump_hdr = (struct rt2x00dump_hdr *)skb_put(skbcopy, sizeof(*dump_hdr));
182 dump_hdr->version = cpu_to_le32(DUMP_HEADER_VERSION); 187 dump_hdr->version = cpu_to_le32(DUMP_HEADER_VERSION);
183 dump_hdr->header_length = cpu_to_le32(sizeof(*dump_hdr)); 188 dump_hdr->header_length = cpu_to_le32(sizeof(*dump_hdr));
184 dump_hdr->desc_length = cpu_to_le32(desc->desc_len); 189 dump_hdr->desc_length = cpu_to_le32(skbdesc->desc_len);
185 dump_hdr->data_length = cpu_to_le32(skb->len); 190 dump_hdr->data_length = cpu_to_le32(data_len);
186 dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt); 191 dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt);
187 dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf); 192 dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
188 dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev); 193 dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev);
189 dump_hdr->type = cpu_to_le16(type); 194 dump_hdr->type = cpu_to_le16(type);
190 dump_hdr->queue_index = desc->entry->queue->qid; 195 dump_hdr->queue_index = skbdesc->entry->queue->qid;
191 dump_hdr->entry_index = desc->entry->entry_idx; 196 dump_hdr->entry_index = skbdesc->entry->entry_idx;
192 dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec); 197 dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec);
193 dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec); 198 dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_usec);
194 199
195 memcpy(skb_put(skbcopy, desc->desc_len), desc->desc, desc->desc_len); 200 if (!(skbdesc->flags & SKBDESC_DESC_IN_SKB))
201 memcpy(skb_put(skbcopy, skbdesc->desc_len), skbdesc->desc,
202 skbdesc->desc_len);
196 memcpy(skb_put(skbcopy, skb->len), skb->data, skb->len); 203 memcpy(skb_put(skbcopy, skb->len), skb->data, skb->len);
197 204
198 skb_queue_tail(&intf->frame_dump_skbqueue, skbcopy); 205 skb_queue_tail(&intf->frame_dump_skbqueue, skbcopy);
@@ -700,8 +707,6 @@ void rt2x00debug_register(struct rt2x00_dev *rt2x00dev)
700exit: 707exit:
701 rt2x00debug_deregister(rt2x00dev); 708 rt2x00debug_deregister(rt2x00dev);
702 ERROR(rt2x00dev, "Failed to register debug handler.\n"); 709 ERROR(rt2x00dev, "Failed to register debug handler.\n");
703
704 return;
705} 710}
706 711
707void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) 712void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev)
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index eda73ba735a6..3ae468c4d760 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -435,7 +435,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
435 rx_status->mactime = rxdesc.timestamp; 435 rx_status->mactime = rxdesc.timestamp;
436 rx_status->rate_idx = rate_idx; 436 rx_status->rate_idx = rate_idx;
437 rx_status->signal = rxdesc.rssi; 437 rx_status->signal = rxdesc.rssi;
438 rx_status->noise = rxdesc.noise;
439 rx_status->flag = rxdesc.flags; 438 rx_status->flag = rxdesc.flags;
440 rx_status->antenna = rt2x00dev->link.ant.active.rx; 439 rx_status->antenna = rt2x00dev->link.ant.active.rx;
441 440
diff --git a/drivers/net/wireless/rt2x00/rt2x00dump.h b/drivers/net/wireless/rt2x00/rt2x00dump.h
index 727019a748e7..ed303b423e41 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dump.h
+++ b/drivers/net/wireless/rt2x00/rt2x00dump.h
@@ -62,11 +62,14 @@
62 * the tx event which has either succeeded or failed. A frame 62 * the tx event which has either succeeded or failed. A frame
63 * with this type should also have been reported with as a 63 * with this type should also have been reported with as a
64 * %DUMP_FRAME_TX frame. 64 * %DUMP_FRAME_TX frame.
65 * @DUMP_FRAME_BEACON: This beacon frame is queued for transmission to the
66 * hardware.
65 */ 67 */
66enum rt2x00_dump_type { 68enum rt2x00_dump_type {
67 DUMP_FRAME_RXDONE = 1, 69 DUMP_FRAME_RXDONE = 1,
68 DUMP_FRAME_TX = 2, 70 DUMP_FRAME_TX = 2,
69 DUMP_FRAME_TXDONE = 3, 71 DUMP_FRAME_TXDONE = 3,
72 DUMP_FRAME_BEACON = 4,
70}; 73};
71 74
72/** 75/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00firmware.c b/drivers/net/wireless/rt2x00/rt2x00firmware.c
index 34beb00c4347..b818a43c4672 100644
--- a/drivers/net/wireless/rt2x00/rt2x00firmware.c
+++ b/drivers/net/wireless/rt2x00/rt2x00firmware.c
@@ -79,7 +79,7 @@ static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev)
79 ERROR(rt2x00dev, 79 ERROR(rt2x00dev,
80 "Current firmware does not support detected chipset.\n"); 80 "Current firmware does not support detected chipset.\n");
81 goto exit; 81 goto exit;
82 }; 82 }
83 83
84 rt2x00dev->fw = fw; 84 rt2x00dev->fw = fw;
85 85
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index 1056c92143a8..5a407602ce3e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -35,6 +35,7 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
35{ 35{
36 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 36 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
37 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; 37 struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
38 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
38 39
39 if (tx_info->control.sta) 40 if (tx_info->control.sta)
40 txdesc->mpdu_density = 41 txdesc->mpdu_density =
@@ -66,4 +67,20 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
66 __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags); 67 __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags);
67 if (txrate->flags & IEEE80211_TX_RC_SHORT_GI) 68 if (txrate->flags & IEEE80211_TX_RC_SHORT_GI)
68 __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags); 69 __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags);
70
71 /*
72 * Determine IFS values
73 * - Use TXOP_BACKOFF for management frames
74 * - Use TXOP_SIFS for fragment bursts
75 * - Use TXOP_HTTXOP for everything else
76 *
77 * Note: rt2800 devices won't use CTS protection (if used)
78 * for frames not transmitted with TXOP_HTTXOP
79 */
80 if (ieee80211_is_mgmt(hdr->frame_control))
81 txdesc->txop = TXOP_BACKOFF;
82 else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
83 txdesc->txop = TXOP_SIFS;
84 else
85 txdesc->txop = TXOP_HTTXOP;
69} 86}
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index cf3f1c0c4382..a016f7ccde29 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -63,11 +63,10 @@ EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
63/* 63/*
64 * TX data handlers. 64 * TX data handlers.
65 */ 65 */
66int rt2x00pci_write_tx_data(struct queue_entry *entry) 66int rt2x00pci_write_tx_data(struct queue_entry *entry,
67 struct txentry_desc *txdesc)
67{ 68{
68 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 69 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
69 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
70 struct skb_frame_desc *skbdesc;
71 70
72 /* 71 /*
73 * This should not happen, we already checked the entry 72 * This should not happen, we already checked the entry
@@ -82,13 +81,6 @@ int rt2x00pci_write_tx_data(struct queue_entry *entry)
82 return -EINVAL; 81 return -EINVAL;
83 } 82 }
84 83
85 /*
86 * Fill in skb descriptor
87 */
88 skbdesc = get_skb_frame_desc(entry->skb);
89 skbdesc->desc = entry_priv->desc;
90 skbdesc->desc_len = entry->queue->desc_size;
91
92 return 0; 84 return 0;
93} 85}
94EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data); 86EXPORT_SYMBOL_GPL(rt2x00pci_write_tx_data);
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 8149ff68410a..51bcef3839ce 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -92,7 +92,8 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
92 * This function will initialize the DMA and skb descriptor 92 * This function will initialize the DMA and skb descriptor
93 * to prepare the entry for the actual TX operation. 93 * to prepare the entry for the actual TX operation.
94 */ 94 */
95int rt2x00pci_write_tx_data(struct queue_entry *entry); 95int rt2x00pci_write_tx_data(struct queue_entry *entry,
96 struct txentry_desc *txdesc);
96 97
97/** 98/**
98 * struct queue_entry_priv_pci: Per entry PCI specific information 99 * struct queue_entry_priv_pci: Per entry PCI specific information
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index a0bd36fc4d2e..20dbdd6fb904 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -334,12 +334,10 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
334 txdesc->aifs = entry->queue->aifs; 334 txdesc->aifs = entry->queue->aifs;
335 335
336 /* 336 /*
337 * Header and alignment information. 337 * Header and frame information.
338 */ 338 */
339 txdesc->length = entry->skb->len;
339 txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb); 340 txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
340 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
341 (entry->skb->len > txdesc->header_length))
342 txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
343 341
344 /* 342 /*
345 * Check whether this frame is to be acked. 343 * Check whether this frame is to be acked.
@@ -423,6 +421,7 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
423{ 421{
424 struct data_queue *queue = entry->queue; 422 struct data_queue *queue = entry->queue;
425 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; 423 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
424 enum rt2x00_dump_type dump_type;
426 425
427 rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc); 426 rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);
428 427
@@ -430,21 +429,26 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
430 * All processing on the frame has been completed, this means 429 * All processing on the frame has been completed, this means
431 * it is now ready to be dumped to userspace through debugfs. 430 * it is now ready to be dumped to userspace through debugfs.
432 */ 431 */
433 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb); 432 dump_type = (txdesc->queue == QID_BEACON) ?
433 DUMP_FRAME_BEACON : DUMP_FRAME_TX;
434 rt2x00debug_dump_frame(rt2x00dev, dump_type, entry->skb);
435}
436
437static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
438 struct txentry_desc *txdesc)
439{
440 struct data_queue *queue = entry->queue;
441 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
434 442
435 /* 443 /*
436 * Check if we need to kick the queue, there are however a few rules 444 * Check if we need to kick the queue, there are however a few rules
437 * 1) Don't kick beacon queue 445 * 1) Don't kick unless this is the last in frame in a burst.
438 * 2) Don't kick unless this is the last in frame in a burst.
439 * When the burst flag is set, this frame is always followed 446 * When the burst flag is set, this frame is always followed
440 * by another frame which in some way are related to eachother. 447 * by another frame which in some way are related to eachother.
441 * This is true for fragments, RTS or CTS-to-self frames. 448 * This is true for fragments, RTS or CTS-to-self frames.
442 * 3) Rule 2 can be broken when the available entries 449 * 2) Rule 1 can be broken when the available entries
443 * in the queue are less then a certain threshold. 450 * in the queue are less then a certain threshold.
444 */ 451 */
445 if (entry->queue->qid == QID_BEACON)
446 return;
447
448 if (rt2x00queue_threshold(queue) || 452 if (rt2x00queue_threshold(queue) ||
449 !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) 453 !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
450 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid); 454 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
@@ -526,7 +530,8 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
526 * call failed. Since we always return NETDEV_TX_OK to mac80211, 530 * call failed. Since we always return NETDEV_TX_OK to mac80211,
527 * this frame will simply be dropped. 531 * this frame will simply be dropped.
528 */ 532 */
529 if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) { 533 if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry,
534 &txdesc))) {
530 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 535 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
531 entry->skb = NULL; 536 entry->skb = NULL;
532 return -EIO; 537 return -EIO;
@@ -539,6 +544,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
539 544
540 rt2x00queue_index_inc(queue, Q_INDEX); 545 rt2x00queue_index_inc(queue, Q_INDEX);
541 rt2x00queue_write_tx_descriptor(entry, &txdesc); 546 rt2x00queue_write_tx_descriptor(entry, &txdesc);
547 rt2x00queue_kick_tx_queue(entry, &txdesc);
542 548
543 return 0; 549 return 0;
544} 550}
@@ -550,7 +556,6 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
550 struct rt2x00_intf *intf = vif_to_intf(vif); 556 struct rt2x00_intf *intf = vif_to_intf(vif);
551 struct skb_frame_desc *skbdesc; 557 struct skb_frame_desc *skbdesc;
552 struct txentry_desc txdesc; 558 struct txentry_desc txdesc;
553 __le32 desc[16];
554 559
555 if (unlikely(!intf->beacon)) 560 if (unlikely(!intf->beacon))
556 return -ENOBUFS; 561 return -ENOBUFS;
@@ -583,19 +588,10 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
583 rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc); 588 rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);
584 589
585 /* 590 /*
586 * For the descriptor we use a local array from where the
587 * driver can move it to the correct location required for
588 * the hardware.
589 */
590 memset(desc, 0, sizeof(desc));
591
592 /*
593 * Fill in skb descriptor 591 * Fill in skb descriptor
594 */ 592 */
595 skbdesc = get_skb_frame_desc(intf->beacon->skb); 593 skbdesc = get_skb_frame_desc(intf->beacon->skb);
596 memset(skbdesc, 0, sizeof(*skbdesc)); 594 memset(skbdesc, 0, sizeof(*skbdesc));
597 skbdesc->desc = desc;
598 skbdesc->desc_len = intf->beacon->queue->desc_size;
599 skbdesc->entry = intf->beacon; 595 skbdesc->entry = intf->beacon;
600 596
601 /* 597 /*
@@ -604,12 +600,9 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
604 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc); 600 rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);
605 601
606 /* 602 /*
607 * Send beacon to hardware. 603 * Send beacon to hardware and enable beacon genaration..
608 * Also enable beacon generation, which might have been disabled
609 * by the driver during the config_beacon() callback function.
610 */ 604 */
611 rt2x00dev->ops->lib->write_beacon(intf->beacon); 605 rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc);
612 rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);
613 606
614 mutex_unlock(&intf->beacon_skb_mutex); 607 mutex_unlock(&intf->beacon_skb_mutex);
615 608
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index c1e482bb37b3..f79170849add 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -94,12 +94,15 @@ enum data_queue_qid {
94 * mac80211 but was stripped for processing by the driver. 94 * mac80211 but was stripped for processing by the driver.
95 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211, 95 * @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
96 * don't try to pass it back. 96 * don't try to pass it back.
97 * @SKBDESC_DESC_IN_SKB: The descriptor is at the start of the
98 * skb, instead of in the desc field.
97 */ 99 */
98enum skb_frame_desc_flags { 100enum skb_frame_desc_flags {
99 SKBDESC_DMA_MAPPED_RX = 1 << 0, 101 SKBDESC_DMA_MAPPED_RX = 1 << 0,
100 SKBDESC_DMA_MAPPED_TX = 1 << 1, 102 SKBDESC_DMA_MAPPED_TX = 1 << 1,
101 SKBDESC_IV_STRIPPED = 1 << 2, 103 SKBDESC_IV_STRIPPED = 1 << 2,
102 SKBDESC_NOT_MAC80211 = 1 << 3, 104 SKBDESC_NOT_MAC80211 = 1 << 3,
105 SKBDESC_DESC_IN_SKB = 1 << 4,
103}; 106};
104 107
105/** 108/**
@@ -183,7 +186,6 @@ enum rxdone_entry_desc_flags {
183 * @timestamp: RX Timestamp 186 * @timestamp: RX Timestamp
184 * @signal: Signal of the received frame. 187 * @signal: Signal of the received frame.
185 * @rssi: RSSI of the received frame. 188 * @rssi: RSSI of the received frame.
186 * @noise: Measured noise during frame reception.
187 * @size: Data size of the received frame. 189 * @size: Data size of the received frame.
188 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags). 190 * @flags: MAC80211 receive flags (See &enum mac80211_rx_flags).
189 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags). 191 * @dev_flags: Ralink receive flags (See &enum rxdone_entry_desc_flags).
@@ -197,7 +199,6 @@ struct rxdone_entry_desc {
197 u64 timestamp; 199 u64 timestamp;
198 int signal; 200 int signal;
199 int rssi; 201 int rssi;
200 int noise;
201 int size; 202 int size;
202 int flags; 203 int flags;
203 int dev_flags; 204 int dev_flags;
@@ -287,8 +288,8 @@ enum txentry_desc_flags {
287 * 288 *
288 * @flags: Descriptor flags (See &enum queue_entry_flags). 289 * @flags: Descriptor flags (See &enum queue_entry_flags).
289 * @queue: Queue identification (See &enum data_queue_qid). 290 * @queue: Queue identification (See &enum data_queue_qid).
291 * @length: Length of the entire frame.
290 * @header_length: Length of 802.11 header. 292 * @header_length: Length of 802.11 header.
291 * @l2pad: Amount of padding to align 802.11 payload to 4-byte boundrary.
292 * @length_high: PLCP length high word. 293 * @length_high: PLCP length high word.
293 * @length_low: PLCP length low word. 294 * @length_low: PLCP length low word.
294 * @signal: PLCP signal. 295 * @signal: PLCP signal.
@@ -301,6 +302,7 @@ enum txentry_desc_flags {
301 * @retry_limit: Max number of retries. 302 * @retry_limit: Max number of retries.
302 * @aifs: AIFS value. 303 * @aifs: AIFS value.
303 * @ifs: IFS value. 304 * @ifs: IFS value.
305 * @txop: IFS value for 11n capable chips.
304 * @cw_min: cwmin value. 306 * @cw_min: cwmin value.
305 * @cw_max: cwmax value. 307 * @cw_max: cwmax value.
306 * @cipher: Cipher type used for encryption. 308 * @cipher: Cipher type used for encryption.
@@ -313,8 +315,8 @@ struct txentry_desc {
313 315
314 enum data_queue_qid queue; 316 enum data_queue_qid queue;
315 317
318 u16 length;
316 u16 header_length; 319 u16 header_length;
317 u16 l2pad;
318 320
319 u16 length_high; 321 u16 length_high;
320 u16 length_low; 322 u16 length_low;
@@ -330,6 +332,7 @@ struct txentry_desc {
330 short retry_limit; 332 short retry_limit;
331 short aifs; 333 short aifs;
332 short ifs; 334 short ifs;
335 short txop;
333 short cw_min; 336 short cw_min;
334 short cw_max; 337 short cw_max;
335 338
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index 603bfc0adaa3..b9fe94873ee0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -101,6 +101,16 @@ enum ifs {
101}; 101};
102 102
103/* 103/*
104 * IFS backoff values for HT devices
105 */
106enum txop {
107 TXOP_HTTXOP = 0,
108 TXOP_PIFS = 1,
109 TXOP_SIFS = 2,
110 TXOP_BACKOFF = 3,
111};
112
113/*
104 * Cipher types for hardware encryption 114 * Cipher types for hardware encryption
105 */ 115 */
106enum cipher { 116enum cipher {
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index f9a7f8b17411..bd1546ba7ad2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -216,12 +216,12 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
216 rt2x00lib_txdone(entry, &txdesc); 216 rt2x00lib_txdone(entry, &txdesc);
217} 217}
218 218
219int rt2x00usb_write_tx_data(struct queue_entry *entry) 219int rt2x00usb_write_tx_data(struct queue_entry *entry,
220 struct txentry_desc *txdesc)
220{ 221{
221 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 222 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
222 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 223 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
223 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 224 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
224 struct skb_frame_desc *skbdesc;
225 u32 length; 225 u32 length;
226 226
227 /* 227 /*
@@ -231,13 +231,6 @@ int rt2x00usb_write_tx_data(struct queue_entry *entry)
231 memset(entry->skb->data, 0, entry->queue->desc_size); 231 memset(entry->skb->data, 0, entry->queue->desc_size);
232 232
233 /* 233 /*
234 * Fill in skb descriptor
235 */
236 skbdesc = get_skb_frame_desc(entry->skb);
237 skbdesc->desc = entry->skb->data;
238 skbdesc->desc_len = entry->queue->desc_size;
239
240 /*
241 * USB devices cannot blindly pass the skb->len as the 234 * USB devices cannot blindly pass the skb->len as the
242 * length of the data to usb_fill_bulk_urb. Pass the skb 235 * length of the data to usb_fill_bulk_urb. Pass the skb
243 * to the driver to determine what the length should be. 236 * to the driver to determine what the length should be.
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index 3da6841b5d42..621d0f829251 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -376,7 +376,8 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev);
376 * This function will initialize the URB and skb descriptor 376 * This function will initialize the URB and skb descriptor
377 * to prepare the entry for the actual TX operation. 377 * to prepare the entry for the actual TX operation.
378 */ 378 */
379int rt2x00usb_write_tx_data(struct queue_entry *entry); 379int rt2x00usb_write_tx_data(struct queue_entry *entry,
380 struct txentry_desc *txdesc);
380 381
381/** 382/**
382 * struct queue_entry_priv_usb: Per entry USB specific information 383 * struct queue_entry_priv_usb: Per entry USB specific information
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 432e75f960b7..2e3076f67535 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1764,7 +1764,8 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1764 struct txentry_desc *txdesc) 1764 struct txentry_desc *txdesc)
1765{ 1765{
1766 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1766 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1767 __le32 *txd = skbdesc->desc; 1767 struct queue_entry_priv_pci *entry_priv = skbdesc->entry->priv_data;
1768 __le32 *txd = entry_priv->desc;
1768 u32 word; 1769 u32 word;
1769 1770
1770 /* 1771 /*
@@ -1802,17 +1803,23 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1802 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 1803 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
1803 rt2x00_desc_write(txd, 5, word); 1804 rt2x00_desc_write(txd, 5, word);
1804 1805
1805 rt2x00_desc_read(txd, 6, &word); 1806 if (txdesc->queue != QID_BEACON) {
1806 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS, 1807 rt2x00_desc_read(txd, 6, &word);
1807 skbdesc->skb_dma); 1808 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
1808 rt2x00_desc_write(txd, 6, word); 1809 skbdesc->skb_dma);
1810 rt2x00_desc_write(txd, 6, word);
1809 1811
1810 if (skbdesc->desc_len > TXINFO_SIZE) {
1811 rt2x00_desc_read(txd, 11, &word); 1812 rt2x00_desc_read(txd, 11, &word);
1812 rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, skb->len); 1813 rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0,
1814 txdesc->length);
1813 rt2x00_desc_write(txd, 11, word); 1815 rt2x00_desc_write(txd, 11, word);
1814 } 1816 }
1815 1817
1818 /*
1819 * Writing TXD word 0 must the last to prevent a race condition with
1820 * the device, whereby the device may take hold of the TXD before we
1821 * finished updating it.
1822 */
1816 rt2x00_desc_read(txd, 0, &word); 1823 rt2x00_desc_read(txd, 0, &word);
1817 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); 1824 rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
1818 rt2x00_set_field32(&word, TXD_W0_VALID, 1); 1825 rt2x00_set_field32(&word, TXD_W0_VALID, 1);
@@ -1832,20 +1839,28 @@ static void rt61pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1832 rt2x00_set_field32(&word, TXD_W0_KEY_TABLE, 1839 rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
1833 test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags)); 1840 test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
1834 rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx); 1841 rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
1835 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len); 1842 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
1836 rt2x00_set_field32(&word, TXD_W0_BURST, 1843 rt2x00_set_field32(&word, TXD_W0_BURST,
1837 test_bit(ENTRY_TXD_BURST, &txdesc->flags)); 1844 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1838 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher); 1845 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
1839 rt2x00_desc_write(txd, 0, word); 1846 rt2x00_desc_write(txd, 0, word);
1847
1848 /*
1849 * Register descriptor details in skb frame descriptor.
1850 */
1851 skbdesc->desc = txd;
1852 skbdesc->desc_len =
1853 (txdesc->queue == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE;
1840} 1854}
1841 1855
1842/* 1856/*
1843 * TX data initialization 1857 * TX data initialization
1844 */ 1858 */
1845static void rt61pci_write_beacon(struct queue_entry *entry) 1859static void rt61pci_write_beacon(struct queue_entry *entry,
1860 struct txentry_desc *txdesc)
1846{ 1861{
1847 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1862 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1848 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 1863 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1849 unsigned int beacon_base; 1864 unsigned int beacon_base;
1850 u32 reg; 1865 u32 reg;
1851 1866
@@ -1861,14 +1876,25 @@ static void rt61pci_write_beacon(struct queue_entry *entry)
1861 * Write entire beacon with descriptor to register. 1876 * Write entire beacon with descriptor to register.
1862 */ 1877 */
1863 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 1878 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
1864 rt2x00pci_register_multiwrite(rt2x00dev, 1879 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
1865 beacon_base, 1880 entry_priv->desc, TXINFO_SIZE);
1866 skbdesc->desc, skbdesc->desc_len); 1881 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base + TXINFO_SIZE,
1867 rt2x00pci_register_multiwrite(rt2x00dev,
1868 beacon_base + skbdesc->desc_len,
1869 entry->skb->data, entry->skb->len); 1882 entry->skb->data, entry->skb->len);
1870 1883
1871 /* 1884 /*
1885 * Enable beaconing again.
1886 *
1887 * For Wi-Fi faily generated beacons between participating
1888 * stations. Set TBTT phase adaptive adjustment step to 8us.
1889 */
1890 rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
1891
1892 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1893 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
1894 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1895 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
1896
1897 /*
1872 * Clean up beacon skb. 1898 * Clean up beacon skb.
1873 */ 1899 */
1874 dev_kfree_skb_any(entry->skb); 1900 dev_kfree_skb_any(entry->skb);
@@ -1880,23 +1906,6 @@ static void rt61pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1880{ 1906{
1881 u32 reg; 1907 u32 reg;
1882 1908
1883 if (queue == QID_BEACON) {
1884 /*
1885 * For Wi-Fi faily generated beacons between participating
1886 * stations. Set TBTT phase adaptive adjustment step to 8us.
1887 */
1888 rt2x00pci_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
1889
1890 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
1891 if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) {
1892 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1893 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
1894 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1895 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
1896 }
1897 return;
1898 }
1899
1900 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg); 1909 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1901 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue == QID_AC_BE)); 1910 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue == QID_AC_BE));
1902 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue == QID_AC_BK)); 1911 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue == QID_AC_BK));
@@ -1968,12 +1977,8 @@ static void rt61pci_fill_rxdone(struct queue_entry *entry,
1968 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1977 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1969 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1978 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1970 1979
1971 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { 1980 rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
1972 rxdesc->cipher = 1981 rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
1973 rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
1974 rxdesc->cipher_status =
1975 rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
1976 }
1977 1982
1978 if (rxdesc->cipher != CIPHER_NONE) { 1983 if (rxdesc->cipher != CIPHER_NONE) {
1979 _rt2x00_desc_read(entry_priv->desc, 2, &rxdesc->iv[0]); 1984 _rt2x00_desc_read(entry_priv->desc, 2, &rxdesc->iv[0]);
@@ -2118,6 +2123,14 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2118 } 2123 }
2119} 2124}
2120 2125
2126static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
2127{
2128 struct ieee80211_conf conf = { .flags = 0 };
2129 struct rt2x00lib_conf libconf = { .conf = &conf };
2130
2131 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
2132}
2133
2121static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) 2134static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
2122{ 2135{
2123 struct rt2x00_dev *rt2x00dev = dev_instance; 2136 struct rt2x00_dev *rt2x00dev = dev_instance;
@@ -2165,6 +2178,12 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
2165 rt2x00pci_register_write(rt2x00dev, 2178 rt2x00pci_register_write(rt2x00dev,
2166 M2H_CMD_DONE_CSR, 0xffffffff); 2179 M2H_CMD_DONE_CSR, 0xffffffff);
2167 2180
2181 /*
2182 * 4 - MCU Autowakeup interrupt.
2183 */
2184 if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
2185 rt61pci_wakeup(rt2x00dev);
2186
2168 return IRQ_HANDLED; 2187 return IRQ_HANDLED;
2169} 2188}
2170 2189
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index bb58d797fb72..e35bd19c3c5a 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -861,15 +861,15 @@ static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev,
861 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, 861 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
862 USB_MODE_SLEEP, REGISTER_TIMEOUT); 862 USB_MODE_SLEEP, REGISTER_TIMEOUT);
863 } else { 863 } else {
864 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
865 USB_MODE_WAKEUP, REGISTER_TIMEOUT);
866
867 rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg); 864 rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
868 rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0); 865 rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0);
869 rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0); 866 rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0);
870 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0); 867 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
871 rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0); 868 rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0);
872 rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg); 869 rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg);
870
871 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
872 USB_MODE_WAKEUP, REGISTER_TIMEOUT);
873 } 873 }
874} 874}
875 875
@@ -1441,12 +1441,38 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1441 struct txentry_desc *txdesc) 1441 struct txentry_desc *txdesc)
1442{ 1442{
1443 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); 1443 struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
1444 __le32 *txd = skbdesc->desc; 1444 __le32 *txd = (__le32 *)(skb->data - TXD_DESC_SIZE);
1445 u32 word; 1445 u32 word;
1446 1446
1447 /* 1447 /*
1448 * Start writing the descriptor words. 1448 * Start writing the descriptor words.
1449 */ 1449 */
1450 rt2x00_desc_read(txd, 0, &word);
1451 rt2x00_set_field32(&word, TXD_W0_BURST,
1452 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1453 rt2x00_set_field32(&word, TXD_W0_VALID, 1);
1454 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
1455 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
1456 rt2x00_set_field32(&word, TXD_W0_ACK,
1457 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
1458 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1459 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1460 rt2x00_set_field32(&word, TXD_W0_OFDM,
1461 (txdesc->rate_mode == RATE_MODE_OFDM));
1462 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1463 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1464 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1465 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
1466 test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
1467 rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
1468 test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
1469 rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
1470 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length);
1471 rt2x00_set_field32(&word, TXD_W0_BURST2,
1472 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1473 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
1474 rt2x00_desc_write(txd, 0, word);
1475
1450 rt2x00_desc_read(txd, 1, &word); 1476 rt2x00_desc_read(txd, 1, &word);
1451 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue); 1477 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->queue);
1452 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1478 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs);
@@ -1475,51 +1501,24 @@ static void rt73usb_write_tx_desc(struct rt2x00_dev *rt2x00dev,
1475 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 1501 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
1476 rt2x00_desc_write(txd, 5, word); 1502 rt2x00_desc_write(txd, 5, word);
1477 1503
1478 rt2x00_desc_read(txd, 0, &word); 1504 /*
1479 rt2x00_set_field32(&word, TXD_W0_BURST, 1505 * Register descriptor details in skb frame descriptor.
1480 test_bit(ENTRY_TXD_BURST, &txdesc->flags)); 1506 */
1481 rt2x00_set_field32(&word, TXD_W0_VALID, 1); 1507 skbdesc->desc = txd;
1482 rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, 1508 skbdesc->desc_len = TXD_DESC_SIZE;
1483 test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
1484 rt2x00_set_field32(&word, TXD_W0_ACK,
1485 test_bit(ENTRY_TXD_ACK, &txdesc->flags));
1486 rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
1487 test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
1488 rt2x00_set_field32(&word, TXD_W0_OFDM,
1489 (txdesc->rate_mode == RATE_MODE_OFDM));
1490 rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->ifs);
1491 rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
1492 test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
1493 rt2x00_set_field32(&word, TXD_W0_TKIP_MIC,
1494 test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags));
1495 rt2x00_set_field32(&word, TXD_W0_KEY_TABLE,
1496 test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags));
1497 rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx);
1498 rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, skb->len);
1499 rt2x00_set_field32(&word, TXD_W0_BURST2,
1500 test_bit(ENTRY_TXD_BURST, &txdesc->flags));
1501 rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher);
1502 rt2x00_desc_write(txd, 0, word);
1503} 1509}
1504 1510
1505/* 1511/*
1506 * TX data initialization 1512 * TX data initialization
1507 */ 1513 */
1508static void rt73usb_write_beacon(struct queue_entry *entry) 1514static void rt73usb_write_beacon(struct queue_entry *entry,
1515 struct txentry_desc *txdesc)
1509{ 1516{
1510 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1517 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1511 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
1512 unsigned int beacon_base; 1518 unsigned int beacon_base;
1513 u32 reg; 1519 u32 reg;
1514 1520
1515 /* 1521 /*
1516 * Add the descriptor in front of the skb.
1517 */
1518 skb_push(entry->skb, entry->queue->desc_size);
1519 memcpy(entry->skb->data, skbdesc->desc, skbdesc->desc_len);
1520 skbdesc->desc = entry->skb->data;
1521
1522 /*
1523 * Disable beaconing while we are reloading the beacon data, 1522 * Disable beaconing while we are reloading the beacon data,
1524 * otherwise we might be sending out invalid data. 1523 * otherwise we might be sending out invalid data.
1525 */ 1524 */
@@ -1528,6 +1527,11 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1528 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); 1527 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1529 1528
1530 /* 1529 /*
1530 * Take the descriptor in front of the skb into account.
1531 */
1532 skb_push(entry->skb, TXD_DESC_SIZE);
1533
1534 /*
1531 * Write entire beacon with descriptor to register. 1535 * Write entire beacon with descriptor to register.
1532 */ 1536 */
1533 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 1537 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
@@ -1537,6 +1541,19 @@ static void rt73usb_write_beacon(struct queue_entry *entry)
1537 REGISTER_TIMEOUT32(entry->skb->len)); 1541 REGISTER_TIMEOUT32(entry->skb->len));
1538 1542
1539 /* 1543 /*
1544 * Enable beaconing again.
1545 *
1546 * For Wi-Fi faily generated beacons between participating stations.
1547 * Set TBTT phase adaptive adjustment step to 8us (default 16us)
1548 */
1549 rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
1550
1551 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1552 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
1553 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1554 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1555
1556 /*
1540 * Clean up the beacon skb. 1557 * Clean up the beacon skb.
1541 */ 1558 */
1542 dev_kfree_skb(entry->skb); 1559 dev_kfree_skb(entry->skb);
@@ -1557,31 +1574,6 @@ static int rt73usb_get_tx_data_len(struct queue_entry *entry)
1557 return length; 1574 return length;
1558} 1575}
1559 1576
1560static void rt73usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
1561 const enum data_queue_qid queue)
1562{
1563 u32 reg;
1564
1565 if (queue != QID_BEACON) {
1566 rt2x00usb_kick_tx_queue(rt2x00dev, queue);
1567 return;
1568 }
1569
1570 /*
1571 * For Wi-Fi faily generated beacons between participating stations.
1572 * Set TBTT phase adaptive adjustment step to 8us (default 16us)
1573 */
1574 rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008);
1575
1576 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
1577 if (!rt2x00_get_field32(reg, TXRX_CSR9_BEACON_GEN)) {
1578 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1579 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
1580 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1581 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1582 }
1583}
1584
1585/* 1577/*
1586 * RX control handlers 1578 * RX control handlers
1587 */ 1579 */
@@ -1645,12 +1637,8 @@ static void rt73usb_fill_rxdone(struct queue_entry *entry,
1645 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) 1637 if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
1646 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; 1638 rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
1647 1639
1648 if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) { 1640 rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
1649 rxdesc->cipher = 1641 rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
1650 rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG);
1651 rxdesc->cipher_status =
1652 rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR);
1653 }
1654 1642
1655 if (rxdesc->cipher != CIPHER_NONE) { 1643 if (rxdesc->cipher != CIPHER_NONE) {
1656 _rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]); 1644 _rt2x00_desc_read(rxd, 2, &rxdesc->iv[0]);
@@ -2266,7 +2254,7 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2266 .write_tx_data = rt2x00usb_write_tx_data, 2254 .write_tx_data = rt2x00usb_write_tx_data,
2267 .write_beacon = rt73usb_write_beacon, 2255 .write_beacon = rt73usb_write_beacon,
2268 .get_tx_data_len = rt73usb_get_tx_data_len, 2256 .get_tx_data_len = rt73usb_get_tx_data_len,
2269 .kick_tx_queue = rt73usb_kick_tx_queue, 2257 .kick_tx_queue = rt2x00usb_kick_tx_queue,
2270 .kill_tx_queue = rt2x00usb_kill_tx_queue, 2258 .kill_tx_queue = rt2x00usb_kill_tx_queue,
2271 .fill_rxdone = rt73usb_fill_rxdone, 2259 .fill_rxdone = rt73usb_fill_rxdone,
2272 .config_shared_key = rt73usb_config_shared_key, 2260 .config_shared_key = rt73usb_config_shared_key,
diff --git a/drivers/net/wireless/rtl818x/Kconfig b/drivers/net/wireless/rtl818x/Kconfig
new file mode 100644
index 000000000000..17d80fe556de
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/Kconfig
@@ -0,0 +1,88 @@
1#
2# RTL818X Wireless LAN device configuration
3#
4config RTL8180
5 tristate "Realtek 8180/8185 PCI support"
6 depends on MAC80211 && PCI && EXPERIMENTAL
7 select EEPROM_93CX6
8 ---help---
9 This is a driver for RTL8180 and RTL8185 based cards.
10 These are PCI based chips found in cards such as:
11
12 (RTL8185 802.11g)
13 A-Link WL54PC
14
15 (RTL8180 802.11b)
16 Belkin F5D6020 v3
17 Belkin F5D6020 v3
18 Dlink DWL-610
19 Dlink DWL-510
20 Netgear MA521
21 Level-One WPC-0101
22 Acer Aspire 1357 LMi
23 VCTnet PC-11B1
24 Ovislink AirLive WL-1120PCM
25 Mentor WL-PCI
26 Linksys WPC11 v4
27 TrendNET TEW-288PI
28 D-Link DWL-520 Rev D
29 Repotec RP-WP7126
30 TP-Link TL-WN250/251
31 Zonet ZEW1000
32 Longshine LCS-8031-R
33 HomeLine HLW-PCC200
34 GigaFast WF721-AEX
35 Planet WL-3553
36 Encore ENLWI-PCI1-NT
37 TrendNET TEW-266PC
38 Gigabyte GN-WLMR101
39 Siemens-fujitsu Amilo D1840W
40 Edimax EW-7126
41 PheeNet WL-11PCIR
42 Tonze PC-2100T
43 Planet WL-8303
44 Dlink DWL-650 v M1
45 Edimax EW-7106
46 Q-Tec 770WC
47 Topcom Skyr@cer 4011b
48 Roper FreeLan 802.11b (edition 2004)
49 Wistron Neweb Corp CB-200B
50 Pentagram HorNET
51 QTec 775WC
52 TwinMOS Booming B Series
53 Micronet SP906BB
54 Sweex LC700010
55 Surecom EP-9428
56 Safecom SWLCR-1100
57
58 Thanks to Realtek for their support!
59
60config RTL8187
61 tristate "Realtek 8187 and 8187B USB support"
62 depends on MAC80211 && USB
63 select EEPROM_93CX6
64 ---help---
65 This is a driver for RTL8187 and RTL8187B based cards.
66 These are USB based chips found in devices such as:
67
68 Netgear WG111v2
69 Level 1 WNC-0301USB
70 Micronet SP907GK V5
71 Encore ENUWI-G2
72 Trendnet TEW-424UB
73 ASUS P5B Deluxe/P5K Premium motherboards
74 Toshiba Satellite Pro series of laptops
75 Asus Wireless Link
76 Linksys WUSB54GC-EU v2
77 (v1 = rt73usb; v3 is rt2070-based,
78 use staging/rt3070 or try rt2800usb)
79
80 Thanks to Realtek for their support!
81
82# If possible, automatically enable LEDs for RTL8187.
83
84config RTL8187_LEDS
85 bool
86 depends on RTL8187 && MAC80211_LEDS && (LEDS_CLASS = y || LEDS_CLASS = RTL8187)
87 default y
88
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index de3844fe06d8..4baf0cf0826f 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -55,6 +55,14 @@ struct rtl8180_tx_ring {
55 struct sk_buff_head queue; 55 struct sk_buff_head queue;
56}; 56};
57 57
58struct rtl8180_vif {
59 struct ieee80211_hw *dev;
60
61 /* beaconing */
62 struct delayed_work beacon_work;
63 bool enable_beacon;
64};
65
58struct rtl8180_priv { 66struct rtl8180_priv {
59 /* common between rtl818x drivers */ 67 /* common between rtl818x drivers */
60 struct rtl818x_csr __iomem *map; 68 struct rtl818x_csr __iomem *map;
@@ -78,6 +86,9 @@ struct rtl8180_priv {
78 u32 anaparam; 86 u32 anaparam;
79 u16 rfparam; 87 u16 rfparam;
80 u8 csthreshold; 88 u8 csthreshold;
89
90 /* sequence # */
91 u16 seqno;
81}; 92};
82 93
83void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data); 94void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 2131a442831a..515817de2905 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -188,6 +188,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio)
188 info->flags |= IEEE80211_TX_STAT_ACK; 188 info->flags |= IEEE80211_TX_STAT_ACK;
189 189
190 info->status.rates[0].count = (flags & 0xFF) + 1; 190 info->status.rates[0].count = (flags & 0xFF) + 1;
191 info->status.rates[1].idx = -1;
191 192
192 ieee80211_tx_status_irqsafe(dev, skb); 193 ieee80211_tx_status_irqsafe(dev, skb);
193 if (ring->entries - skb_queue_len(&ring->queue) == 2) 194 if (ring->entries - skb_queue_len(&ring->queue) == 2)
@@ -233,6 +234,7 @@ static irqreturn_t rtl8180_interrupt(int irq, void *dev_id)
233static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb) 234static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
234{ 235{
235 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 236 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
237 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
236 struct rtl8180_priv *priv = dev->priv; 238 struct rtl8180_priv *priv = dev->priv;
237 struct rtl8180_tx_ring *ring; 239 struct rtl8180_tx_ring *ring;
238 struct rtl8180_tx_desc *entry; 240 struct rtl8180_tx_desc *entry;
@@ -284,6 +286,14 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
284 } 286 }
285 287
286 spin_lock_irqsave(&priv->lock, flags); 288 spin_lock_irqsave(&priv->lock, flags);
289
290 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
291 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
292 priv->seqno += 0x10;
293 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
294 hdr->seq_ctrl |= cpu_to_le16(priv->seqno);
295 }
296
287 idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries; 297 idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries;
288 entry = &ring->desc[idx]; 298 entry = &ring->desc[idx];
289 299
@@ -297,7 +307,8 @@ static int rtl8180_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
297 entry->flags = cpu_to_le32(tx_flags); 307 entry->flags = cpu_to_le32(tx_flags);
298 __skb_queue_tail(&ring->queue, skb); 308 __skb_queue_tail(&ring->queue, skb);
299 if (ring->entries - skb_queue_len(&ring->queue) < 2) 309 if (ring->entries - skb_queue_len(&ring->queue) < 2)
300 ieee80211_stop_queue(dev, skb_get_queue_mapping(skb)); 310 ieee80211_stop_queue(dev, prio);
311
301 spin_unlock_irqrestore(&priv->lock, flags); 312 spin_unlock_irqrestore(&priv->lock, flags);
302 313
303 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4))); 314 rtl818x_iowrite8(priv, &priv->map->TX_DMA_POLLING, (1 << (prio + 4)));
@@ -652,10 +663,59 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
652 rtl8180_free_tx_ring(dev, i); 663 rtl8180_free_tx_ring(dev, i);
653} 664}
654 665
666static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
667{
668 struct rtl8180_priv *priv = dev->priv;
669
670 return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
671 (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
672}
673
674void rtl8180_beacon_work(struct work_struct *work)
675{
676 struct rtl8180_vif *vif_priv =
677 container_of(work, struct rtl8180_vif, beacon_work.work);
678 struct ieee80211_vif *vif =
679 container_of((void *)vif_priv, struct ieee80211_vif, drv_priv);
680 struct ieee80211_hw *dev = vif_priv->dev;
681 struct ieee80211_mgmt *mgmt;
682 struct sk_buff *skb;
683 int err = 0;
684
685 /* don't overflow the tx ring */
686 if (ieee80211_queue_stopped(dev, 0))
687 goto resched;
688
689 /* grab a fresh beacon */
690 skb = ieee80211_beacon_get(dev, vif);
691
692 /*
693 * update beacon timestamp w/ TSF value
694 * TODO: make hardware update beacon timestamp
695 */
696 mgmt = (struct ieee80211_mgmt *)skb->data;
697 mgmt->u.beacon.timestamp = cpu_to_le64(rtl8180_get_tsf(dev));
698
699 /* TODO: use actual beacon queue */
700 skb_set_queue_mapping(skb, 0);
701
702 err = rtl8180_tx(dev, skb);
703 WARN_ON(err);
704
705resched:
706 /*
707 * schedule next beacon
708 * TODO: use hardware support for beacon timing
709 */
710 schedule_delayed_work(&vif_priv->beacon_work,
711 usecs_to_jiffies(1024 * vif->bss_conf.beacon_int));
712}
713
655static int rtl8180_add_interface(struct ieee80211_hw *dev, 714static int rtl8180_add_interface(struct ieee80211_hw *dev,
656 struct ieee80211_vif *vif) 715 struct ieee80211_vif *vif)
657{ 716{
658 struct rtl8180_priv *priv = dev->priv; 717 struct rtl8180_priv *priv = dev->priv;
718 struct rtl8180_vif *vif_priv;
659 719
660 /* 720 /*
661 * We only support one active interface at a time. 721 * We only support one active interface at a time.
@@ -665,6 +725,7 @@ static int rtl8180_add_interface(struct ieee80211_hw *dev,
665 725
666 switch (vif->type) { 726 switch (vif->type) {
667 case NL80211_IFTYPE_STATION: 727 case NL80211_IFTYPE_STATION:
728 case NL80211_IFTYPE_ADHOC:
668 break; 729 break;
669 default: 730 default:
670 return -EOPNOTSUPP; 731 return -EOPNOTSUPP;
@@ -672,6 +733,12 @@ static int rtl8180_add_interface(struct ieee80211_hw *dev,
672 733
673 priv->vif = vif; 734 priv->vif = vif;
674 735
736 /* Initialize driver private area */
737 vif_priv = (struct rtl8180_vif *)&vif->drv_priv;
738 vif_priv->dev = dev;
739 INIT_DELAYED_WORK(&vif_priv->beacon_work, rtl8180_beacon_work);
740 vif_priv->enable_beacon = false;
741
675 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 742 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
676 rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0], 743 rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
677 le32_to_cpu(*(__le32 *)vif->addr)); 744 le32_to_cpu(*(__le32 *)vif->addr));
@@ -705,8 +772,11 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
705 u32 changed) 772 u32 changed)
706{ 773{
707 struct rtl8180_priv *priv = dev->priv; 774 struct rtl8180_priv *priv = dev->priv;
775 struct rtl8180_vif *vif_priv;
708 int i; 776 int i;
709 777
778 vif_priv = (struct rtl8180_vif *)&vif->drv_priv;
779
710 if (changed & BSS_CHANGED_BSSID) { 780 if (changed & BSS_CHANGED_BSSID) {
711 for (i = 0; i < ETH_ALEN; i++) 781 for (i = 0; i < ETH_ALEN; i++)
712 rtl818x_iowrite8(priv, &priv->map->BSSID[i], 782 rtl818x_iowrite8(priv, &priv->map->BSSID[i],
@@ -721,13 +791,22 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
721 } 791 }
722 792
723 if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp) 793 if (changed & BSS_CHANGED_ERP_SLOT && priv->rf->conf_erp)
724 priv->rf->conf_erp(dev, info); 794 priv->rf->conf_erp(dev, info);
795
796 if (changed & BSS_CHANGED_BEACON_ENABLED)
797 vif_priv->enable_beacon = info->enable_beacon;
798
799 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON)) {
800 cancel_delayed_work_sync(&vif_priv->beacon_work);
801 if (vif_priv->enable_beacon)
802 schedule_work(&vif_priv->beacon_work.work);
803 }
725} 804}
726 805
727static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev, int mc_count, 806static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev,
728 struct dev_addr_list *mc_list) 807 struct netdev_hw_addr_list *mc_list)
729{ 808{
730 return mc_count; 809 return netdev_hw_addr_list_count(mc_list);
731} 810}
732 811
733static void rtl8180_configure_filter(struct ieee80211_hw *dev, 812static void rtl8180_configure_filter(struct ieee80211_hw *dev,
@@ -762,14 +841,6 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
762 rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf); 841 rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf);
763} 842}
764 843
765static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
766{
767 struct rtl8180_priv *priv = dev->priv;
768
769 return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
770 (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
771}
772
773static const struct ieee80211_ops rtl8180_ops = { 844static const struct ieee80211_ops rtl8180_ops = {
774 .tx = rtl8180_tx, 845 .tx = rtl8180_tx,
775 .start = rtl8180_start, 846 .start = rtl8180_start,
@@ -827,6 +898,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
827 const char *chip_name, *rf_name = NULL; 898 const char *chip_name, *rf_name = NULL;
828 u32 reg; 899 u32 reg;
829 u16 eeprom_val; 900 u16 eeprom_val;
901 u8 mac_addr[ETH_ALEN];
830 902
831 err = pci_enable_device(pdev); 903 err = pci_enable_device(pdev);
832 if (err) { 904 if (err) {
@@ -855,8 +927,8 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
855 goto err_free_reg; 927 goto err_free_reg;
856 } 928 }
857 929
858 if ((err = pci_set_dma_mask(pdev, 0xFFFFFF00ULL)) || 930 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
859 (err = pci_set_consistent_dma_mask(pdev, 0xFFFFFF00ULL))) { 931 (err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
860 printk(KERN_ERR "%s (rtl8180): No suitable DMA available\n", 932 printk(KERN_ERR "%s (rtl8180): No suitable DMA available\n",
861 pci_name(pdev)); 933 pci_name(pdev));
862 goto err_free_reg; 934 goto err_free_reg;
@@ -905,7 +977,9 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
905 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING | 977 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
906 IEEE80211_HW_RX_INCLUDES_FCS | 978 IEEE80211_HW_RX_INCLUDES_FCS |
907 IEEE80211_HW_SIGNAL_UNSPEC; 979 IEEE80211_HW_SIGNAL_UNSPEC;
908 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 980 dev->vif_data_size = sizeof(struct rtl8180_vif);
981 dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
982 BIT(NL80211_IFTYPE_ADHOC);
909 dev->queues = 1; 983 dev->queues = 1;
910 dev->max_signal = 65; 984 dev->max_signal = 65;
911 985
@@ -987,12 +1061,13 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
987 eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam); 1061 eeprom_93cx6_read(&eeprom, 0x19, &priv->rfparam);
988 } 1062 }
989 1063
990 eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)dev->wiphy->perm_addr, 3); 1064 eeprom_93cx6_multiread(&eeprom, 0x7, (__le16 *)mac_addr, 3);
991 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { 1065 if (!is_valid_ether_addr(mac_addr)) {
992 printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using" 1066 printk(KERN_WARNING "%s (rtl8180): Invalid hwaddr! Using"
993 " randomly generated MAC addr\n", pci_name(pdev)); 1067 " randomly generated MAC addr\n", pci_name(pdev));
994 random_ether_addr(dev->wiphy->perm_addr); 1068 random_ether_addr(mac_addr);
995 } 1069 }
1070 SET_IEEE80211_PERM_ADDR(dev, mac_addr);
996 1071
997 /* CCK TX power */ 1072 /* CCK TX power */
998 for (i = 0; i < 14; i += 2) { 1073 for (i = 0; i < 14; i += 2) {
@@ -1024,7 +1099,7 @@ static int __devinit rtl8180_probe(struct pci_dev *pdev,
1024 } 1099 }
1025 1100
1026 printk(KERN_INFO "%s: hwaddr %pM, %s + %s\n", 1101 printk(KERN_INFO "%s: hwaddr %pM, %s + %s\n",
1027 wiphy_name(dev->wiphy), dev->wiphy->perm_addr, 1102 wiphy_name(dev->wiphy), mac_addr,
1028 chip_name, priv->rf->name); 1103 chip_name, priv->rf->name);
1029 1104
1030 return 0; 1105 return 0;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 1d30792973f5..891b8490e349 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1194,9 +1194,9 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
1194} 1194}
1195 1195
1196static u64 rtl8187_prepare_multicast(struct ieee80211_hw *dev, 1196static u64 rtl8187_prepare_multicast(struct ieee80211_hw *dev,
1197 int mc_count, struct dev_addr_list *mc_list) 1197 struct netdev_hw_addr_list *mc_list)
1198{ 1198{
1199 return mc_count; 1199 return netdev_hw_addr_list_count(mc_list);
1200} 1200}
1201 1201
1202static void rtl8187_configure_filter(struct ieee80211_hw *dev, 1202static void rtl8187_configure_filter(struct ieee80211_hw *dev,
@@ -1333,6 +1333,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1333 u16 txpwr, reg; 1333 u16 txpwr, reg;
1334 u16 product_id = le16_to_cpu(udev->descriptor.idProduct); 1334 u16 product_id = le16_to_cpu(udev->descriptor.idProduct);
1335 int err, i; 1335 int err, i;
1336 u8 mac_addr[ETH_ALEN];
1336 1337
1337 dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops); 1338 dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops);
1338 if (!dev) { 1339 if (!dev) {
@@ -1390,12 +1391,13 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1390 udelay(10); 1391 udelay(10);
1391 1392
1392 eeprom_93cx6_multiread(&eeprom, RTL8187_EEPROM_MAC_ADDR, 1393 eeprom_93cx6_multiread(&eeprom, RTL8187_EEPROM_MAC_ADDR,
1393 (__le16 __force *)dev->wiphy->perm_addr, 3); 1394 (__le16 __force *)mac_addr, 3);
1394 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { 1395 if (!is_valid_ether_addr(mac_addr)) {
1395 printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly " 1396 printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly "
1396 "generated MAC address\n"); 1397 "generated MAC address\n");
1397 random_ether_addr(dev->wiphy->perm_addr); 1398 random_ether_addr(mac_addr);
1398 } 1399 }
1400 SET_IEEE80211_PERM_ADDR(dev, mac_addr);
1399 1401
1400 channel = priv->channels; 1402 channel = priv->channels;
1401 for (i = 0; i < 3; i++) { 1403 for (i = 0; i < 3; i++) {
@@ -1526,7 +1528,7 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
1526 skb_queue_head_init(&priv->b_tx_status.queue); 1528 skb_queue_head_init(&priv->b_tx_status.queue);
1527 1529
1528 printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n", 1530 printk(KERN_INFO "%s: hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
1529 wiphy_name(dev->wiphy), dev->wiphy->perm_addr, 1531 wiphy_name(dev->wiphy), mac_addr,
1530 chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask); 1532 chip_name, priv->asic_rev, priv->rf->name, priv->rfkill_mask);
1531 1533
1532#ifdef CONFIG_RTL8187_LEDS 1534#ifdef CONFIG_RTL8187_LEDS
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 785e0244e305..337fc7bec5a5 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -51,3 +51,27 @@ config WL1271
51 51
52 If you choose to build a module, it'll be called wl1271. Say N if 52 If you choose to build a module, it'll be called wl1271. Say N if
53 unsure. 53 unsure.
54
55config WL1271_SPI
56 tristate "TI wl1271 SPI support"
57 depends on WL1271 && SPI_MASTER
58 ---help---
59 This module adds support for the SPI interface of adapters using
60 TI wl1271 chipset. Select this if your platform is using
61 the SPI bus.
62
63 If you choose to build a module, it'll be called wl1251_spi.
64 Say N if unsure.
65
66config WL1271_SDIO
67 tristate "TI wl1271 SDIO support"
68 depends on WL1271 && MMC && ARM
69 ---help---
70 This module adds support for the SDIO interface of adapters using
71 TI wl1271 chipset. Select this if your platform is using
72 the SDIO bus.
73
74 If you choose to build a module, it'll be called
75 wl1271_sdio. Say N if unsure.
76
77
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index f47ec94c16dc..27ddd2be0a91 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -7,10 +7,12 @@ obj-$(CONFIG_WL1251) += wl1251.o
7obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o 7obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
8obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o 8obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
9 9
10wl1271-objs = wl1271_main.o wl1271_spi.o wl1271_cmd.o \ 10wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \
11 wl1271_event.o wl1271_tx.o wl1271_rx.o \ 11 wl1271_event.o wl1271_tx.o wl1271_rx.o \
12 wl1271_ps.o wl1271_acx.o wl1271_boot.o \ 12 wl1271_ps.o wl1271_acx.o wl1271_boot.o \
13 wl1271_init.o wl1271_debugfs.o wl1271_io.o 13 wl1271_init.o wl1271_debugfs.o
14 14
15wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o 15wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o
16obj-$(CONFIG_WL1271) += wl1271.o 16obj-$(CONFIG_WL1271) += wl1271.o
17obj-$(CONFIG_WL1271_SPI) += wl1271_spi.o
18obj-$(CONFIG_WL1271_SDIO) += wl1271_sdio.o
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 37c61c19cae5..4f5f02a26e62 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -256,6 +256,8 @@ struct wl1251_debugfs {
256struct wl1251_if_operations { 256struct wl1251_if_operations {
257 void (*read)(struct wl1251 *wl, int addr, void *buf, size_t len); 257 void (*read)(struct wl1251 *wl, int addr, void *buf, size_t len);
258 void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len); 258 void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len);
259 void (*read_elp)(struct wl1251 *wl, int addr, u32 *val);
260 void (*write_elp)(struct wl1251 *wl, int addr, u32 val);
259 void (*reset)(struct wl1251 *wl); 261 void (*reset)(struct wl1251 *wl);
260 void (*enable_irq)(struct wl1251 *wl); 262 void (*enable_irq)(struct wl1251 *wl);
261 void (*disable_irq)(struct wl1251 *wl); 263 void (*disable_irq)(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index d5ac79aeaa73..2545123931e8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -497,7 +497,8 @@ int wl1251_boot(struct wl1251 *wl)
497 /* 2. start processing NVS file */ 497 /* 2. start processing NVS file */
498 if (wl->use_eeprom) { 498 if (wl->use_eeprom) {
499 wl1251_reg_write32(wl, ACX_REG_EE_START, START_EEPROM_MGR); 499 wl1251_reg_write32(wl, ACX_REG_EE_START, START_EEPROM_MGR);
500 msleep(4000); 500 /* Wait for EEPROM NVS burst read to complete */
501 msleep(40);
501 wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, USE_EEPROM); 502 wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, USE_EEPROM);
502 } else { 503 } else {
503 ret = wl1251_boot_upload_nvs(wl); 504 ret = wl1251_boot_upload_nvs(wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_io.h b/drivers/net/wireless/wl12xx/wl1251_io.h
index b89d2ac62efb..c545e9d5f512 100644
--- a/drivers/net/wireless/wl12xx/wl1251_io.h
+++ b/drivers/net/wireless/wl12xx/wl1251_io.h
@@ -48,6 +48,26 @@ static inline void wl1251_write32(struct wl1251 *wl, int addr, u32 val)
48 wl->if_ops->write(wl, addr, &val, sizeof(u32)); 48 wl->if_ops->write(wl, addr, &val, sizeof(u32));
49} 49}
50 50
51static inline u32 wl1251_read_elp(struct wl1251 *wl, int addr)
52{
53 u32 response;
54
55 if (wl->if_ops->read_elp)
56 wl->if_ops->read_elp(wl, addr, &response);
57 else
58 wl->if_ops->read(wl, addr, &response, sizeof(u32));
59
60 return response;
61}
62
63static inline void wl1251_write_elp(struct wl1251 *wl, int addr, u32 val)
64{
65 if (wl->if_ops->write_elp)
66 wl->if_ops->write_elp(wl, addr, val);
67 else
68 wl->if_ops->write(wl, addr, &val, sizeof(u32));
69}
70
51/* Memory target IO, address is translated to partition 0 */ 71/* Memory target IO, address is translated to partition 0 */
52void wl1251_mem_read(struct wl1251 *wl, int addr, void *buf, size_t len); 72void wl1251_mem_read(struct wl1251 *wl, int addr, void *buf, size_t len);
53void wl1251_mem_write(struct wl1251 *wl, int addr, void *buf, size_t len); 73void wl1251_mem_write(struct wl1251 *wl, int addr, void *buf, size_t len);
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 1c8226eee409..00b24282fc73 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -147,8 +147,8 @@ static void wl1251_fw_wakeup(struct wl1251 *wl)
147 u32 elp_reg; 147 u32 elp_reg;
148 148
149 elp_reg = ELPCTRL_WAKE_UP; 149 elp_reg = ELPCTRL_WAKE_UP;
150 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); 150 wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
151 elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR); 151 elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
152 152
153 if (!(elp_reg & ELPCTRL_WLAN_READY)) 153 if (!(elp_reg & ELPCTRL_WLAN_READY))
154 wl1251_warning("WLAN not ready"); 154 wl1251_warning("WLAN not ready");
@@ -202,8 +202,8 @@ static int wl1251_chip_wakeup(struct wl1251 *wl)
202 goto out; 202 goto out;
203 } 203 }
204 204
205 /* No NVS from netlink, try to get it from the filesystem */ 205 if (wl->nvs == NULL && !wl->use_eeprom) {
206 if (wl->nvs == NULL) { 206 /* No NVS from netlink, try to get it from the filesystem */
207 ret = wl1251_fetch_nvs(wl); 207 ret = wl1251_fetch_nvs(wl);
208 if (ret < 0) 208 if (ret < 0)
209 goto out; 209 goto out;
@@ -857,6 +857,7 @@ out:
857} 857}
858 858
859static int wl1251_op_hw_scan(struct ieee80211_hw *hw, 859static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
860 struct ieee80211_vif *vif,
860 struct cfg80211_scan_request *req) 861 struct cfg80211_scan_request *req)
861{ 862{
862 struct wl1251 *wl = hw->priv; 863 struct wl1251 *wl = hw->priv;
@@ -1196,6 +1197,66 @@ static const struct ieee80211_ops wl1251_ops = {
1196 .conf_tx = wl1251_op_conf_tx, 1197 .conf_tx = wl1251_op_conf_tx,
1197}; 1198};
1198 1199
1200static int wl1251_read_eeprom_byte(struct wl1251 *wl, off_t offset, u8 *data)
1201{
1202 unsigned long timeout;
1203
1204 wl1251_reg_write32(wl, EE_ADDR, offset);
1205 wl1251_reg_write32(wl, EE_CTL, EE_CTL_READ);
1206
1207 /* EE_CTL_READ clears when data is ready */
1208 timeout = jiffies + msecs_to_jiffies(100);
1209 while (1) {
1210 if (!(wl1251_reg_read32(wl, EE_CTL) & EE_CTL_READ))
1211 break;
1212
1213 if (time_after(jiffies, timeout))
1214 return -ETIMEDOUT;
1215
1216 msleep(1);
1217 }
1218
1219 *data = wl1251_reg_read32(wl, EE_DATA);
1220 return 0;
1221}
1222
1223static int wl1251_read_eeprom(struct wl1251 *wl, off_t offset,
1224 u8 *data, size_t len)
1225{
1226 size_t i;
1227 int ret;
1228
1229 wl1251_reg_write32(wl, EE_START, 0);
1230
1231 for (i = 0; i < len; i++) {
1232 ret = wl1251_read_eeprom_byte(wl, offset + i, &data[i]);
1233 if (ret < 0)
1234 return ret;
1235 }
1236
1237 return 0;
1238}
1239
1240static int wl1251_read_eeprom_mac(struct wl1251 *wl)
1241{
1242 u8 mac[ETH_ALEN];
1243 int i, ret;
1244
1245 wl1251_set_partition(wl, 0, 0, REGISTERS_BASE, REGISTERS_DOWN_SIZE);
1246
1247 ret = wl1251_read_eeprom(wl, 0x1c, mac, sizeof(mac));
1248 if (ret < 0) {
1249 wl1251_warning("failed to read MAC address from EEPROM");
1250 return ret;
1251 }
1252
1253 /* MAC is stored in reverse order */
1254 for (i = 0; i < ETH_ALEN; i++)
1255 wl->mac_addr[i] = mac[ETH_ALEN - i - 1];
1256
1257 return 0;
1258}
1259
1199static int wl1251_register_hw(struct wl1251 *wl) 1260static int wl1251_register_hw(struct wl1251 *wl)
1200{ 1261{
1201 int ret; 1262 int ret;
@@ -1231,7 +1292,6 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
1231 wl->hw->channel_change_time = 10000; 1292 wl->hw->channel_change_time = 10000;
1232 1293
1233 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1294 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1234 IEEE80211_HW_NOISE_DBM |
1235 IEEE80211_HW_SUPPORTS_PS | 1295 IEEE80211_HW_SUPPORTS_PS |
1236 IEEE80211_HW_BEACON_FILTER | 1296 IEEE80211_HW_BEACON_FILTER |
1237 IEEE80211_HW_SUPPORTS_UAPSD; 1297 IEEE80211_HW_SUPPORTS_UAPSD;
@@ -1242,6 +1302,9 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
1242 1302
1243 wl->hw->queues = 4; 1303 wl->hw->queues = 4;
1244 1304
1305 if (wl->use_eeprom)
1306 wl1251_read_eeprom_mac(wl);
1307
1245 ret = wl1251_register_hw(wl); 1308 ret = wl1251_register_hw(wl);
1246 if (ret) 1309 if (ret)
1247 goto out; 1310 goto out;
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index 851dfb65e474..b55cb2bd459a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -45,7 +45,7 @@ void wl1251_elp_work(struct work_struct *work)
45 goto out; 45 goto out;
46 46
47 wl1251_debug(DEBUG_PSM, "chip to elp"); 47 wl1251_debug(DEBUG_PSM, "chip to elp");
48 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); 48 wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
49 wl->elp = true; 49 wl->elp = true;
50 50
51out: 51out:
@@ -79,9 +79,9 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
79 start = jiffies; 79 start = jiffies;
80 timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT); 80 timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
81 81
82 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); 82 wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
83 83
84 elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR); 84 elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
85 85
86 /* 86 /*
87 * FIXME: we should wait for irq from chip but, as a temporary 87 * FIXME: we should wait for irq from chip but, as a temporary
@@ -93,7 +93,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
93 return -ETIMEDOUT; 93 return -ETIMEDOUT;
94 } 94 }
95 msleep(1); 95 msleep(1);
96 elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR); 96 elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
97 } 97 }
98 98
99 wl1251_debug(DEBUG_PSM, "wakeup time: %u ms", 99 wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
diff --git a/drivers/net/wireless/wl12xx/wl1251_reg.h b/drivers/net/wireless/wl12xx/wl1251_reg.h
index 0ca3b4326056..d16edd9bf06c 100644
--- a/drivers/net/wireless/wl12xx/wl1251_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1251_reg.h
@@ -46,7 +46,14 @@
46#define SOR_CFG (REGISTERS_BASE + 0x0800) 46#define SOR_CFG (REGISTERS_BASE + 0x0800)
47#define ECPU_CTRL (REGISTERS_BASE + 0x0804) 47#define ECPU_CTRL (REGISTERS_BASE + 0x0804)
48#define HI_CFG (REGISTERS_BASE + 0x0808) 48#define HI_CFG (REGISTERS_BASE + 0x0808)
49
50/* EEPROM registers */
49#define EE_START (REGISTERS_BASE + 0x080C) 51#define EE_START (REGISTERS_BASE + 0x080C)
52#define EE_CTL (REGISTERS_BASE + 0x2000)
53#define EE_DATA (REGISTERS_BASE + 0x2004)
54#define EE_ADDR (REGISTERS_BASE + 0x2008)
55
56#define EE_CTL_READ 2
50 57
51#define CHIP_ID_B (REGISTERS_BASE + 0x5674) 58#define CHIP_ID_B (REGISTERS_BASE + 0x5674)
52 59
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 6f229e0990f4..851515836a7f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -74,12 +74,6 @@ static void wl1251_rx_status(struct wl1251 *wl,
74 74
75 status->signal = desc->rssi; 75 status->signal = desc->rssi;
76 76
77 /*
78 * FIXME: guessing that snr needs to be divided by two, otherwise
79 * the values don't make any sense
80 */
81 status->noise = desc->rssi - desc->snr / 2;
82
83 status->freq = ieee80211_channel_to_frequency(desc->channel); 77 status->freq = ieee80211_channel_to_frequency(desc->channel);
84 78
85 status->flag |= RX_FLAG_TSFT; 79 status->flag |= RX_FLAG_TSFT;
@@ -189,6 +183,4 @@ void wl1251_rx(struct wl1251 *wl)
189 183
190 /* Finally, we need to ACK the RX */ 184 /* Finally, we need to ACK the RX */
191 wl1251_rx_ack(wl); 185 wl1251_rx_ack(wl);
192
193 return;
194} 186}
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
index 9423f22bdced..d234285c2c81 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -20,20 +20,14 @@
20 * Copyright (C) 2009 Bob Copeland (me@bobcopeland.com) 20 * Copyright (C) 2009 Bob Copeland (me@bobcopeland.com)
21 */ 21 */
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/crc7.h>
24#include <linux/mod_devicetable.h> 23#include <linux/mod_devicetable.h>
25#include <linux/irq.h>
26#include <linux/mmc/sdio_func.h> 24#include <linux/mmc/sdio_func.h>
27#include <linux/mmc/sdio_ids.h> 25#include <linux/mmc/sdio_ids.h>
28#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/spi/wl12xx.h>
28#include <linux/irq.h>
29 29
30#include "wl1251.h" 30#include "wl1251.h"
31#include "wl12xx_80211.h"
32#include "wl1251_reg.h"
33#include "wl1251_ps.h"
34#include "wl1251_io.h"
35#include "wl1251_tx.h"
36#include "wl1251_debugfs.h"
37 31
38#ifndef SDIO_VENDOR_ID_TI 32#ifndef SDIO_VENDOR_ID_TI
39#define SDIO_VENDOR_ID_TI 0x104c 33#define SDIO_VENDOR_ID_TI 0x104c
@@ -43,6 +37,8 @@
43#define SDIO_DEVICE_ID_TI_WL1251 0x9066 37#define SDIO_DEVICE_ID_TI_WL1251 0x9066
44#endif 38#endif
45 39
40static struct wl12xx_platform_data *wl12xx_board_data;
41
46static struct sdio_func *wl_to_func(struct wl1251 *wl) 42static struct sdio_func *wl_to_func(struct wl1251 *wl)
47{ 43{
48 return wl->if_priv; 44 return wl->if_priv;
@@ -65,7 +61,8 @@ static const struct sdio_device_id wl1251_devices[] = {
65MODULE_DEVICE_TABLE(sdio, wl1251_devices); 61MODULE_DEVICE_TABLE(sdio, wl1251_devices);
66 62
67 63
68void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len) 64static void wl1251_sdio_read(struct wl1251 *wl, int addr,
65 void *buf, size_t len)
69{ 66{
70 int ret; 67 int ret;
71 struct sdio_func *func = wl_to_func(wl); 68 struct sdio_func *func = wl_to_func(wl);
@@ -77,7 +74,8 @@ void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len)
77 sdio_release_host(func); 74 sdio_release_host(func);
78} 75}
79 76
80void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len) 77static void wl1251_sdio_write(struct wl1251 *wl, int addr,
78 void *buf, size_t len)
81{ 79{
82 int ret; 80 int ret;
83 struct sdio_func *func = wl_to_func(wl); 81 struct sdio_func *func = wl_to_func(wl);
@@ -89,7 +87,33 @@ void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len)
89 sdio_release_host(func); 87 sdio_release_host(func);
90} 88}
91 89
92void wl1251_sdio_reset(struct wl1251 *wl) 90static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val)
91{
92 int ret = 0;
93 struct sdio_func *func = wl_to_func(wl);
94
95 sdio_claim_host(func);
96 *val = sdio_readb(func, addr, &ret);
97 sdio_release_host(func);
98
99 if (ret)
100 wl1251_error("sdio_readb failed (%d)", ret);
101}
102
103static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val)
104{
105 int ret = 0;
106 struct sdio_func *func = wl_to_func(wl);
107
108 sdio_claim_host(func);
109 sdio_writeb(func, val, addr, &ret);
110 sdio_release_host(func);
111
112 if (ret)
113 wl1251_error("sdio_writeb failed (%d)", ret);
114}
115
116static void wl1251_sdio_reset(struct wl1251 *wl)
93{ 117{
94} 118}
95 119
@@ -111,19 +135,64 @@ static void wl1251_sdio_disable_irq(struct wl1251 *wl)
111 sdio_release_host(func); 135 sdio_release_host(func);
112} 136}
113 137
114void wl1251_sdio_set_power(bool enable) 138/* Interrupts when using dedicated WLAN_IRQ pin */
139static irqreturn_t wl1251_line_irq(int irq, void *cookie)
140{
141 struct wl1251 *wl = cookie;
142
143 ieee80211_queue_work(wl->hw, &wl->irq_work);
144
145 return IRQ_HANDLED;
146}
147
148static void wl1251_enable_line_irq(struct wl1251 *wl)
115{ 149{
150 return enable_irq(wl->irq);
116} 151}
117 152
118struct wl1251_if_operations wl1251_sdio_ops = { 153static void wl1251_disable_line_irq(struct wl1251 *wl)
154{
155 return disable_irq(wl->irq);
156}
157
158static void wl1251_sdio_set_power(bool enable)
159{
160}
161
162static struct wl1251_if_operations wl1251_sdio_ops = {
119 .read = wl1251_sdio_read, 163 .read = wl1251_sdio_read,
120 .write = wl1251_sdio_write, 164 .write = wl1251_sdio_write,
165 .write_elp = wl1251_sdio_write_elp,
166 .read_elp = wl1251_sdio_read_elp,
121 .reset = wl1251_sdio_reset, 167 .reset = wl1251_sdio_reset,
122 .enable_irq = wl1251_sdio_enable_irq,
123 .disable_irq = wl1251_sdio_disable_irq,
124}; 168};
125 169
126int wl1251_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) 170static int wl1251_platform_probe(struct platform_device *pdev)
171{
172 if (pdev->id != -1) {
173 wl1251_error("can only handle single device");
174 return -ENODEV;
175 }
176
177 wl12xx_board_data = pdev->dev.platform_data;
178 return 0;
179}
180
181/*
182 * Dummy platform_driver for passing platform_data to this driver,
183 * until we have a way to pass this through SDIO subsystem or
184 * some other way.
185 */
186static struct platform_driver wl1251_platform_driver = {
187 .driver = {
188 .name = "wl1251_data",
189 .owner = THIS_MODULE,
190 },
191 .probe = wl1251_platform_probe,
192};
193
194static int wl1251_sdio_probe(struct sdio_func *func,
195 const struct sdio_device_id *id)
127{ 196{
128 int ret; 197 int ret;
129 struct wl1251 *wl; 198 struct wl1251 *wl;
@@ -141,20 +210,50 @@ int wl1251_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
141 goto release; 210 goto release;
142 211
143 sdio_set_block_size(func, 512); 212 sdio_set_block_size(func, 512);
213 sdio_release_host(func);
144 214
145 SET_IEEE80211_DEV(hw, &func->dev); 215 SET_IEEE80211_DEV(hw, &func->dev);
146 wl->if_priv = func; 216 wl->if_priv = func;
147 wl->if_ops = &wl1251_sdio_ops; 217 wl->if_ops = &wl1251_sdio_ops;
148 wl->set_power = wl1251_sdio_set_power; 218 wl->set_power = wl1251_sdio_set_power;
149 219
150 sdio_release_host(func); 220 if (wl12xx_board_data != NULL) {
221 wl->set_power = wl12xx_board_data->set_power;
222 wl->irq = wl12xx_board_data->irq;
223 wl->use_eeprom = wl12xx_board_data->use_eeprom;
224 }
225
226 if (wl->irq) {
227 ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
228 if (ret < 0) {
229 wl1251_error("request_irq() failed: %d", ret);
230 goto disable;
231 }
232
233 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
234 disable_irq(wl->irq);
235
236 wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
237 wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
238
239 wl1251_info("using dedicated interrupt line");
240 } else {
241 wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
242 wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
243
244 wl1251_info("using SDIO interrupt");
245 }
246
151 ret = wl1251_init_ieee80211(wl); 247 ret = wl1251_init_ieee80211(wl);
152 if (ret) 248 if (ret)
153 goto disable; 249 goto out_free_irq;
154 250
155 sdio_set_drvdata(func, wl); 251 sdio_set_drvdata(func, wl);
156 return ret; 252 return ret;
157 253
254out_free_irq:
255 if (wl->irq)
256 free_irq(wl->irq, wl);
158disable: 257disable:
159 sdio_claim_host(func); 258 sdio_claim_host(func);
160 sdio_disable_func(func); 259 sdio_disable_func(func);
@@ -167,6 +266,8 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
167{ 266{
168 struct wl1251 *wl = sdio_get_drvdata(func); 267 struct wl1251 *wl = sdio_get_drvdata(func);
169 268
269 if (wl->irq)
270 free_irq(wl->irq, wl);
170 wl1251_free_hw(wl); 271 wl1251_free_hw(wl);
171 272
172 sdio_claim_host(func); 273 sdio_claim_host(func);
@@ -186,6 +287,12 @@ static int __init wl1251_sdio_init(void)
186{ 287{
187 int err; 288 int err;
188 289
290 err = platform_driver_register(&wl1251_platform_driver);
291 if (err) {
292 wl1251_error("failed to register platform driver: %d", err);
293 return err;
294 }
295
189 err = sdio_register_driver(&wl1251_sdio_driver); 296 err = sdio_register_driver(&wl1251_sdio_driver);
190 if (err) 297 if (err)
191 wl1251_error("failed to register sdio driver: %d", err); 298 wl1251_error("failed to register sdio driver: %d", err);
@@ -195,6 +302,7 @@ static int __init wl1251_sdio_init(void)
195static void __exit wl1251_sdio_exit(void) 302static void __exit wl1251_sdio_exit(void)
196{ 303{
197 sdio_unregister_driver(&wl1251_sdio_driver); 304 sdio_unregister_driver(&wl1251_sdio_driver);
305 platform_driver_unregister(&wl1251_platform_driver);
198 wl1251_notice("unloaded"); 306 wl1251_notice("unloaded");
199} 307}
200 308
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 3bfb59bd4635..e81474203a23 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -310,7 +310,7 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
310 310
311static struct spi_driver wl1251_spi_driver = { 311static struct spi_driver wl1251_spi_driver = {
312 .driver = { 312 .driver = {
313 .name = "wl1251", 313 .name = DRIVER_NAME,
314 .bus = &spi_bus_type, 314 .bus = &spi_bus_type,
315 .owner = THIS_MODULE, 315 .owner = THIS_MODULE,
316 }, 316 },
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 97ea5096bc8c..6f1b6b5640c0 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -53,6 +53,9 @@ enum {
53 DEBUG_MAC80211 = BIT(11), 53 DEBUG_MAC80211 = BIT(11),
54 DEBUG_CMD = BIT(12), 54 DEBUG_CMD = BIT(12),
55 DEBUG_ACX = BIT(13), 55 DEBUG_ACX = BIT(13),
56 DEBUG_SDIO = BIT(14),
57 DEBUG_FILTERS = BIT(15),
58 DEBUG_ADHOC = BIT(16),
56 DEBUG_ALL = ~0, 59 DEBUG_ALL = ~0,
57}; 60};
58 61
@@ -110,6 +113,9 @@ enum {
110#define WL1271_FW_NAME "wl1271-fw.bin" 113#define WL1271_FW_NAME "wl1271-fw.bin"
111#define WL1271_NVS_NAME "wl1271-nvs.bin" 114#define WL1271_NVS_NAME "wl1271-nvs.bin"
112 115
116#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
117#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
118
113/* NVS data structure */ 119/* NVS data structure */
114#define WL1271_NVS_SECTION_SIZE 468 120#define WL1271_NVS_SECTION_SIZE 468
115 121
@@ -142,14 +148,7 @@ struct wl1271_nvs_file {
142 */ 148 */
143#undef WL1271_80211A_ENABLED 149#undef WL1271_80211A_ENABLED
144 150
145/* 151#define WL1271_BUSY_WORD_CNT 1
146 * FIXME: for the wl1271, a busy word count of 1 here will result in a more
147 * optimal SPI interface. There is some SPI bug however, causing RXS time outs
148 * with this mode occasionally on boot, so lets have three for now. A value of
149 * three should make sure, that the chipset will always be ready, though this
150 * will impact throughput and latencies slightly.
151 */
152#define WL1271_BUSY_WORD_CNT 3
153#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32)) 152#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32))
154 153
155#define WL1271_ELP_HW_STATE_ASLEEP 0 154#define WL1271_ELP_HW_STATE_ASLEEP 0
@@ -334,11 +333,27 @@ struct wl1271_scan {
334 u8 probe_requests; 333 u8 probe_requests;
335}; 334};
336 335
336struct wl1271_if_operations {
337 void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
338 bool fixed);
339 void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
340 bool fixed);
341 void (*reset)(struct wl1271 *wl);
342 void (*init)(struct wl1271 *wl);
343 void (*power)(struct wl1271 *wl, bool enable);
344 struct device* (*dev)(struct wl1271 *wl);
345 void (*enable_irq)(struct wl1271 *wl);
346 void (*disable_irq)(struct wl1271 *wl);
347};
348
337struct wl1271 { 349struct wl1271 {
350 struct platform_device *plat_dev;
338 struct ieee80211_hw *hw; 351 struct ieee80211_hw *hw;
339 bool mac80211_registered; 352 bool mac80211_registered;
340 353
341 struct spi_device *spi; 354 void *if_priv;
355
356 struct wl1271_if_operations *if_ops;
342 357
343 void (*set_power)(bool enable); 358 void (*set_power)(bool enable);
344 int irq; 359 int irq;
@@ -357,6 +372,9 @@ struct wl1271 {
357#define WL1271_FLAG_IN_ELP (6) 372#define WL1271_FLAG_IN_ELP (6)
358#define WL1271_FLAG_PSM (7) 373#define WL1271_FLAG_PSM (7)
359#define WL1271_FLAG_PSM_REQUESTED (8) 374#define WL1271_FLAG_PSM_REQUESTED (8)
375#define WL1271_FLAG_IRQ_PENDING (9)
376#define WL1271_FLAG_IRQ_RUNNING (10)
377#define WL1271_FLAG_IDLE (11)
360 unsigned long flags; 378 unsigned long flags;
361 379
362 struct wl1271_partition_set part; 380 struct wl1271_partition_set part;
@@ -370,9 +388,12 @@ struct wl1271 {
370 size_t fw_len; 388 size_t fw_len;
371 struct wl1271_nvs_file *nvs; 389 struct wl1271_nvs_file *nvs;
372 390
391 s8 hw_pg_ver;
392
373 u8 bssid[ETH_ALEN]; 393 u8 bssid[ETH_ALEN];
374 u8 mac_addr[ETH_ALEN]; 394 u8 mac_addr[ETH_ALEN];
375 u8 bss_type; 395 u8 bss_type;
396 u8 set_bss_type;
376 u8 ssid[IW_ESSID_MAX_SIZE + 1]; 397 u8 ssid[IW_ESSID_MAX_SIZE + 1];
377 u8 ssid_len; 398 u8 ssid_len;
378 int channel; 399 int channel;
@@ -382,13 +403,13 @@ struct wl1271 {
382 /* Accounting for allocated / available TX blocks on HW */ 403 /* Accounting for allocated / available TX blocks on HW */
383 u32 tx_blocks_freed[NUM_TX_QUEUES]; 404 u32 tx_blocks_freed[NUM_TX_QUEUES];
384 u32 tx_blocks_available; 405 u32 tx_blocks_available;
385 u8 tx_results_count; 406 u32 tx_results_count;
386 407
387 /* Transmitted TX packets counter for chipset interface */ 408 /* Transmitted TX packets counter for chipset interface */
388 int tx_packets_count; 409 u32 tx_packets_count;
389 410
390 /* Time-offset between host and chipset clocks */ 411 /* Time-offset between host and chipset clocks */
391 int time_offset; 412 s64 time_offset;
392 413
393 /* Session counter for the chipset */ 414 /* Session counter for the chipset */
394 int session_counter; 415 int session_counter;
@@ -403,8 +424,7 @@ struct wl1271 {
403 424
404 /* Security sequence number counters */ 425 /* Security sequence number counters */
405 u8 tx_security_last_seq; 426 u8 tx_security_last_seq;
406 u16 tx_security_seq_16; 427 s64 tx_security_seq;
407 u32 tx_security_seq_32;
408 428
409 /* FW Rx counter */ 429 /* FW Rx counter */
410 u32 rx_counter; 430 u32 rx_counter;
@@ -430,14 +450,19 @@ struct wl1271 {
430 /* currently configured rate set */ 450 /* currently configured rate set */
431 u32 sta_rate_set; 451 u32 sta_rate_set;
432 u32 basic_rate_set; 452 u32 basic_rate_set;
453 u32 basic_rate;
433 u32 rate_set; 454 u32 rate_set;
434 455
435 /* The current band */ 456 /* The current band */
436 enum ieee80211_band band; 457 enum ieee80211_band band;
437 458
459 /* Beaconing interval (needed for ad-hoc) */
460 u32 beacon_int;
461
438 /* Default key (for WEP) */ 462 /* Default key (for WEP) */
439 u32 default_key; 463 u32 default_key;
440 464
465 unsigned int filters;
441 unsigned int rx_config; 466 unsigned int rx_config;
442 unsigned int rx_filter; 467 unsigned int rx_filter;
443 468
@@ -450,10 +475,13 @@ struct wl1271 {
450 /* in dBm */ 475 /* in dBm */
451 int power_level; 476 int power_level;
452 477
478 int rssi_thold;
479 int last_rssi_event;
480
453 struct wl1271_stats stats; 481 struct wl1271_stats stats;
454 struct wl1271_debugfs debugfs; 482 struct wl1271_debugfs debugfs;
455 483
456 u32 buffer_32; 484 __le32 buffer_32;
457 u32 buffer_cmd; 485 u32 buffer_cmd;
458 u32 buffer_busyword[WL1271_BUSY_WORD_CNT]; 486 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
459 487
@@ -465,6 +493,8 @@ struct wl1271 {
465 /* Current chipset configuration */ 493 /* Current chipset configuration */
466 struct conf_drv_settings conf; 494 struct conf_drv_settings conf;
467 495
496 bool sg_enabled;
497
468 struct list_head list; 498 struct list_head list;
469}; 499};
470 500
@@ -477,7 +507,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
477 507
478#define WL1271_DEFAULT_POWER_LEVEL 0 508#define WL1271_DEFAULT_POWER_LEVEL 0
479 509
480#define WL1271_TX_QUEUE_MAX_LENGTH 20 510#define WL1271_TX_QUEUE_LOW_WATERMARK 10
511#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
481 512
482/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power 513/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
483 on in case is has been shut down shortly before */ 514 on in case is has been shut down shortly before */
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 308782421fce..e19e2f8f1e52 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -32,7 +32,6 @@
32#include "wl1271.h" 32#include "wl1271.h"
33#include "wl12xx_80211.h" 33#include "wl12xx_80211.h"
34#include "wl1271_reg.h" 34#include "wl1271_reg.h"
35#include "wl1271_spi.h"
36#include "wl1271_ps.h" 35#include "wl1271_ps.h"
37 36
38int wl1271_acx_wake_up_conditions(struct wl1271 *wl) 37int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
@@ -137,12 +136,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
137 goto out; 136 goto out;
138 } 137 }
139 138
140 /* 139 acx->current_tx_power = power * 10;
141 * FIXME: This is a workaround needed while we don't the correct
142 * calibration, to avoid distortions
143 */
144 /* acx->current_tx_power = power * 10; */
145 acx->current_tx_power = 120;
146 140
147 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); 141 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
148 if (ret < 0) { 142 if (ret < 0) {
@@ -511,12 +505,17 @@ out:
511 return ret; 505 return ret;
512} 506}
513 507
514int wl1271_acx_conn_monit_params(struct wl1271 *wl) 508#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff
509
510int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
515{ 511{
516 struct acx_conn_monit_params *acx; 512 struct acx_conn_monit_params *acx;
513 u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
514 u32 timeout = ACX_CONN_MONIT_DISABLE_VALUE;
517 int ret; 515 int ret;
518 516
519 wl1271_debug(DEBUG_ACX, "acx connection monitor parameters"); 517 wl1271_debug(DEBUG_ACX, "acx connection monitor parameters: %s",
518 enable ? "enabled" : "disabled");
520 519
521 acx = kzalloc(sizeof(*acx), GFP_KERNEL); 520 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
522 if (!acx) { 521 if (!acx) {
@@ -524,8 +523,13 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl)
524 goto out; 523 goto out;
525 } 524 }
526 525
527 acx->synch_fail_thold = cpu_to_le32(wl->conf.conn.synch_fail_thold); 526 if (enable) {
528 acx->bss_lose_timeout = cpu_to_le32(wl->conf.conn.bss_lose_timeout); 527 threshold = wl->conf.conn.synch_fail_thold;
528 timeout = wl->conf.conn.bss_lose_timeout;
529 }
530
531 acx->synch_fail_thold = cpu_to_le32(threshold);
532 acx->bss_lose_timeout = cpu_to_le32(timeout);
529 533
530 ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS, 534 ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
531 acx, sizeof(*acx)); 535 acx, sizeof(*acx));
@@ -541,7 +545,7 @@ out:
541} 545}
542 546
543 547
544int wl1271_acx_sg_enable(struct wl1271 *wl) 548int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable)
545{ 549{
546 struct acx_bt_wlan_coex *pta; 550 struct acx_bt_wlan_coex *pta;
547 int ret; 551 int ret;
@@ -554,7 +558,10 @@ int wl1271_acx_sg_enable(struct wl1271 *wl)
554 goto out; 558 goto out;
555 } 559 }
556 560
557 pta->enable = SG_ENABLE; 561 if (enable)
562 pta->enable = wl->conf.sg.state;
563 else
564 pta->enable = CONF_SG_DISABLE;
558 565
559 ret = wl1271_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta)); 566 ret = wl1271_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta));
560 if (ret < 0) { 567 if (ret < 0) {
@@ -571,7 +578,7 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
571{ 578{
572 struct acx_bt_wlan_coex_param *param; 579 struct acx_bt_wlan_coex_param *param;
573 struct conf_sg_settings *c = &wl->conf.sg; 580 struct conf_sg_settings *c = &wl->conf.sg;
574 int ret; 581 int i, ret;
575 582
576 wl1271_debug(DEBUG_ACX, "acx sg cfg"); 583 wl1271_debug(DEBUG_ACX, "acx sg cfg");
577 584
@@ -582,19 +589,9 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
582 } 589 }
583 590
584 /* BT-WLAN coext parameters */ 591 /* BT-WLAN coext parameters */
585 param->per_threshold = cpu_to_le32(c->per_threshold); 592 for (i = 0; i < CONF_SG_PARAMS_MAX; i++)
586 param->max_scan_compensation_time = 593 param->params[i] = cpu_to_le32(c->params[i]);
587 cpu_to_le32(c->max_scan_compensation_time); 594 param->param_idx = CONF_SG_PARAMS_ALL;
588 param->nfs_sample_interval = cpu_to_le16(c->nfs_sample_interval);
589 param->load_ratio = c->load_ratio;
590 param->auto_ps_mode = c->auto_ps_mode;
591 param->probe_req_compensation = c->probe_req_compensation;
592 param->scan_window_compensation = c->scan_window_compensation;
593 param->antenna_config = c->antenna_config;
594 param->beacon_miss_threshold = c->beacon_miss_threshold;
595 param->rate_adaptation_threshold =
596 cpu_to_le32(c->rate_adaptation_threshold);
597 param->rate_adaptation_snr = c->rate_adaptation_snr;
598 595
599 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); 596 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
600 if (ret < 0) { 597 if (ret < 0) {
@@ -806,7 +803,7 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
806 803
807 /* configure one basic rate class */ 804 /* configure one basic rate class */
808 idx = ACX_TX_BASIC_RATE; 805 idx = ACX_TX_BASIC_RATE;
809 acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set); 806 acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate);
810 acx->rate_class[idx].short_retry_limit = c->short_retry_limit; 807 acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
811 acx->rate_class[idx].long_retry_limit = c->long_retry_limit; 808 acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
812 acx->rate_class[idx].aflags = c->aflags; 809 acx->rate_class[idx].aflags = c->aflags;
@@ -1143,3 +1140,129 @@ out:
1143 kfree(acx); 1140 kfree(acx);
1144 return ret; 1141 return ret;
1145} 1142}
1143
1144int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
1145{
1146 struct wl1271_acx_keep_alive_mode *acx = NULL;
1147 int ret = 0;
1148
1149 wl1271_debug(DEBUG_ACX, "acx keep alive mode: %d", enable);
1150
1151 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1152 if (!acx) {
1153 ret = -ENOMEM;
1154 goto out;
1155 }
1156
1157 acx->enabled = enable;
1158
1159 ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
1160 if (ret < 0) {
1161 wl1271_warning("acx keep alive mode failed: %d", ret);
1162 goto out;
1163 }
1164
1165out:
1166 kfree(acx);
1167 return ret;
1168}
1169
1170int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
1171{
1172 struct wl1271_acx_keep_alive_config *acx = NULL;
1173 int ret = 0;
1174
1175 wl1271_debug(DEBUG_ACX, "acx keep alive config");
1176
1177 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1178 if (!acx) {
1179 ret = -ENOMEM;
1180 goto out;
1181 }
1182
1183 acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
1184 acx->index = index;
1185 acx->tpl_validation = tpl_valid;
1186 acx->trigger = ACX_KEEP_ALIVE_NO_TX;
1187
1188 ret = wl1271_cmd_configure(wl, ACX_SET_KEEP_ALIVE_CONFIG,
1189 acx, sizeof(*acx));
1190 if (ret < 0) {
1191 wl1271_warning("acx keep alive config failed: %d", ret);
1192 goto out;
1193 }
1194
1195out:
1196 kfree(acx);
1197 return ret;
1198}
1199
1200int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
1201 s16 thold, u8 hyst)
1202{
1203 struct wl1271_acx_rssi_snr_trigger *acx = NULL;
1204 int ret = 0;
1205
1206 wl1271_debug(DEBUG_ACX, "acx rssi snr trigger");
1207
1208 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1209 if (!acx) {
1210 ret = -ENOMEM;
1211 goto out;
1212 }
1213
1214 wl->last_rssi_event = -1;
1215
1216 acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
1217 acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
1218 acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
1219 if (enable)
1220 acx->enable = WL1271_ACX_TRIG_ENABLE;
1221 else
1222 acx->enable = WL1271_ACX_TRIG_DISABLE;
1223
1224 acx->index = WL1271_ACX_TRIG_IDX_RSSI;
1225 acx->dir = WL1271_ACX_TRIG_DIR_BIDIR;
1226 acx->threshold = cpu_to_le16(thold);
1227 acx->hysteresis = hyst;
1228
1229 ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_TRIGGER, acx, sizeof(*acx));
1230 if (ret < 0) {
1231 wl1271_warning("acx rssi snr trigger setting failed: %d", ret);
1232 goto out;
1233 }
1234
1235out:
1236 kfree(acx);
1237 return ret;
1238}
1239
1240int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
1241{
1242 struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
1243 struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
1244 int ret = 0;
1245
1246 wl1271_debug(DEBUG_ACX, "acx rssi snr avg weights");
1247
1248 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1249 if (!acx) {
1250 ret = -ENOMEM;
1251 goto out;
1252 }
1253
1254 acx->rssi_beacon = c->avg_weight_rssi_beacon;
1255 acx->rssi_data = c->avg_weight_rssi_data;
1256 acx->snr_beacon = c->avg_weight_snr_beacon;
1257 acx->snr_data = c->avg_weight_snr_data;
1258
1259 ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_WEIGHTS, acx, sizeof(*acx));
1260 if (ret < 0) {
1261 wl1271_warning("acx rssi snr trigger weights failed: %d", ret);
1262 goto out;
1263 }
1264
1265out:
1266 kfree(acx);
1267 return ret;
1268}
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index aeccc98581eb..420e7e2fc021 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -392,81 +392,27 @@ struct acx_conn_monit_params {
392 __le32 bss_lose_timeout; /* number of TU's from synch fail */ 392 __le32 bss_lose_timeout; /* number of TU's from synch fail */
393} __attribute__ ((packed)); 393} __attribute__ ((packed));
394 394
395enum {
396 SG_ENABLE = 0,
397 SG_DISABLE,
398 SG_SENSE_NO_ACTIVITY,
399 SG_SENSE_ACTIVE
400};
401
402struct acx_bt_wlan_coex { 395struct acx_bt_wlan_coex {
403 struct acx_header header; 396 struct acx_header header;
404 397
405 /*
406 * 0 -> PTA enabled
407 * 1 -> PTA disabled
408 * 2 -> sense no active mode, i.e.
409 * an interrupt is sent upon
410 * BT activity.
411 * 3 -> PTA is switched on in response
412 * to the interrupt sending.
413 */
414 u8 enable; 398 u8 enable;
415 u8 pad[3]; 399 u8 pad[3];
416} __attribute__ ((packed)); 400} __attribute__ ((packed));
417 401
418struct acx_dco_itrim_params { 402struct acx_bt_wlan_coex_param {
419 struct acx_header header; 403 struct acx_header header;
420 404
421 u8 enable; 405 __le32 params[CONF_SG_PARAMS_MAX];
406 u8 param_idx;
422 u8 padding[3]; 407 u8 padding[3];
423 __le32 timeout;
424} __attribute__ ((packed)); 408} __attribute__ ((packed));
425 409
426#define PTA_ANTENNA_TYPE_DEF (0) 410struct acx_dco_itrim_params {
427#define PTA_BT_HP_MAXTIME_DEF (2000)
428#define PTA_WLAN_HP_MAX_TIME_DEF (5000)
429#define PTA_SENSE_DISABLE_TIMER_DEF (1350)
430#define PTA_PROTECTIVE_RX_TIME_DEF (1500)
431#define PTA_PROTECTIVE_TX_TIME_DEF (1500)
432#define PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF (3000)
433#define PTA_SIGNALING_TYPE_DEF (1)
434#define PTA_AFH_LEVERAGE_ON_DEF (0)
435#define PTA_NUMBER_QUIET_CYCLE_DEF (0)
436#define PTA_MAX_NUM_CTS_DEF (3)
437#define PTA_NUMBER_OF_WLAN_PACKETS_DEF (2)
438#define PTA_NUMBER_OF_BT_PACKETS_DEF (2)
439#define PTA_PROTECTIVE_RX_TIME_FAST_DEF (1500)
440#define PTA_PROTECTIVE_TX_TIME_FAST_DEF (3000)
441#define PTA_CYCLE_TIME_FAST_DEF (8700)
442#define PTA_RX_FOR_AVALANCHE_DEF (5)
443#define PTA_ELP_HP_DEF (0)
444#define PTA_ANTI_STARVE_PERIOD_DEF (500)
445#define PTA_ANTI_STARVE_NUM_CYCLE_DEF (4)
446#define PTA_ALLOW_PA_SD_DEF (1)
447#define PTA_TIME_BEFORE_BEACON_DEF (6300)
448#define PTA_HPDM_MAX_TIME_DEF (1600)
449#define PTA_TIME_OUT_NEXT_WLAN_DEF (2550)
450#define PTA_AUTO_MODE_NO_CTS_DEF (0)
451#define PTA_BT_HP_RESPECTED_DEF (3)
452#define PTA_WLAN_RX_MIN_RATE_DEF (24)
453#define PTA_ACK_MODE_DEF (1)
454
455struct acx_bt_wlan_coex_param {
456 struct acx_header header; 411 struct acx_header header;
457 412
458 __le32 per_threshold; 413 u8 enable;
459 __le32 max_scan_compensation_time;
460 __le16 nfs_sample_interval;
461 u8 load_ratio;
462 u8 auto_ps_mode;
463 u8 probe_req_compensation;
464 u8 scan_window_compensation;
465 u8 antenna_config;
466 u8 beacon_miss_threshold;
467 __le32 rate_adaptation_threshold;
468 s8 rate_adaptation_snr;
469 u8 padding[3]; 414 u8 padding[3];
415 __le32 timeout;
470} __attribute__ ((packed)); 416} __attribute__ ((packed));
471 417
472struct acx_energy_detection { 418struct acx_energy_detection {
@@ -969,6 +915,84 @@ struct wl1271_acx_pm_config {
969 u8 padding[3]; 915 u8 padding[3];
970} __attribute__ ((packed)); 916} __attribute__ ((packed));
971 917
918struct wl1271_acx_keep_alive_mode {
919 struct acx_header header;
920
921 u8 enabled;
922 u8 padding[3];
923} __attribute__ ((packed));
924
925enum {
926 ACX_KEEP_ALIVE_NO_TX = 0,
927 ACX_KEEP_ALIVE_PERIOD_ONLY
928};
929
930enum {
931 ACX_KEEP_ALIVE_TPL_INVALID = 0,
932 ACX_KEEP_ALIVE_TPL_VALID
933};
934
935struct wl1271_acx_keep_alive_config {
936 struct acx_header header;
937
938 __le32 period;
939 u8 index;
940 u8 tpl_validation;
941 u8 trigger;
942 u8 padding;
943} __attribute__ ((packed));
944
945enum {
946 WL1271_ACX_TRIG_TYPE_LEVEL = 0,
947 WL1271_ACX_TRIG_TYPE_EDGE,
948};
949
950enum {
951 WL1271_ACX_TRIG_DIR_LOW = 0,
952 WL1271_ACX_TRIG_DIR_HIGH,
953 WL1271_ACX_TRIG_DIR_BIDIR,
954};
955
956enum {
957 WL1271_ACX_TRIG_ENABLE = 1,
958 WL1271_ACX_TRIG_DISABLE,
959};
960
961enum {
962 WL1271_ACX_TRIG_METRIC_RSSI_BEACON = 0,
963 WL1271_ACX_TRIG_METRIC_RSSI_DATA,
964 WL1271_ACX_TRIG_METRIC_SNR_BEACON,
965 WL1271_ACX_TRIG_METRIC_SNR_DATA,
966};
967
968enum {
969 WL1271_ACX_TRIG_IDX_RSSI = 0,
970 WL1271_ACX_TRIG_COUNT = 8,
971};
972
973struct wl1271_acx_rssi_snr_trigger {
974 struct acx_header header;
975
976 __le16 threshold;
977 __le16 pacing; /* 0 - 60000 ms */
978 u8 metric;
979 u8 type;
980 u8 dir;
981 u8 hysteresis;
982 u8 index;
983 u8 enable;
984 u8 padding[2];
985};
986
987struct wl1271_acx_rssi_snr_avg_weights {
988 struct acx_header header;
989
990 u8 rssi_beacon;
991 u8 rssi_data;
992 u8 snr_beacon;
993 u8 snr_data;
994};
995
972enum { 996enum {
973 ACX_WAKE_UP_CONDITIONS = 0x0002, 997 ACX_WAKE_UP_CONDITIONS = 0x0002,
974 ACX_MEM_CFG = 0x0003, 998 ACX_MEM_CFG = 0x0003,
@@ -1017,8 +1041,8 @@ enum {
1017 ACX_FRAG_CFG = 0x004F, 1041 ACX_FRAG_CFG = 0x004F,
1018 ACX_BET_ENABLE = 0x0050, 1042 ACX_BET_ENABLE = 0x0050,
1019 ACX_RSSI_SNR_TRIGGER = 0x0051, 1043 ACX_RSSI_SNR_TRIGGER = 0x0051,
1020 ACX_RSSI_SNR_WEIGHTS = 0x0051, 1044 ACX_RSSI_SNR_WEIGHTS = 0x0052,
1021 ACX_KEEP_ALIVE_MODE = 0x0052, 1045 ACX_KEEP_ALIVE_MODE = 0x0053,
1022 ACX_SET_KEEP_ALIVE_CONFIG = 0x0054, 1046 ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
1023 ACX_BA_SESSION_RESPONDER_POLICY = 0x0055, 1047 ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
1024 ACX_BA_SESSION_INITIATOR_POLICY = 0x0056, 1048 ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
@@ -1058,8 +1082,8 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
1058int wl1271_acx_dco_itrim_params(struct wl1271 *wl); 1082int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
1059int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter); 1083int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
1060int wl1271_acx_beacon_filter_table(struct wl1271 *wl); 1084int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
1061int wl1271_acx_conn_monit_params(struct wl1271 *wl); 1085int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
1062int wl1271_acx_sg_enable(struct wl1271 *wl); 1086int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
1063int wl1271_acx_sg_cfg(struct wl1271 *wl); 1087int wl1271_acx_sg_cfg(struct wl1271 *wl);
1064int wl1271_acx_cca_threshold(struct wl1271 *wl); 1088int wl1271_acx_cca_threshold(struct wl1271 *wl);
1065int wl1271_acx_bcn_dtim_options(struct wl1271 *wl); 1089int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
@@ -1085,5 +1109,10 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
1085int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address, 1109int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
1086 u8 version); 1110 u8 version);
1087int wl1271_acx_pm_config(struct wl1271 *wl); 1111int wl1271_acx_pm_config(struct wl1271 *wl);
1112int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
1113int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
1114int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
1115 s16 thold, u8 hyst);
1116int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
1088 1117
1089#endif /* __WL1271_ACX_H__ */ 1118#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index 024356263065..1a36d8a2196e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This file is part of wl1271 2 * This file is part of wl1271
3 * 3 *
4 * Copyright (C) 2008-2009 Nokia Corporation 4 * Copyright (C) 2008-2010 Nokia Corporation
5 * 5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com> 6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 * 7 *
@@ -27,7 +27,6 @@
27#include "wl1271_acx.h" 27#include "wl1271_acx.h"
28#include "wl1271_reg.h" 28#include "wl1271_reg.h"
29#include "wl1271_boot.h" 29#include "wl1271_boot.h"
30#include "wl1271_spi.h"
31#include "wl1271_io.h" 30#include "wl1271_io.h"
32#include "wl1271_event.h" 31#include "wl1271_event.h"
33 32
@@ -230,6 +229,14 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
230 nvs_len = sizeof(wl->nvs->nvs); 229 nvs_len = sizeof(wl->nvs->nvs);
231 nvs_ptr = (u8 *)wl->nvs->nvs; 230 nvs_ptr = (u8 *)wl->nvs->nvs;
232 231
232 /* update current MAC address to NVS */
233 nvs_ptr[11] = wl->mac_addr[0];
234 nvs_ptr[10] = wl->mac_addr[1];
235 nvs_ptr[6] = wl->mac_addr[2];
236 nvs_ptr[5] = wl->mac_addr[3];
237 nvs_ptr[4] = wl->mac_addr[4];
238 nvs_ptr[3] = wl->mac_addr[5];
239
233 /* 240 /*
234 * Layout before the actual NVS tables: 241 * Layout before the actual NVS tables:
235 * 1 byte : burst length. 242 * 1 byte : burst length.
@@ -300,7 +307,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
300 307
301static void wl1271_boot_enable_interrupts(struct wl1271 *wl) 308static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
302{ 309{
303 enable_irq(wl->irq); 310 wl1271_enable_interrupts(wl);
304 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, 311 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
305 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 312 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
306 wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL); 313 wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
@@ -344,7 +351,7 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
344static int wl1271_boot_run_firmware(struct wl1271 *wl) 351static int wl1271_boot_run_firmware(struct wl1271 *wl)
345{ 352{
346 int loop, ret; 353 int loop, ret;
347 u32 chip_id, interrupt; 354 u32 chip_id, intr;
348 355
349 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); 356 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
350 357
@@ -361,15 +368,15 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
361 loop = 0; 368 loop = 0;
362 while (loop++ < INIT_LOOP) { 369 while (loop++ < INIT_LOOP) {
363 udelay(INIT_LOOP_DELAY); 370 udelay(INIT_LOOP_DELAY);
364 interrupt = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 371 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
365 372
366 if (interrupt == 0xffffffff) { 373 if (intr == 0xffffffff) {
367 wl1271_error("error reading hardware complete " 374 wl1271_error("error reading hardware complete "
368 "init indication"); 375 "init indication");
369 return -EIO; 376 return -EIO;
370 } 377 }
371 /* check that ACX_INTR_INIT_COMPLETE is enabled */ 378 /* check that ACX_INTR_INIT_COMPLETE is enabled */
372 else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) { 379 else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
373 wl1271_write32(wl, ACX_REG_INTERRUPT_ACK, 380 wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
374 WL1271_ACX_INTR_INIT_COMPLETE); 381 WL1271_ACX_INTR_INIT_COMPLETE);
375 break; 382 break;
@@ -404,7 +411,10 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
404 /* unmask required mbox events */ 411 /* unmask required mbox events */
405 wl->event_mask = BSS_LOSE_EVENT_ID | 412 wl->event_mask = BSS_LOSE_EVENT_ID |
406 SCAN_COMPLETE_EVENT_ID | 413 SCAN_COMPLETE_EVENT_ID |
407 PS_REPORT_EVENT_ID; 414 PS_REPORT_EVENT_ID |
415 JOIN_EVENT_COMPLETE_ID |
416 DISCONNECT_EVENT_COMPLETE_ID |
417 RSSI_SNR_TRIGGER_0_EVENT_ID;
408 418
409 ret = wl1271_event_unmask(wl); 419 ret = wl1271_event_unmask(wl);
410 if (ret < 0) { 420 if (ret < 0) {
@@ -431,11 +441,23 @@ static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
431 return 0; 441 return 0;
432} 442}
433 443
444static void wl1271_boot_hw_version(struct wl1271 *wl)
445{
446 u32 fuse;
447
448 fuse = wl1271_top_reg_read(wl, REG_FUSE_DATA_2_1);
449 fuse = (fuse & PG_VER_MASK) >> PG_VER_OFFSET;
450
451 wl->hw_pg_ver = (s8)fuse;
452}
453
434int wl1271_boot(struct wl1271 *wl) 454int wl1271_boot(struct wl1271 *wl)
435{ 455{
436 int ret = 0; 456 int ret = 0;
437 u32 tmp, clk, pause; 457 u32 tmp, clk, pause;
438 458
459 wl1271_boot_hw_version(wl);
460
439 if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4) 461 if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4)
440 /* ref clk: 19.2/38.4/38.4-XTAL */ 462 /* ref clk: 19.2/38.4/38.4-XTAL */
441 clk = 0x3; 463 clk = 0x3;
@@ -445,11 +467,15 @@ int wl1271_boot(struct wl1271 *wl)
445 467
446 if (REF_CLOCK != 0) { 468 if (REF_CLOCK != 0) {
447 u16 val; 469 u16 val;
448 /* Set clock type */ 470 /* Set clock type (open drain) */
449 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE); 471 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
450 val &= FREF_CLK_TYPE_BITS; 472 val &= FREF_CLK_TYPE_BITS;
451 val |= CLK_REQ_PRCM;
452 wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val); 473 wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
474
475 /* Set clock pull mode (no pull) */
476 val = wl1271_top_reg_read(wl, OCP_REG_CLK_PULL);
477 val |= NO_PULL;
478 wl1271_top_reg_write(wl, OCP_REG_CLK_PULL, val);
453 } else { 479 } else {
454 u16 val; 480 u16 val;
455 /* Set clock polarity */ 481 /* Set clock polarity */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h
index 412443ee655a..f829699d597e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.h
@@ -53,10 +53,16 @@ struct wl1271_static_data {
53#define OCP_REG_POLARITY 0x0064 53#define OCP_REG_POLARITY 0x0064
54#define OCP_REG_CLK_TYPE 0x0448 54#define OCP_REG_CLK_TYPE 0x0448
55#define OCP_REG_CLK_POLARITY 0x0cb2 55#define OCP_REG_CLK_POLARITY 0x0cb2
56#define OCP_REG_CLK_PULL 0x0cb4
56 57
57#define CMD_MBOX_ADDRESS 0x407B4 58#define REG_FUSE_DATA_2_1 0x050a
59#define PG_VER_MASK 0x3c
60#define PG_VER_OFFSET 2
58 61
59#define POLARITY_LOW BIT(1) 62#define CMD_MBOX_ADDRESS 0x407B4
63
64#define POLARITY_LOW BIT(1)
65#define NO_PULL (BIT(14) | BIT(15))
60 66
61#define FREF_CLK_TYPE_BITS 0xfffffe7f 67#define FREF_CLK_TYPE_BITS 0xfffffe7f
62#define CLK_REQ_PRCM 0x100 68#define CLK_REQ_PRCM 0x100
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index e7832f3318eb..19393e236e2c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This file is part of wl1271 2 * This file is part of wl1271
3 * 3 *
4 * Copyright (C) 2009 Nokia Corporation 4 * Copyright (C) 2009-2010 Nokia Corporation
5 * 5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com> 6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 * 7 *
@@ -26,15 +26,18 @@
26#include <linux/crc7.h> 26#include <linux/crc7.h>
27#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
28#include <linux/etherdevice.h> 28#include <linux/etherdevice.h>
29#include <linux/ieee80211.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30 31
31#include "wl1271.h" 32#include "wl1271.h"
32#include "wl1271_reg.h" 33#include "wl1271_reg.h"
33#include "wl1271_spi.h"
34#include "wl1271_io.h" 34#include "wl1271_io.h"
35#include "wl1271_acx.h" 35#include "wl1271_acx.h"
36#include "wl12xx_80211.h" 36#include "wl12xx_80211.h"
37#include "wl1271_cmd.h" 37#include "wl1271_cmd.h"
38#include "wl1271_event.h"
39
40#define WL1271_CMD_FAST_POLL_COUNT 50
38 41
39/* 42/*
40 * send command to firmware 43 * send command to firmware
@@ -52,6 +55,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
52 u32 intr; 55 u32 intr;
53 int ret = 0; 56 int ret = 0;
54 u16 status; 57 u16 status;
58 u16 poll_count = 0;
55 59
56 cmd = buf; 60 cmd = buf;
57 cmd->id = cpu_to_le16(id); 61 cmd->id = cpu_to_le16(id);
@@ -73,7 +77,11 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
73 goto out; 77 goto out;
74 } 78 }
75 79
76 msleep(1); 80 poll_count++;
81 if (poll_count < WL1271_CMD_FAST_POLL_COUNT)
82 udelay(10);
83 else
84 msleep(1);
77 85
78 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 86 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
79 } 87 }
@@ -249,7 +257,36 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
249 return ret; 257 return ret;
250} 258}
251 259
252int wl1271_cmd_join(struct wl1271 *wl) 260/*
261 * Poll the mailbox event field until any of the bits in the mask is set or a
262 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
263 */
264static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
265{
266 u32 events_vector, event;
267 unsigned long timeout;
268
269 timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
270
271 do {
272 if (time_after(jiffies, timeout))
273 return -ETIMEDOUT;
274
275 msleep(1);
276
277 /* read from both event fields */
278 wl1271_read(wl, wl->mbox_ptr[0], &events_vector,
279 sizeof(events_vector), false);
280 event = events_vector & mask;
281 wl1271_read(wl, wl->mbox_ptr[1], &events_vector,
282 sizeof(events_vector), false);
283 event |= events_vector & mask;
284 } while (!event);
285
286 return 0;
287}
288
289int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
253{ 290{
254 static bool do_cal = true; 291 static bool do_cal = true;
255 struct wl1271_cmd_join *join; 292 struct wl1271_cmd_join *join;
@@ -280,30 +317,13 @@ int wl1271_cmd_join(struct wl1271 *wl)
280 317
281 join->rx_config_options = cpu_to_le32(wl->rx_config); 318 join->rx_config_options = cpu_to_le32(wl->rx_config);
282 join->rx_filter_options = cpu_to_le32(wl->rx_filter); 319 join->rx_filter_options = cpu_to_le32(wl->rx_filter);
283 join->bss_type = wl->bss_type; 320 join->bss_type = bss_type;
321 join->basic_rate_set = cpu_to_le32(wl->basic_rate_set);
284 322
285 /* 323 if (wl->band == IEEE80211_BAND_5GHZ)
286 * FIXME: disable temporarily all filters because after commit
287 * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
288 * association. The filter logic needs to be implemented properly
289 * and once that is done, this hack can be removed.
290 */
291 join->rx_config_options = cpu_to_le32(0);
292 join->rx_filter_options = cpu_to_le32(WL1271_DEFAULT_RX_FILTER);
293
294 if (wl->band == IEEE80211_BAND_2GHZ)
295 join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_1MBPS |
296 CONF_HW_BIT_RATE_2MBPS |
297 CONF_HW_BIT_RATE_5_5MBPS |
298 CONF_HW_BIT_RATE_11MBPS);
299 else {
300 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ; 324 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
301 join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_6MBPS |
302 CONF_HW_BIT_RATE_12MBPS |
303 CONF_HW_BIT_RATE_24MBPS);
304 }
305 325
306 join->beacon_interval = cpu_to_le16(WL1271_DEFAULT_BEACON_INT); 326 join->beacon_interval = cpu_to_le16(wl->beacon_int);
307 join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD; 327 join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD;
308 328
309 join->channel = wl->channel; 329 join->channel = wl->channel;
@@ -320,8 +340,7 @@ int wl1271_cmd_join(struct wl1271 *wl)
320 340
321 /* reset TX security counters */ 341 /* reset TX security counters */
322 wl->tx_security_last_seq = 0; 342 wl->tx_security_last_seq = 0;
323 wl->tx_security_seq_16 = 0; 343 wl->tx_security_seq = 0;
324 wl->tx_security_seq_32 = 0;
325 344
326 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0); 345 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
327 if (ret < 0) { 346 if (ret < 0) {
@@ -329,11 +348,9 @@ int wl1271_cmd_join(struct wl1271 *wl)
329 goto out_free; 348 goto out_free;
330 } 349 }
331 350
332 /* 351 ret = wl1271_cmd_wait_for_event(wl, JOIN_EVENT_COMPLETE_ID);
333 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to 352 if (ret < 0)
334 * simplify locking we just sleep instead, for now 353 wl1271_error("cmd join event completion error");
335 */
336 msleep(10);
337 354
338out_free: 355out_free:
339 kfree(join); 356 kfree(join);
@@ -465,7 +482,7 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
465 if (ret < 0) { 482 if (ret < 0) {
466 wl1271_error("tx %s cmd for channel %d failed", 483 wl1271_error("tx %s cmd for channel %d failed",
467 enable ? "start" : "stop", cmd->channel); 484 enable ? "start" : "stop", cmd->channel);
468 return ret; 485 goto out;
469 } 486 }
470 487
471 wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d", 488 wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
@@ -499,7 +516,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send)
499 ps_params->ps_mode = ps_mode; 516 ps_params->ps_mode = ps_mode;
500 ps_params->send_null_data = send; 517 ps_params->send_null_data = send;
501 ps_params->retries = 5; 518 ps_params->retries = 5;
502 ps_params->hang_over_period = 128; 519 ps_params->hang_over_period = 1;
503 ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */ 520 ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
504 521
505 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, 522 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -549,25 +566,29 @@ out:
549 return ret; 566 return ret;
550} 567}
551 568
552int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, 569int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
553 u8 active_scan, u8 high_prio, u8 band, 570 const u8 *ie, size_t ie_len, u8 active_scan,
554 u8 probe_requests) 571 u8 high_prio, u8 band, u8 probe_requests)
555{ 572{
556 573
557 struct wl1271_cmd_trigger_scan_to *trigger = NULL; 574 struct wl1271_cmd_trigger_scan_to *trigger = NULL;
558 struct wl1271_cmd_scan *params = NULL; 575 struct wl1271_cmd_scan *params = NULL;
559 struct ieee80211_channel *channels; 576 struct ieee80211_channel *channels;
577 u32 rate;
560 int i, j, n_ch, ret; 578 int i, j, n_ch, ret;
561 u16 scan_options = 0; 579 u16 scan_options = 0;
562 u8 ieee_band; 580 u8 ieee_band;
563 581
564 if (band == WL1271_SCAN_BAND_2_4_GHZ) 582 if (band == WL1271_SCAN_BAND_2_4_GHZ) {
565 ieee_band = IEEE80211_BAND_2GHZ; 583 ieee_band = IEEE80211_BAND_2GHZ;
566 else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled()) 584 rate = wl->conf.tx.basic_rate;
585 } else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled()) {
567 ieee_band = IEEE80211_BAND_2GHZ; 586 ieee_band = IEEE80211_BAND_2GHZ;
568 else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled()) 587 rate = wl->conf.tx.basic_rate;
588 } else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled()) {
569 ieee_band = IEEE80211_BAND_5GHZ; 589 ieee_band = IEEE80211_BAND_5GHZ;
570 else 590 rate = wl->conf.tx.basic_rate_5;
591 } else
571 return -EINVAL; 592 return -EINVAL;
572 593
573 if (wl->hw->wiphy->bands[ieee_band]->channels == NULL) 594 if (wl->hw->wiphy->bands[ieee_band]->channels == NULL)
@@ -594,8 +615,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
594 params->params.scan_options = cpu_to_le16(scan_options); 615 params->params.scan_options = cpu_to_le16(scan_options);
595 616
596 params->params.num_probe_requests = probe_requests; 617 params->params.num_probe_requests = probe_requests;
597 /* Let the fw autodetect suitable tx_rate for probes */ 618 params->params.tx_rate = cpu_to_le32(rate);
598 params->params.tx_rate = 0;
599 params->params.tid_trigger = 0; 619 params->params.tid_trigger = 0;
600 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; 620 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
601 621
@@ -622,12 +642,13 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
622 642
623 params->params.num_channels = j; 643 params->params.num_channels = j;
624 644
625 if (len && ssid) { 645 if (ssid_len && ssid) {
626 params->params.ssid_len = len; 646 params->params.ssid_len = ssid_len;
627 memcpy(params->params.ssid, ssid, len); 647 memcpy(params->params.ssid, ssid, ssid_len);
628 } 648 }
629 649
630 ret = wl1271_cmd_build_probe_req(wl, ssid, len, ieee_band); 650 ret = wl1271_cmd_build_probe_req(wl, ssid, ssid_len,
651 ie, ie_len, ieee_band);
631 if (ret < 0) { 652 if (ret < 0) {
632 wl1271_error("PROBE request template failed"); 653 wl1271_error("PROBE request template failed");
633 goto out; 654 goto out;
@@ -658,9 +679,9 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
658 wl->scan.active = active_scan; 679 wl->scan.active = active_scan;
659 wl->scan.high_prio = high_prio; 680 wl->scan.high_prio = high_prio;
660 wl->scan.probe_requests = probe_requests; 681 wl->scan.probe_requests = probe_requests;
661 if (len && ssid) { 682 if (ssid_len && ssid) {
662 wl->scan.ssid_len = len; 683 wl->scan.ssid_len = ssid_len;
663 memcpy(wl->scan.ssid, ssid, len); 684 memcpy(wl->scan.ssid, ssid, ssid_len);
664 } else 685 } else
665 wl->scan.ssid_len = 0; 686 wl->scan.ssid_len = 0;
666 } 687 }
@@ -675,11 +696,12 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
675 696
676out: 697out:
677 kfree(params); 698 kfree(params);
699 kfree(trigger);
678 return ret; 700 return ret;
679} 701}
680 702
681int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 703int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
682 void *buf, size_t buf_len) 704 void *buf, size_t buf_len, int index, u32 rates)
683{ 705{
684 struct wl1271_cmd_template_set *cmd; 706 struct wl1271_cmd_template_set *cmd;
685 int ret = 0; 707 int ret = 0;
@@ -697,9 +719,10 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
697 719
698 cmd->len = cpu_to_le16(buf_len); 720 cmd->len = cpu_to_le16(buf_len);
699 cmd->template_type = template_id; 721 cmd->template_type = template_id;
700 cmd->enabled_rates = cpu_to_le32(wl->conf.tx.rc_conf.enabled_rates); 722 cmd->enabled_rates = cpu_to_le32(rates);
701 cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit; 723 cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
702 cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit; 724 cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
725 cmd->index = index;
703 726
704 if (buf) 727 if (buf)
705 memcpy(cmd->template_data, buf, buf_len); 728 memcpy(cmd->template_data, buf, buf_len);
@@ -717,155 +740,129 @@ out:
717 return ret; 740 return ret;
718} 741}
719 742
720static int wl1271_build_basic_rates(u8 *rates, u8 band) 743int wl1271_cmd_build_null_data(struct wl1271 *wl)
721{ 744{
722 u8 index = 0; 745 struct sk_buff *skb = NULL;
723 746 int size;
724 if (band == IEEE80211_BAND_2GHZ) { 747 void *ptr;
725 rates[index++] = 748 int ret = -ENOMEM;
726 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
727 rates[index++] =
728 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
729 rates[index++] =
730 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
731 rates[index++] =
732 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
733 } else if (band == IEEE80211_BAND_5GHZ) {
734 rates[index++] =
735 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
736 rates[index++] =
737 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
738 rates[index++] =
739 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
740 } else {
741 wl1271_error("build_basic_rates invalid band: %d", band);
742 }
743 749
744 return index;
745}
746 750
747static int wl1271_build_extended_rates(u8 *rates, u8 band) 751 if (wl->bss_type == BSS_TYPE_IBSS) {
748{ 752 size = sizeof(struct wl12xx_null_data_template);
749 u8 index = 0; 753 ptr = NULL;
750
751 if (band == IEEE80211_BAND_2GHZ) {
752 rates[index++] = IEEE80211_OFDM_RATE_6MB;
753 rates[index++] = IEEE80211_OFDM_RATE_9MB;
754 rates[index++] = IEEE80211_OFDM_RATE_12MB;
755 rates[index++] = IEEE80211_OFDM_RATE_18MB;
756 rates[index++] = IEEE80211_OFDM_RATE_24MB;
757 rates[index++] = IEEE80211_OFDM_RATE_36MB;
758 rates[index++] = IEEE80211_OFDM_RATE_48MB;
759 rates[index++] = IEEE80211_OFDM_RATE_54MB;
760 } else if (band == IEEE80211_BAND_5GHZ) {
761 rates[index++] =
762 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
763 rates[index++] =
764 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
765 rates[index++] =
766 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
767 rates[index++] =
768 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
769 rates[index++] =
770 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
771 rates[index++] =
772 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
773 } else { 754 } else {
774 wl1271_error("build_basic_rates invalid band: %d", band); 755 skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
756 if (!skb)
757 goto out;
758 size = skb->len;
759 ptr = skb->data;
775 } 760 }
776 761
777 return index; 762 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
763 WL1271_RATE_AUTOMATIC);
764
765out:
766 dev_kfree_skb(skb);
767 if (ret)
768 wl1271_warning("cmd buld null data failed %d", ret);
769
770 return ret;
771
778} 772}
779 773
780int wl1271_cmd_build_null_data(struct wl1271 *wl) 774int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
781{ 775{
782 struct wl12xx_null_data_template template; 776 struct sk_buff *skb = NULL;
777 int ret = -ENOMEM;
783 778
784 if (!is_zero_ether_addr(wl->bssid)) { 779 skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
785 memcpy(template.header.da, wl->bssid, ETH_ALEN); 780 if (!skb)
786 memcpy(template.header.bssid, wl->bssid, ETH_ALEN); 781 goto out;
787 } else { 782
788 memset(template.header.da, 0xff, ETH_ALEN); 783 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
789 memset(template.header.bssid, 0xff, ETH_ALEN); 784 skb->data, skb->len,
790 } 785 CMD_TEMPL_KLV_IDX_NULL_DATA,
786 WL1271_RATE_AUTOMATIC);
791 787
792 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); 788out:
793 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA | 789 dev_kfree_skb(skb);
794 IEEE80211_STYPE_NULLFUNC | 790 if (ret)
795 IEEE80211_FCTL_TODS); 791 wl1271_warning("cmd build klv null data failed %d", ret);
796 792
797 return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template, 793 return ret;
798 sizeof(template));
799 794
800} 795}
801 796
802int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid) 797int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
803{ 798{
804 struct wl12xx_ps_poll_template template; 799 struct sk_buff *skb;
805 800 int ret = 0;
806 memcpy(template.bssid, wl->bssid, ETH_ALEN);
807 memcpy(template.ta, wl->mac_addr, ETH_ALEN);
808
809 /* aid in PS-Poll has its two MSBs each set to 1 */
810 template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
811 801
812 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); 802 skb = ieee80211_pspoll_get(wl->hw, wl->vif);
803 if (!skb)
804 goto out;
813 805
814 return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template, 806 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
815 sizeof(template)); 807 skb->len, 0, wl->basic_rate);
816 808
809out:
810 dev_kfree_skb(skb);
811 return ret;
817} 812}
818 813
819int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len, 814int wl1271_cmd_build_probe_req(struct wl1271 *wl,
820 u8 band) 815 const u8 *ssid, size_t ssid_len,
816 const u8 *ie, size_t ie_len, u8 band)
821{ 817{
822 struct wl12xx_probe_req_template template; 818 struct sk_buff *skb;
823 struct wl12xx_ie_rates *rates;
824 char *ptr;
825 u16 size;
826 int ret; 819 int ret;
827 820
828 ptr = (char *)&template; 821 skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
829 size = sizeof(struct ieee80211_header); 822 ie, ie_len);
830 823 if (!skb) {
831 memset(template.header.da, 0xff, ETH_ALEN); 824 ret = -ENOMEM;
832 memset(template.header.bssid, 0xff, ETH_ALEN); 825 goto out;
833 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); 826 }
834 template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); 827
835 828 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
836 /* IEs */
837 /* SSID */
838 template.ssid.header.id = WLAN_EID_SSID;
839 template.ssid.header.len = ssid_len;
840 if (ssid_len && ssid)
841 memcpy(template.ssid.ssid, ssid, ssid_len);
842 size += sizeof(struct wl12xx_ie_header) + ssid_len;
843 ptr += size;
844
845 /* Basic Rates */
846 rates = (struct wl12xx_ie_rates *)ptr;
847 rates->header.id = WLAN_EID_SUPP_RATES;
848 rates->header.len = wl1271_build_basic_rates(rates->rates, band);
849 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
850 ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
851
852 /* Extended rates */
853 rates = (struct wl12xx_ie_rates *)ptr;
854 rates->header.id = WLAN_EID_EXT_SUPP_RATES;
855 rates->header.len = wl1271_build_extended_rates(rates->rates, band);
856 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
857
858 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
859 829
860 if (band == IEEE80211_BAND_2GHZ) 830 if (band == IEEE80211_BAND_2GHZ)
861 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, 831 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
862 &template, size); 832 skb->data, skb->len, 0,
833 wl->conf.tx.basic_rate);
863 else 834 else
864 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5, 835 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
865 &template, size); 836 skb->data, skb->len, 0,
837 wl->conf.tx.basic_rate_5);
838
839out:
840 dev_kfree_skb(skb);
866 return ret; 841 return ret;
867} 842}
868 843
844int wl1271_build_qos_null_data(struct wl1271 *wl)
845{
846 struct ieee80211_qos_hdr template;
847
848 memset(&template, 0, sizeof(template));
849
850 memcpy(template.addr1, wl->bssid, ETH_ALEN);
851 memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
852 memcpy(template.addr3, wl->bssid, ETH_ALEN);
853
854 template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
855 IEEE80211_STYPE_QOS_NULLFUNC |
856 IEEE80211_FCTL_TODS);
857
858 /* FIXME: not sure what priority to use here */
859 template.qos_ctrl = cpu_to_le16(0);
860
861 return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
862 sizeof(template), 0,
863 WL1271_RATE_AUTOMATIC);
864}
865
869int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id) 866int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
870{ 867{
871 struct wl1271_cmd_set_keys *cmd; 868 struct wl1271_cmd_set_keys *cmd;
@@ -976,6 +973,10 @@ int wl1271_cmd_disconnect(struct wl1271 *wl)
976 goto out_free; 973 goto out_free;
977 } 974 }
978 975
976 ret = wl1271_cmd_wait_for_event(wl, DISCONNECT_EVENT_COMPLETE_ID);
977 if (ret < 0)
978 wl1271_error("cmd disconnect event completion error");
979
979out_free: 980out_free:
980 kfree(cmd); 981 kfree(cmd);
981 982
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index 2dc06c73532b..f2820b42a943 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -33,7 +33,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
33 size_t res_len); 33 size_t res_len);
34int wl1271_cmd_general_parms(struct wl1271 *wl); 34int wl1271_cmd_general_parms(struct wl1271 *wl);
35int wl1271_cmd_radio_parms(struct wl1271 *wl); 35int wl1271_cmd_radio_parms(struct wl1271 *wl);
36int wl1271_cmd_join(struct wl1271 *wl); 36int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type);
37int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); 37int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
38int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 38int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
39int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 39int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
@@ -41,15 +41,18 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send); 41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send);
42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
43 size_t len); 43 size_t len);
44int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, 44int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
45 u8 active_scan, u8 high_prio, u8 band, 45 const u8 *ie, size_t ie_len, u8 active_scan,
46 u8 probe_requests); 46 u8 high_prio, u8 band, u8 probe_requests);
47int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 47int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
48 void *buf, size_t buf_len); 48 void *buf, size_t buf_len, int index, u32 rates);
49int wl1271_cmd_build_null_data(struct wl1271 *wl); 49int wl1271_cmd_build_null_data(struct wl1271 *wl);
50int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid); 50int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
51int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len, 51int wl1271_cmd_build_probe_req(struct wl1271 *wl,
52 u8 band); 52 const u8 *ssid, size_t ssid_len,
53 const u8 *ie, size_t ie_len, u8 band);
54int wl1271_build_qos_null_data(struct wl1271 *wl);
55int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
53int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id); 56int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
54int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 57int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
55 u8 key_size, const u8 *key, const u8 *addr, 58 u8 key_size, const u8 *key, const u8 *addr,
@@ -99,6 +102,11 @@ enum wl1271_commands {
99 102
100#define MAX_CMD_PARAMS 572 103#define MAX_CMD_PARAMS 572
101 104
105enum {
106 CMD_TEMPL_KLV_IDX_NULL_DATA = 0,
107 CMD_TEMPL_KLV_IDX_MAX = 4
108};
109
102enum cmd_templ { 110enum cmd_templ {
103 CMD_TEMPL_NULL_DATA = 0, 111 CMD_TEMPL_NULL_DATA = 0,
104 CMD_TEMPL_BEACON, 112 CMD_TEMPL_BEACON,
@@ -121,6 +129,7 @@ enum cmd_templ {
121/* unit ms */ 129/* unit ms */
122#define WL1271_COMMAND_TIMEOUT 2000 130#define WL1271_COMMAND_TIMEOUT 2000
123#define WL1271_CMD_TEMPL_MAX_SIZE 252 131#define WL1271_CMD_TEMPL_MAX_SIZE 252
132#define WL1271_EVENT_TIMEOUT 750
124 133
125struct wl1271_cmd_header { 134struct wl1271_cmd_header {
126 __le16 id; 135 __le16 id;
@@ -243,6 +252,8 @@ struct cmd_enabledisable_path {
243 u8 padding[3]; 252 u8 padding[3];
244} __attribute__ ((packed)); 253} __attribute__ ((packed));
245 254
255#define WL1271_RATE_AUTOMATIC 0
256
246struct wl1271_cmd_template_set { 257struct wl1271_cmd_template_set {
247 struct wl1271_cmd_header header; 258 struct wl1271_cmd_header header;
248 259
@@ -509,6 +520,8 @@ enum wl1271_disconnect_type {
509}; 520};
510 521
511struct wl1271_cmd_disconnect { 522struct wl1271_cmd_disconnect {
523 struct wl1271_cmd_header header;
524
512 __le32 rx_config_options; 525 __le32 rx_config_options;
513 __le32 rx_filter_options; 526 __le32 rx_filter_options;
514 527
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 6f9e75cc5640..d046d044b5bd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -65,110 +65,344 @@ enum {
65 CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS, 65 CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS,
66}; 66};
67 67
68struct conf_sg_settings { 68enum {
69 CONF_HW_RXTX_RATE_MCS7 = 0,
70 CONF_HW_RXTX_RATE_MCS6,
71 CONF_HW_RXTX_RATE_MCS5,
72 CONF_HW_RXTX_RATE_MCS4,
73 CONF_HW_RXTX_RATE_MCS3,
74 CONF_HW_RXTX_RATE_MCS2,
75 CONF_HW_RXTX_RATE_MCS1,
76 CONF_HW_RXTX_RATE_MCS0,
77 CONF_HW_RXTX_RATE_54,
78 CONF_HW_RXTX_RATE_48,
79 CONF_HW_RXTX_RATE_36,
80 CONF_HW_RXTX_RATE_24,
81 CONF_HW_RXTX_RATE_22,
82 CONF_HW_RXTX_RATE_18,
83 CONF_HW_RXTX_RATE_12,
84 CONF_HW_RXTX_RATE_11,
85 CONF_HW_RXTX_RATE_9,
86 CONF_HW_RXTX_RATE_6,
87 CONF_HW_RXTX_RATE_5_5,
88 CONF_HW_RXTX_RATE_2,
89 CONF_HW_RXTX_RATE_1,
90 CONF_HW_RXTX_RATE_MAX,
91 CONF_HW_RXTX_RATE_UNSUPPORTED = 0xff
92};
93
94enum {
95 CONF_SG_DISABLE = 0,
96 CONF_SG_PROTECTIVE,
97 CONF_SG_OPPORTUNISTIC
98};
99
100enum {
69 /* 101 /*
70 * Defines the PER threshold in PPM of the BT voice of which reaching 102 * PER threshold in PPM of the BT voice
71 * this value will trigger raising the priority of the BT voice by
72 * the BT IP until next NFS sample interval time as defined in
73 * nfs_sample_interval.
74 * 103 *
75 * Unit: PER value in PPM (parts per million) 104 * Range: 0 - 10000000
76 * #Error_packets / #Total_packets 105 */
106 CONF_SG_BT_PER_THRESHOLD = 0,
77 107
78 * Range: u32 108 /*
109 * Number of consequent RX_ACTIVE activities to override BT voice
110 * frames to ensure WLAN connection
111 *
112 * Range: 0 - 100
113 */
114 CONF_SG_HV3_MAX_OVERRIDE,
115
116 /*
117 * Defines the PER threshold of the BT voice
118 *
119 * Range: 0 - 65000
120 */
121 CONF_SG_BT_NFS_SAMPLE_INTERVAL,
122
123 /*
124 * Defines the load ratio of BT
125 *
126 * Range: 0 - 100 (%)
127 */
128 CONF_SG_BT_LOAD_RATIO,
129
130 /*
131 * Defines whether the SG will force WLAN host to enter/exit PSM
132 *
133 * Range: 1 - SG can force, 0 - host handles PSM
134 */
135 CONF_SG_AUTO_PS_MODE,
136
137 /*
138 * Compensation percentage of probe requests when scan initiated
139 * during BT voice/ACL link.
140 *
141 * Range: 0 - 255 (%)
142 */
143 CONF_SG_AUTO_SCAN_PROBE_REQ,
144
145 /*
146 * Compensation percentage of probe requests when active scan initiated
147 * during BT voice
148 *
149 * Range: 0 - 255 (%)
150 */
151 CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3,
152
153 /*
154 * Defines antenna configuration (single/dual antenna)
155 *
156 * Range: 0 - single antenna, 1 - dual antenna
157 */
158 CONF_SG_ANTENNA_CONFIGURATION,
159
160 /*
161 * The threshold (percent) of max consequtive beacon misses before
162 * increasing priority of beacon reception.
163 *
164 * Range: 0 - 100 (%)
165 */
166 CONF_SG_BEACON_MISS_PERCENT,
167
168 /*
169 * The rate threshold below which receiving a data frame from the AP
170 * will increase the priority of the data frame above BT traffic.
171 *
172 * Range: 0,2, 5(=5.5), 6, 9, 11, 12, 18, 24, 36, 48, 54
173 */
174 CONF_SG_RATE_ADAPT_THRESH,
175
176 /*
177 * Not used currently.
178 *
179 * Range: 0
180 */
181 CONF_SG_RATE_ADAPT_SNR,
182
183 /*
184 * Configure the min and max time BT gains the antenna
185 * in WLAN PSM / BT master basic rate
186 *
187 * Range: 0 - 255 (ms)
188 */
189 CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR,
190 CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR,
191
192 /*
193 * The time after it expires no new WLAN trigger frame is trasmitted
194 * in WLAN PSM / BT master basic rate
195 *
196 * Range: 0 - 255 (ms)
197 */
198 CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR,
199
200 /*
201 * Configure the min and max time BT gains the antenna
202 * in WLAN PSM / BT slave basic rate
203 *
204 * Range: 0 - 255 (ms)
205 */
206 CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR,
207 CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR,
208
209 /*
210 * The time after it expires no new WLAN trigger frame is trasmitted
211 * in WLAN PSM / BT slave basic rate
212 *
213 * Range: 0 - 255 (ms)
214 */
215 CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR,
216
217 /*
218 * Configure the min and max time BT gains the antenna
219 * in WLAN PSM / BT master EDR
220 *
221 * Range: 0 - 255 (ms)
222 */
223 CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR,
224 CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR,
225
226 /*
227 * The time after it expires no new WLAN trigger frame is trasmitted
228 * in WLAN PSM / BT master EDR
229 *
230 * Range: 0 - 255 (ms)
231 */
232 CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR,
233
234 /*
235 * Configure the min and max time BT gains the antenna
236 * in WLAN PSM / BT slave EDR
237 *
238 * Range: 0 - 255 (ms)
239 */
240 CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR,
241 CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR,
242
243 /*
244 * The time after it expires no new WLAN trigger frame is trasmitted
245 * in WLAN PSM / BT slave EDR
246 *
247 * Range: 0 - 255 (ms)
248 */
249 CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR,
250
251 /*
252 * RX guard time before the beginning of a new BT voice frame during
253 * which no new WLAN trigger frame is transmitted.
254 *
255 * Range: 0 - 100000 (us)
256 */
257 CONF_SG_RXT,
258
259 /*
260 * TX guard time before the beginning of a new BT voice frame during
261 * which no new WLAN frame is transmitted.
262 *
263 * Range: 0 - 100000 (us)
264 */
265
266 CONF_SG_TXT,
267
268 /*
269 * Enable adaptive RXT/TXT algorithm. If disabled, the host values
270 * will be utilized.
271 *
272 * Range: 0 - disable, 1 - enable
273 */
274 CONF_SG_ADAPTIVE_RXT_TXT,
275
276 /*
277 * The used WLAN legacy service period during active BT ACL link
278 *
279 * Range: 0 - 255 (ms)
280 */
281 CONF_SG_PS_POLL_TIMEOUT,
282
283 /*
284 * The used WLAN UPSD service period during active BT ACL link
285 *
286 * Range: 0 - 255 (ms)
79 */ 287 */
80 u32 per_threshold; 288 CONF_SG_UPSD_TIMEOUT,
81 289
82 /* 290 /*
83 * This value is an absolute time in micro-seconds to limit the 291 * Configure the min and max time BT gains the antenna
84 * maximum scan duration compensation while in SG 292 * in WLAN Active / BT master EDR
293 *
294 * Range: 0 - 255 (ms)
85 */ 295 */
86 u32 max_scan_compensation_time; 296 CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR,
297 CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR,
87 298
88 /* Defines the PER threshold of the BT voice of which reaching this 299 /*
89 * value will trigger raising the priority of the BT voice until next 300 * The maximum time WLAN can gain the antenna for
90 * NFS sample interval time as defined in sample_interval. 301 * in WLAN Active / BT master EDR
91 * 302 *
92 * Unit: msec 303 * Range: 0 - 255 (ms)
93 * Range: 1-65000
94 */ 304 */
95 u16 nfs_sample_interval; 305 CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR,
96 306
97 /* 307 /*
98 * Defines the load ratio for the BT. 308 * Configure the min and max time BT gains the antenna
99 * The WLAN ratio is: 100 - load_ratio 309 * in WLAN Active / BT slave EDR
100 * 310 *
101 * Unit: Percent 311 * Range: 0 - 255 (ms)
102 * Range: 0-100
103 */ 312 */
104 u8 load_ratio; 313 CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR,
314 CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR,
105 315
106 /* 316 /*
107 * true - Co-ex is allowed to enter/exit P.S automatically and 317 * The maximum time WLAN can gain the antenna for
108 * transparently to the host 318 * in WLAN Active / BT slave EDR
109 * 319 *
110 * false - Co-ex is disallowed to enter/exit P.S and will trigger an 320 * Range: 0 - 255 (ms)
111 * event to the host to notify for the need to enter/exit P.S 321 */
112 * due to BT change state 322 CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR,
323
324 /*
325 * Configure the min and max time BT gains the antenna
326 * in WLAN Active / BT basic rate
327 *
328 * Range: 0 - 255 (ms)
329 */
330 CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR,
331 CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR,
332
333 /*
334 * The maximum time WLAN can gain the antenna for
335 * in WLAN Active / BT basic rate
113 * 336 *
337 * Range: 0 - 255 (ms)
114 */ 338 */
115 u8 auto_ps_mode; 339 CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR,
116 340
117 /* 341 /*
118 * This parameter defines the compensation percentage of num of probe 342 * Compensation percentage of WLAN passive scan window if initiated
119 * requests in case scan is initiated during BT voice/BT ACL 343 * during BT voice
120 * guaranteed link.
121 * 344 *
122 * Unit: Percent 345 * Range: 0 - 1000 (%)
123 * Range: 0-255 (0 - No compensation)
124 */ 346 */
125 u8 probe_req_compensation; 347 CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3,
126 348
127 /* 349 /*
128 * This parameter defines the compensation percentage of scan window 350 * Compensation percentage of WLAN passive scan window if initiated
129 * size in case scan is initiated during BT voice/BT ACL Guaranteed 351 * during BT A2DP
130 * link.
131 * 352 *
132 * Unit: Percent 353 * Range: 0 - 1000 (%)
133 * Range: 0-255 (0 - No compensation)
134 */ 354 */
135 u8 scan_window_compensation; 355 CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP,
136 356
137 /* 357 /*
138 * Defines the antenna configuration. 358 * Fixed time ensured for BT traffic to gain the antenna during WLAN
359 * passive scan.
139 * 360 *
140 * Range: 0 - Single Antenna; 1 - Dual Antenna 361 * Range: 0 - 1000 ms
141 */ 362 */
142 u8 antenna_config; 363 CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME,
143 364
144 /* 365 /*
145 * The percent out of the Max consecutive beacon miss roaming trigger 366 * Fixed time ensured for WLAN traffic to gain the antenna during WLAN
146 * which is the threshold for raising the priority of beacon 367 * passive scan.
147 * reception.
148 * 368 *
149 * Range: 1-100 369 * Range: 0 - 1000 ms
150 * N = MaxConsecutiveBeaconMiss
151 * P = coexMaxConsecutiveBeaconMissPrecent
152 * Threshold = MIN( N-1, round(N * P / 100))
153 */ 370 */
154 u8 beacon_miss_threshold; 371 CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME,
155 372
156 /* 373 /*
157 * The RX rate threshold below which rate adaptation is assumed to be 374 * Number of consequent BT voice frames not interrupted by WLAN
158 * occurring at the AP which will raise priority for ACTIVE_RX and RX
159 * SP.
160 * 375 *
161 * Range: HW_BIT_RATE_* 376 * Range: 0 - 100
162 */ 377 */
163 u32 rate_adaptation_threshold; 378 CONF_SG_HV3_MAX_SERVED,
164 379
165 /* 380 /*
166 * The SNR above which the RX rate threshold indicating AP rate 381 * Protection time of the DHCP procedure.
167 * adaptation is valid
168 * 382 *
169 * Range: -128 - 127 383 * Range: 0 - 100000 (ms)
170 */ 384 */
171 s8 rate_adaptation_snr; 385 CONF_SG_DHCP_TIME,
386
387 /*
388 * Compensation percentage of WLAN active scan window if initiated
389 * during BT A2DP
390 *
391 * Range: 0 - 1000 (%)
392 */
393 CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP,
394 CONF_SG_TEMP_PARAM_1,
395 CONF_SG_TEMP_PARAM_2,
396 CONF_SG_TEMP_PARAM_3,
397 CONF_SG_TEMP_PARAM_4,
398 CONF_SG_TEMP_PARAM_5,
399 CONF_SG_PARAMS_MAX,
400 CONF_SG_PARAMS_ALL = 0xff
401};
402
403struct conf_sg_settings {
404 u32 params[CONF_SG_PARAMS_MAX];
405 u8 state;
172}; 406};
173 407
174enum conf_rx_queue_type { 408enum conf_rx_queue_type {
@@ -440,6 +674,19 @@ struct conf_tx_settings {
440 */ 674 */
441 u16 tx_compl_threshold; 675 u16 tx_compl_threshold;
442 676
677 /*
678 * The rate used for control messages and scanning on the 2.4GHz band
679 *
680 * Range: CONF_HW_BIT_RATE_* bit mask
681 */
682 u32 basic_rate;
683
684 /*
685 * The rate used for control messages and scanning on the 5GHz band
686 *
687 * Range: CONF_HW_BIT_RATE_* bit mask
688 */
689 u32 basic_rate_5;
443}; 690};
444 691
445enum { 692enum {
@@ -509,65 +756,6 @@ enum {
509 CONF_TRIG_EVENT_DIR_BIDIR 756 CONF_TRIG_EVENT_DIR_BIDIR
510}; 757};
511 758
512
513struct conf_sig_trigger {
514 /*
515 * The RSSI / SNR threshold value.
516 *
517 * FIXME: what is the range?
518 */
519 s16 threshold;
520
521 /*
522 * Minimum delay between two trigger events for this trigger in ms.
523 *
524 * Range: 0 - 60000
525 */
526 u16 pacing;
527
528 /*
529 * The measurement data source for this trigger.
530 *
531 * Range: CONF_TRIG_METRIC_*
532 */
533 u8 metric;
534
535 /*
536 * The trigger type of this trigger.
537 *
538 * Range: CONF_TRIG_EVENT_TYPE_*
539 */
540 u8 type;
541
542 /*
543 * The direction of the trigger.
544 *
545 * Range: CONF_TRIG_EVENT_DIR_*
546 */
547 u8 direction;
548
549 /*
550 * Hysteresis range of the trigger around the threshold (in dB)
551 *
552 * Range: u8
553 */
554 u8 hysteresis;
555
556 /*
557 * Index of the trigger rule.
558 *
559 * Range: 0 - CONF_MAX_RSSI_SNR_TRIGGERS-1
560 */
561 u8 index;
562
563 /*
564 * Enable / disable this rule (to use for clearing rules.)
565 *
566 * Range: 1 - Enabled, 2 - Not enabled
567 */
568 u8 enable;
569};
570
571struct conf_sig_weights { 759struct conf_sig_weights {
572 760
573 /* 761 /*
@@ -686,12 +874,6 @@ struct conf_conn_settings {
686 u8 ps_poll_threshold; 874 u8 ps_poll_threshold;
687 875
688 /* 876 /*
689 * Configuration of signal (rssi/snr) triggers.
690 */
691 u8 sig_trigger_count;
692 struct conf_sig_trigger sig_trigger[CONF_MAX_RSSI_SNR_TRIGGERS];
693
694 /*
695 * Configuration of signal average weights. 877 * Configuration of signal average weights.
696 */ 878 */
697 struct conf_sig_weights sig_weights; 879 struct conf_sig_weights sig_weights;
@@ -721,6 +903,22 @@ struct conf_conn_settings {
721 * Range 0 - 255 903 * Range 0 - 255
722 */ 904 */
723 u8 psm_entry_retries; 905 u8 psm_entry_retries;
906
907 /*
908 *
909 * Specifies the interval of the connection keep-alive null-func
910 * frame in ms.
911 *
912 * Range: 1000 - 3600000
913 */
914 u32 keep_alive_interval;
915
916 /*
917 * Maximum listen interval supported by the driver in units of beacons.
918 *
919 * Range: u16
920 */
921 u8 max_listen_interval;
724}; 922};
725 923
726enum { 924enum {
@@ -782,6 +980,43 @@ struct conf_pm_config_settings {
782 bool host_fast_wakeup_support; 980 bool host_fast_wakeup_support;
783}; 981};
784 982
983struct conf_roam_trigger_settings {
984 /*
985 * The minimum interval between two trigger events.
986 *
987 * Range: 0 - 60000 ms
988 */
989 u16 trigger_pacing;
990
991 /*
992 * The weight for rssi/beacon average calculation
993 *
994 * Range: 0 - 255
995 */
996 u8 avg_weight_rssi_beacon;
997
998 /*
999 * The weight for rssi/data frame average calculation
1000 *
1001 * Range: 0 - 255
1002 */
1003 u8 avg_weight_rssi_data;
1004
1005 /*
1006 * The weight for snr/beacon average calculation
1007 *
1008 * Range: 0 - 255
1009 */
1010 u8 avg_weight_snr_beacon;
1011
1012 /*
1013 * The weight for snr/data frame average calculation
1014 *
1015 * Range: 0 - 255
1016 */
1017 u8 avg_weight_snr_data;
1018};
1019
785struct conf_drv_settings { 1020struct conf_drv_settings {
786 struct conf_sg_settings sg; 1021 struct conf_sg_settings sg;
787 struct conf_rx_settings rx; 1022 struct conf_rx_settings rx;
@@ -790,6 +1025,7 @@ struct conf_drv_settings {
790 struct conf_init_settings init; 1025 struct conf_init_settings init;
791 struct conf_itrim_settings itrim; 1026 struct conf_itrim_settings itrim;
792 struct conf_pm_config_settings pm_config; 1027 struct conf_pm_config_settings pm_config;
1028 struct conf_roam_trigger_settings roam_trigger;
793}; 1029};
794 1030
795#endif 1031#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index 3f7ff8d0cf5a..c239ef4d0b8d 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -29,6 +29,7 @@
29#include "wl1271.h" 29#include "wl1271.h"
30#include "wl1271_acx.h" 30#include "wl1271_acx.h"
31#include "wl1271_ps.h" 31#include "wl1271_ps.h"
32#include "wl1271_io.h"
32 33
33/* ms */ 34/* ms */
34#define WL1271_DEBUGFS_STATS_LIFETIME 1000 35#define WL1271_DEBUGFS_STATS_LIFETIME 1000
@@ -277,13 +278,10 @@ static ssize_t gpio_power_write(struct file *file,
277 goto out; 278 goto out;
278 } 279 }
279 280
280 if (value) { 281 if (value)
281 wl->set_power(true); 282 wl1271_power_on(wl);
282 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); 283 else
283 } else { 284 wl1271_power_off(wl);
284 wl->set_power(false);
285 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
286 }
287 285
288out: 286out:
289 mutex_unlock(&wl->mutex); 287 mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index 7468ef10194b..cf37aa6eb137 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -23,7 +23,6 @@
23 23
24#include "wl1271.h" 24#include "wl1271.h"
25#include "wl1271_reg.h" 25#include "wl1271_reg.h"
26#include "wl1271_spi.h"
27#include "wl1271_io.h" 26#include "wl1271_io.h"
28#include "wl1271_event.h" 27#include "wl1271_event.h"
29#include "wl1271_ps.h" 28#include "wl1271_ps.h"
@@ -32,34 +31,24 @@
32static int wl1271_event_scan_complete(struct wl1271 *wl, 31static int wl1271_event_scan_complete(struct wl1271 *wl,
33 struct event_mailbox *mbox) 32 struct event_mailbox *mbox)
34{ 33{
35 int size = sizeof(struct wl12xx_probe_req_template);
36 wl1271_debug(DEBUG_EVENT, "status: 0x%x", 34 wl1271_debug(DEBUG_EVENT, "status: 0x%x",
37 mbox->scheduled_scan_status); 35 mbox->scheduled_scan_status);
38 36
39 if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) { 37 if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
40 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) { 38 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
41 wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
42 NULL, size);
43 /* 2.4 GHz band scanned, scan 5 GHz band, pretend 39 /* 2.4 GHz band scanned, scan 5 GHz band, pretend
44 * to the wl1271_cmd_scan function that we are not 40 * to the wl1271_cmd_scan function that we are not
45 * scanning as it checks that. 41 * scanning as it checks that.
46 */ 42 */
47 clear_bit(WL1271_FLAG_SCANNING, &wl->flags); 43 clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
44 /* FIXME: ie missing! */
48 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len, 45 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
46 NULL, 0,
49 wl->scan.active, 47 wl->scan.active,
50 wl->scan.high_prio, 48 wl->scan.high_prio,
51 WL1271_SCAN_BAND_5_GHZ, 49 WL1271_SCAN_BAND_5_GHZ,
52 wl->scan.probe_requests); 50 wl->scan.probe_requests);
53 } else { 51 } else {
54 if (wl->scan.state == WL1271_SCAN_BAND_2_4_GHZ)
55 wl1271_cmd_template_set(wl,
56 CMD_TEMPL_CFG_PROBE_REQ_2_4,
57 NULL, size);
58 else
59 wl1271_cmd_template_set(wl,
60 CMD_TEMPL_CFG_PROBE_REQ_5,
61 NULL, size);
62
63 mutex_unlock(&wl->mutex); 52 mutex_unlock(&wl->mutex);
64 ieee80211_scan_completed(wl->hw, false); 53 ieee80211_scan_completed(wl->hw, false);
65 mutex_lock(&wl->mutex); 54 mutex_lock(&wl->mutex);
@@ -92,16 +81,9 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
92 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, 81 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
93 true); 82 true);
94 } else { 83 } else {
95 wl1271_error("PSM entry failed, giving up.\n"); 84 wl1271_info("No ack to nullfunc from AP.");
96 /* FIXME: this may need to be reconsidered. for now it
97 is not possible to indicate to the mac80211
98 afterwards that PSM entry failed. To maximize
99 functionality (receiving data and remaining
100 associated) make sure that we are in sync with the
101 AP in regard of PSM mode. */
102 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
103 false);
104 wl->psm_entry_retry = 0; 85 wl->psm_entry_retry = 0;
86 *beacon_loss = true;
105 } 87 }
106 break; 88 break;
107 case EVENT_ENTER_POWER_SAVE_SUCCESS: 89 case EVENT_ENTER_POWER_SAVE_SUCCESS:
@@ -143,6 +125,24 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
143 return ret; 125 return ret;
144} 126}
145 127
128static void wl1271_event_rssi_trigger(struct wl1271 *wl,
129 struct event_mailbox *mbox)
130{
131 enum nl80211_cqm_rssi_threshold_event event;
132 s8 metric = mbox->rssi_snr_trigger_metric[0];
133
134 wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
135
136 if (metric <= wl->rssi_thold)
137 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
138 else
139 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
140
141 if (event != wl->last_rssi_event)
142 ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL);
143 wl->last_rssi_event = event;
144}
145
146static void wl1271_event_mbox_dump(struct event_mailbox *mbox) 146static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
147{ 147{
148 wl1271_debug(DEBUG_EVENT, "MBOX DUMP:"); 148 wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -172,10 +172,13 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
172 * The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon 172 * The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
173 * filtering) is enabled. Without PSM, the stack will receive all 173 * filtering) is enabled. Without PSM, the stack will receive all
174 * beacons and can detect beacon loss by itself. 174 * beacons and can detect beacon loss by itself.
175 *
176 * As there's possibility that the driver disables PSM before receiving
177 * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
178 *
175 */ 179 */
176 if (vector & BSS_LOSE_EVENT_ID && 180 if (vector & BSS_LOSE_EVENT_ID) {
177 test_bit(WL1271_FLAG_PSM, &wl->flags)) { 181 wl1271_info("Beacon loss detected.");
178 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
179 182
180 /* indicate to the stack, that beacons have been lost */ 183 /* indicate to the stack, that beacons have been lost */
181 beacon_loss = true; 184 beacon_loss = true;
@@ -188,17 +191,15 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
188 return ret; 191 return ret;
189 } 192 }
190 193
191 if (wl->vif && beacon_loss) { 194 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
192 /* Obviously, it's dangerous to release the mutex while 195 wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
193 we are holding many of the variables in the wl struct. 196 if (wl->vif)
194 That's why it's done last in the function, and care must 197 wl1271_event_rssi_trigger(wl, mbox);
195 be taken that nothing more is done after this function
196 returns. */
197 mutex_unlock(&wl->mutex);
198 ieee80211_beacon_loss(wl->vif);
199 mutex_lock(&wl->mutex);
200 } 198 }
201 199
200 if (wl->vif && beacon_loss)
201 ieee80211_connection_loss(wl->vif);
202
202 return 0; 203 return 0;
203} 204}
204 205
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 278f9206aa56..58371008f270 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -38,6 +38,14 @@
38 */ 38 */
39 39
40enum { 40enum {
41 RSSI_SNR_TRIGGER_0_EVENT_ID = BIT(0),
42 RSSI_SNR_TRIGGER_1_EVENT_ID = BIT(1),
43 RSSI_SNR_TRIGGER_2_EVENT_ID = BIT(2),
44 RSSI_SNR_TRIGGER_3_EVENT_ID = BIT(3),
45 RSSI_SNR_TRIGGER_4_EVENT_ID = BIT(4),
46 RSSI_SNR_TRIGGER_5_EVENT_ID = BIT(5),
47 RSSI_SNR_TRIGGER_6_EVENT_ID = BIT(6),
48 RSSI_SNR_TRIGGER_7_EVENT_ID = BIT(7),
41 MEASUREMENT_START_EVENT_ID = BIT(8), 49 MEASUREMENT_START_EVENT_ID = BIT(8),
42 MEASUREMENT_COMPLETE_EVENT_ID = BIT(9), 50 MEASUREMENT_COMPLETE_EVENT_ID = BIT(9),
43 SCAN_COMPLETE_EVENT_ID = BIT(10), 51 SCAN_COMPLETE_EVENT_ID = BIT(10),
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index d189e8fe05a6..4447af1557f5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -52,50 +52,65 @@ static int wl1271_init_hwenc_config(struct wl1271 *wl)
52 52
53int wl1271_init_templates_config(struct wl1271 *wl) 53int wl1271_init_templates_config(struct wl1271 *wl)
54{ 54{
55 int ret; 55 int ret, i;
56 56
57 /* send empty templates for fw memory reservation */ 57 /* send empty templates for fw memory reservation */
58 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL, 58 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
59 sizeof(struct wl12xx_probe_req_template)); 59 sizeof(struct wl12xx_probe_req_template),
60 0, WL1271_RATE_AUTOMATIC);
60 if (ret < 0) 61 if (ret < 0)
61 return ret; 62 return ret;
62 63
63 if (wl1271_11a_enabled()) { 64 if (wl1271_11a_enabled()) {
65 size_t size = sizeof(struct wl12xx_probe_req_template);
64 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5, 66 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
65 NULL, 67 NULL, size, 0,
66 sizeof(struct wl12xx_probe_req_template)); 68 WL1271_RATE_AUTOMATIC);
67 if (ret < 0) 69 if (ret < 0)
68 return ret; 70 return ret;
69 } 71 }
70 72
71 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL, 73 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
72 sizeof(struct wl12xx_null_data_template)); 74 sizeof(struct wl12xx_null_data_template),
75 0, WL1271_RATE_AUTOMATIC);
73 if (ret < 0) 76 if (ret < 0)
74 return ret; 77 return ret;
75 78
76 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, NULL, 79 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, NULL,
77 sizeof(struct wl12xx_ps_poll_template)); 80 sizeof(struct wl12xx_ps_poll_template),
81 0, WL1271_RATE_AUTOMATIC);
78 if (ret < 0) 82 if (ret < 0)
79 return ret; 83 return ret;
80 84
81 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL, 85 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
82 sizeof 86 sizeof
83 (struct wl12xx_qos_null_data_template)); 87 (struct wl12xx_qos_null_data_template),
88 0, WL1271_RATE_AUTOMATIC);
84 if (ret < 0) 89 if (ret < 0)
85 return ret; 90 return ret;
86 91
87 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL, 92 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL,
88 sizeof 93 sizeof
89 (struct wl12xx_probe_resp_template)); 94 (struct wl12xx_probe_resp_template),
95 0, WL1271_RATE_AUTOMATIC);
90 if (ret < 0) 96 if (ret < 0)
91 return ret; 97 return ret;
92 98
93 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL, 99 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL,
94 sizeof 100 sizeof
95 (struct wl12xx_beacon_template)); 101 (struct wl12xx_beacon_template),
102 0, WL1271_RATE_AUTOMATIC);
96 if (ret < 0) 103 if (ret < 0)
97 return ret; 104 return ret;
98 105
106 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
107 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
108 WL1271_CMD_TEMPL_MAX_SIZE, i,
109 WL1271_RATE_AUTOMATIC);
110 if (ret < 0)
111 return ret;
112 }
113
99 return 0; 114 return 0;
100} 115}
101 116
@@ -161,11 +176,11 @@ int wl1271_init_pta(struct wl1271 *wl)
161{ 176{
162 int ret; 177 int ret;
163 178
164 ret = wl1271_acx_sg_enable(wl); 179 ret = wl1271_acx_sg_cfg(wl);
165 if (ret < 0) 180 if (ret < 0)
166 return ret; 181 return ret;
167 182
168 ret = wl1271_acx_sg_cfg(wl); 183 ret = wl1271_acx_sg_enable(wl, wl->sg_enabled);
169 if (ret < 0) 184 if (ret < 0)
170 return ret; 185 return ret;
171 186
@@ -237,7 +252,7 @@ int wl1271_hw_init(struct wl1271 *wl)
237 goto out_free_memmap; 252 goto out_free_memmap;
238 253
239 /* Initialize connection monitoring thresholds */ 254 /* Initialize connection monitoring thresholds */
240 ret = wl1271_acx_conn_monit_params(wl); 255 ret = wl1271_acx_conn_monit_params(wl, false);
241 if (ret < 0) 256 if (ret < 0)
242 goto out_free_memmap; 257 goto out_free_memmap;
243 258
@@ -325,6 +340,24 @@ int wl1271_hw_init(struct wl1271 *wl)
325 if (ret < 0) 340 if (ret < 0)
326 goto out_free_memmap; 341 goto out_free_memmap;
327 342
343 /* disable all keep-alive templates */
344 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
345 ret = wl1271_acx_keep_alive_config(wl, i,
346 ACX_KEEP_ALIVE_TPL_INVALID);
347 if (ret < 0)
348 goto out_free_memmap;
349 }
350
351 /* disable the keep-alive feature */
352 ret = wl1271_acx_keep_alive_mode(wl, false);
353 if (ret < 0)
354 goto out_free_memmap;
355
356 /* Configure rssi/snr averaging weights */
357 ret = wl1271_acx_rssi_snr_avg_weights(wl);
358 if (ret < 0)
359 goto out_free_memmap;
360
328 return 0; 361 return 0;
329 362
330 out_free_memmap: 363 out_free_memmap:
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.c b/drivers/net/wireless/wl12xx/wl1271_io.c
index 5cd94d5666c2..c8759acef131 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.c
+++ b/drivers/net/wireless/wl12xx/wl1271_io.c
@@ -28,30 +28,29 @@
28 28
29#include "wl1271.h" 29#include "wl1271.h"
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31#include "wl1271_spi.h"
32#include "wl1271_io.h" 31#include "wl1271_io.h"
33 32
34static int wl1271_translate_addr(struct wl1271 *wl, int addr) 33#define OCP_CMD_LOOP 32
34
35#define OCP_CMD_WRITE 0x1
36#define OCP_CMD_READ 0x2
37
38#define OCP_READY_MASK BIT(18)
39#define OCP_STATUS_MASK (BIT(16) | BIT(17))
40
41#define OCP_STATUS_NO_RESP 0x00000
42#define OCP_STATUS_OK 0x10000
43#define OCP_STATUS_REQ_FAILED 0x20000
44#define OCP_STATUS_RESP_ERROR 0x30000
45
46void wl1271_disable_interrupts(struct wl1271 *wl)
35{ 47{
36 /* 48 wl->if_ops->disable_irq(wl);
37 * To translate, first check to which window of addresses the 49}
38 * particular address belongs. Then subtract the starting address 50
39 * of that window from the address. Then, add offset of the 51void wl1271_enable_interrupts(struct wl1271 *wl)
40 * translated region. 52{
41 * 53 wl->if_ops->enable_irq(wl);
42 * The translated regions occur next to each other in physical device
43 * memory, so just add the sizes of the preceeding address regions to
44 * get the offset to the new region.
45 *
46 * Currently, only the two first regions are addressed, and the
47 * assumption is that all addresses will fall into either of those
48 * two.
49 */
50 if ((addr >= wl->part.reg.start) &&
51 (addr < wl->part.reg.start + wl->part.reg.size))
52 return addr - wl->part.reg.start + wl->part.mem.size;
53 else
54 return addr - wl->part.mem.start;
55} 54}
56 55
57/* Set the SPI partitions to access the chip addresses 56/* Set the SPI partitions to access the chip addresses
@@ -117,54 +116,12 @@ int wl1271_set_partition(struct wl1271 *wl,
117 116
118void wl1271_io_reset(struct wl1271 *wl) 117void wl1271_io_reset(struct wl1271 *wl)
119{ 118{
120 wl1271_spi_reset(wl); 119 wl->if_ops->reset(wl);
121} 120}
122 121
123void wl1271_io_init(struct wl1271 *wl) 122void wl1271_io_init(struct wl1271 *wl)
124{ 123{
125 wl1271_spi_init(wl); 124 wl->if_ops->init(wl);
126}
127
128void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
129 size_t len, bool fixed)
130{
131 wl1271_spi_raw_write(wl, addr, buf, len, fixed);
132}
133
134void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
135 size_t len, bool fixed)
136{
137 wl1271_spi_raw_read(wl, addr, buf, len, fixed);
138}
139
140void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
141 bool fixed)
142{
143 int physical;
144
145 physical = wl1271_translate_addr(wl, addr);
146
147 wl1271_spi_raw_read(wl, physical, buf, len, fixed);
148}
149
150void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
151 bool fixed)
152{
153 int physical;
154
155 physical = wl1271_translate_addr(wl, addr);
156
157 wl1271_spi_raw_write(wl, physical, buf, len, fixed);
158}
159
160u32 wl1271_read32(struct wl1271 *wl, int addr)
161{
162 return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
163}
164
165void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
166{
167 wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
168} 125}
169 126
170void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val) 127void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/wl1271_io.h
index fa9a0b35788f..bc806c74c63a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.h
+++ b/drivers/net/wireless/wl12xx/wl1271_io.h
@@ -25,44 +25,145 @@
25#ifndef __WL1271_IO_H__ 25#ifndef __WL1271_IO_H__
26#define __WL1271_IO_H__ 26#define __WL1271_IO_H__
27 27
28#include "wl1271_reg.h"
29
30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
31
32#define HW_PARTITION_REGISTERS_ADDR 0x1FFC0
33#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
34#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
35#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
39#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
40
41#define HW_ACCESS_REGISTER_SIZE 4
42
43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
44
28struct wl1271; 45struct wl1271;
29 46
47void wl1271_disable_interrupts(struct wl1271 *wl);
48void wl1271_enable_interrupts(struct wl1271 *wl);
49
30void wl1271_io_reset(struct wl1271 *wl); 50void wl1271_io_reset(struct wl1271 *wl);
31void wl1271_io_init(struct wl1271 *wl); 51void wl1271_io_init(struct wl1271 *wl);
32 52
33/* Raw target IO, address is not translated */ 53static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
34void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf, 54{
35 size_t len, bool fixed); 55 return wl->if_ops->dev(wl);
36void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf, 56}
37 size_t len, bool fixed);
38 57
39/* Translated target IO */
40void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
41 bool fixed);
42void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
43 bool fixed);
44u32 wl1271_read32(struct wl1271 *wl, int addr);
45void wl1271_write32(struct wl1271 *wl, int addr, u32 val);
46 58
47/* Top Register IO */ 59/* Raw target IO, address is not translated */
48void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val); 60static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
49u16 wl1271_top_reg_read(struct wl1271 *wl, int addr); 61 size_t len, bool fixed)
62{
63 wl->if_ops->write(wl, addr, buf, len, fixed);
64}
50 65
51int wl1271_set_partition(struct wl1271 *wl, 66static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
52 struct wl1271_partition_set *p); 67 size_t len, bool fixed)
68{
69 wl->if_ops->read(wl, addr, buf, len, fixed);
70}
53 71
54static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr) 72static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
55{ 73{
56 wl1271_raw_read(wl, addr, &wl->buffer_32, 74 wl1271_raw_read(wl, addr, &wl->buffer_32,
57 sizeof(wl->buffer_32), false); 75 sizeof(wl->buffer_32), false);
58 76
59 return wl->buffer_32; 77 return le32_to_cpu(wl->buffer_32);
60} 78}
61 79
62static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val) 80static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
63{ 81{
64 wl->buffer_32 = val; 82 wl->buffer_32 = cpu_to_le32(val);
65 wl1271_raw_write(wl, addr, &wl->buffer_32, 83 wl1271_raw_write(wl, addr, &wl->buffer_32,
66 sizeof(wl->buffer_32), false); 84 sizeof(wl->buffer_32), false);
67} 85}
86
87/* Translated target IO */
88static inline int wl1271_translate_addr(struct wl1271 *wl, int addr)
89{
90 /*
91 * To translate, first check to which window of addresses the
92 * particular address belongs. Then subtract the starting address
93 * of that window from the address. Then, add offset of the
94 * translated region.
95 *
96 * The translated regions occur next to each other in physical device
97 * memory, so just add the sizes of the preceeding address regions to
98 * get the offset to the new region.
99 *
100 * Currently, only the two first regions are addressed, and the
101 * assumption is that all addresses will fall into either of those
102 * two.
103 */
104 if ((addr >= wl->part.reg.start) &&
105 (addr < wl->part.reg.start + wl->part.reg.size))
106 return addr - wl->part.reg.start + wl->part.mem.size;
107 else
108 return addr - wl->part.mem.start;
109}
110
111static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf,
112 size_t len, bool fixed)
113{
114 int physical;
115
116 physical = wl1271_translate_addr(wl, addr);
117
118 wl1271_raw_read(wl, physical, buf, len, fixed);
119}
120
121static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf,
122 size_t len, bool fixed)
123{
124 int physical;
125
126 physical = wl1271_translate_addr(wl, addr);
127
128 wl1271_raw_write(wl, physical, buf, len, fixed);
129}
130
131static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
132{
133 return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
134}
135
136static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
137{
138 wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
139}
140
141static inline void wl1271_power_off(struct wl1271 *wl)
142{
143 wl->if_ops->power(wl, false);
144 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
145}
146
147static inline void wl1271_power_on(struct wl1271 *wl)
148{
149 wl->if_ops->power(wl, true);
150 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
151}
152
153
154/* Top Register IO */
155void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
156u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
157
158int wl1271_set_partition(struct wl1271 *wl,
159 struct wl1271_partition_set *p);
160
161/* Functions from wl1271_main.c */
162
163int wl1271_register_hw(struct wl1271 *wl);
164void wl1271_unregister_hw(struct wl1271 *wl);
165int wl1271_init_ieee80211(struct wl1271 *wl);
166struct ieee80211_hw *wl1271_alloc_hw(void);
167int wl1271_free_hw(struct wl1271 *wl);
168
68#endif 169#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 65a1aeba2419..b7d9137851ac 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -22,23 +22,19 @@
22 */ 22 */
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/interrupt.h>
27#include <linux/firmware.h> 25#include <linux/firmware.h>
28#include <linux/delay.h> 26#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
31#include <linux/crc32.h> 28#include <linux/crc32.h>
32#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
33#include <linux/vmalloc.h> 30#include <linux/vmalloc.h>
34#include <linux/spi/wl12xx.h>
35#include <linux/inetdevice.h> 31#include <linux/inetdevice.h>
32#include <linux/platform_device.h>
36#include <linux/slab.h> 33#include <linux/slab.h>
37 34
38#include "wl1271.h" 35#include "wl1271.h"
39#include "wl12xx_80211.h" 36#include "wl12xx_80211.h"
40#include "wl1271_reg.h" 37#include "wl1271_reg.h"
41#include "wl1271_spi.h"
42#include "wl1271_io.h" 38#include "wl1271_io.h"
43#include "wl1271_event.h" 39#include "wl1271_event.h"
44#include "wl1271_tx.h" 40#include "wl1271_tx.h"
@@ -54,17 +50,57 @@
54 50
55static struct conf_drv_settings default_conf = { 51static struct conf_drv_settings default_conf = {
56 .sg = { 52 .sg = {
57 .per_threshold = 7500, 53 .params = {
58 .max_scan_compensation_time = 120000, 54 [CONF_SG_BT_PER_THRESHOLD] = 7500,
59 .nfs_sample_interval = 400, 55 [CONF_SG_HV3_MAX_OVERRIDE] = 0,
60 .load_ratio = 50, 56 [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
61 .auto_ps_mode = 0, 57 [CONF_SG_BT_LOAD_RATIO] = 50,
62 .probe_req_compensation = 170, 58 [CONF_SG_AUTO_PS_MODE] = 0,
63 .scan_window_compensation = 50, 59 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
64 .antenna_config = 0, 60 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
65 .beacon_miss_threshold = 60, 61 [CONF_SG_ANTENNA_CONFIGURATION] = 0,
66 .rate_adaptation_threshold = CONF_HW_BIT_RATE_12MBPS, 62 [CONF_SG_BEACON_MISS_PERCENT] = 60,
67 .rate_adaptation_snr = 0 63 [CONF_SG_RATE_ADAPT_THRESH] = 12,
64 [CONF_SG_RATE_ADAPT_SNR] = 0,
65 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR] = 10,
66 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR] = 30,
67 [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR] = 8,
68 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR] = 20,
69 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR] = 50,
70 /* Note: with UPSD, this should be 4 */
71 [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR] = 8,
72 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR] = 7,
73 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR] = 25,
74 [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR] = 20,
75 /* Note: with UPDS, this should be 15 */
76 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR] = 8,
77 /* Note: with UPDS, this should be 50 */
78 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR] = 40,
79 /* Note: with UPDS, this should be 10 */
80 [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR] = 20,
81 [CONF_SG_RXT] = 1200,
82 [CONF_SG_TXT] = 1000,
83 [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
84 [CONF_SG_PS_POLL_TIMEOUT] = 10,
85 [CONF_SG_UPSD_TIMEOUT] = 10,
86 [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR] = 7,
87 [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR] = 15,
88 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR] = 15,
89 [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR] = 8,
90 [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR] = 20,
91 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR] = 15,
92 [CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR] = 20,
93 [CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR] = 50,
94 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR] = 10,
95 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
96 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP] = 800,
97 [CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME] = 75,
98 [CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME] = 15,
99 [CONF_SG_HV3_MAX_SERVED] = 6,
100 [CONF_SG_DHCP_TIME] = 5000,
101 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
102 },
103 .state = CONF_SG_PROTECTIVE,
68 }, 104 },
69 .rx = { 105 .rx = {
70 .rx_msdu_life_time = 512000, 106 .rx_msdu_life_time = 512000,
@@ -81,8 +117,7 @@ static struct conf_drv_settings default_conf = {
81 .tx = { 117 .tx = {
82 .tx_energy_detection = 0, 118 .tx_energy_detection = 0,
83 .rc_conf = { 119 .rc_conf = {
84 .enabled_rates = CONF_HW_BIT_RATE_1MBPS | 120 .enabled_rates = 0,
85 CONF_HW_BIT_RATE_2MBPS,
86 .short_retry_limit = 10, 121 .short_retry_limit = 10,
87 .long_retry_limit = 10, 122 .long_retry_limit = 10,
88 .aflags = 0 123 .aflags = 0
@@ -179,11 +214,13 @@ static struct conf_drv_settings default_conf = {
179 }, 214 },
180 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD, 215 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
181 .tx_compl_timeout = 700, 216 .tx_compl_timeout = 700,
182 .tx_compl_threshold = 4 217 .tx_compl_threshold = 4,
218 .basic_rate = CONF_HW_BIT_RATE_1MBPS,
219 .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
183 }, 220 },
184 .conn = { 221 .conn = {
185 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, 222 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
186 .listen_interval = 0, 223 .listen_interval = 1,
187 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED, 224 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
188 .bcn_filt_ie_count = 1, 225 .bcn_filt_ie_count = 1,
189 .bcn_filt_ie = { 226 .bcn_filt_ie = {
@@ -198,38 +235,11 @@ static struct conf_drv_settings default_conf = {
198 .broadcast_timeout = 20000, 235 .broadcast_timeout = 20000,
199 .rx_broadcast_in_ps = 1, 236 .rx_broadcast_in_ps = 1,
200 .ps_poll_threshold = 20, 237 .ps_poll_threshold = 20,
201 .sig_trigger_count = 2,
202 .sig_trigger = {
203 [0] = {
204 .threshold = -75,
205 .pacing = 500,
206 .metric = CONF_TRIG_METRIC_RSSI_BEACON,
207 .type = CONF_TRIG_EVENT_TYPE_EDGE,
208 .direction = CONF_TRIG_EVENT_DIR_LOW,
209 .hysteresis = 2,
210 .index = 0,
211 .enable = 1
212 },
213 [1] = {
214 .threshold = -75,
215 .pacing = 500,
216 .metric = CONF_TRIG_METRIC_RSSI_BEACON,
217 .type = CONF_TRIG_EVENT_TYPE_EDGE,
218 .direction = CONF_TRIG_EVENT_DIR_HIGH,
219 .hysteresis = 2,
220 .index = 1,
221 .enable = 1
222 }
223 },
224 .sig_weights = {
225 .rssi_bcn_avg_weight = 10,
226 .rssi_pkt_avg_weight = 10,
227 .snr_bcn_avg_weight = 10,
228 .snr_pkt_avg_weight = 10
229 },
230 .bet_enable = CONF_BET_MODE_ENABLE, 238 .bet_enable = CONF_BET_MODE_ENABLE,
231 .bet_max_consecutive = 10, 239 .bet_max_consecutive = 10,
232 .psm_entry_retries = 3 240 .psm_entry_retries = 3,
241 .keep_alive_interval = 55000,
242 .max_listen_interval = 20,
233 }, 243 },
234 .init = { 244 .init = {
235 .radioparam = { 245 .radioparam = {
@@ -243,9 +253,32 @@ static struct conf_drv_settings default_conf = {
243 .pm_config = { 253 .pm_config = {
244 .host_clk_settling_time = 5000, 254 .host_clk_settling_time = 5000,
245 .host_fast_wakeup_support = false 255 .host_fast_wakeup_support = false
256 },
257 .roam_trigger = {
258 /* FIXME: due to firmware bug, must use value 1 for now */
259 .trigger_pacing = 1,
260 .avg_weight_rssi_beacon = 20,
261 .avg_weight_rssi_data = 10,
262 .avg_weight_snr_beacon = 20,
263 .avg_weight_snr_data = 10
246 } 264 }
247}; 265};
248 266
267static void wl1271_device_release(struct device *dev)
268{
269
270}
271
272static struct platform_device wl1271_device = {
273 .name = "wl1271",
274 .id = -1,
275
276 /* device model insists to have a release function */
277 .dev = {
278 .release = wl1271_device_release,
279 },
280};
281
249static LIST_HEAD(wl_list); 282static LIST_HEAD(wl_list);
250 283
251static void wl1271_conf_init(struct wl1271 *wl) 284static void wl1271_conf_init(struct wl1271 *wl)
@@ -298,7 +331,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
298 goto out_free_memmap; 331 goto out_free_memmap;
299 332
300 /* Initialize connection monitoring thresholds */ 333 /* Initialize connection monitoring thresholds */
301 ret = wl1271_acx_conn_monit_params(wl); 334 ret = wl1271_acx_conn_monit_params(wl, false);
302 if (ret < 0) 335 if (ret < 0)
303 goto out_free_memmap; 336 goto out_free_memmap;
304 337
@@ -365,30 +398,14 @@ static int wl1271_plt_init(struct wl1271 *wl)
365 return ret; 398 return ret;
366} 399}
367 400
368static void wl1271_disable_interrupts(struct wl1271 *wl)
369{
370 disable_irq(wl->irq);
371}
372
373static void wl1271_power_off(struct wl1271 *wl)
374{
375 wl->set_power(false);
376 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
377}
378
379static void wl1271_power_on(struct wl1271 *wl)
380{
381 wl->set_power(true);
382 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
383}
384
385static void wl1271_fw_status(struct wl1271 *wl, 401static void wl1271_fw_status(struct wl1271 *wl,
386 struct wl1271_fw_status *status) 402 struct wl1271_fw_status *status)
387{ 403{
404 struct timespec ts;
388 u32 total = 0; 405 u32 total = 0;
389 int i; 406 int i;
390 407
391 wl1271_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false); 408 wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
392 409
393 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 410 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
394 "drv_rx_counter = %d, tx_results_counter = %d)", 411 "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -413,14 +430,19 @@ static void wl1271_fw_status(struct wl1271 *wl,
413 ieee80211_queue_work(wl->hw, &wl->tx_work); 430 ieee80211_queue_work(wl->hw, &wl->tx_work);
414 431
415 /* update the host-chipset time offset */ 432 /* update the host-chipset time offset */
416 wl->time_offset = jiffies_to_usecs(jiffies) - 433 getnstimeofday(&ts);
417 le32_to_cpu(status->fw_localtime); 434 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
435 (s64)le32_to_cpu(status->fw_localtime);
418} 436}
419 437
438#define WL1271_IRQ_MAX_LOOPS 10
439
420static void wl1271_irq_work(struct work_struct *work) 440static void wl1271_irq_work(struct work_struct *work)
421{ 441{
422 int ret; 442 int ret;
423 u32 intr; 443 u32 intr;
444 int loopcount = WL1271_IRQ_MAX_LOOPS;
445 unsigned long flags;
424 struct wl1271 *wl = 446 struct wl1271 *wl =
425 container_of(work, struct wl1271, irq_work); 447 container_of(work, struct wl1271, irq_work);
426 448
@@ -428,91 +450,78 @@ static void wl1271_irq_work(struct work_struct *work)
428 450
429 wl1271_debug(DEBUG_IRQ, "IRQ work"); 451 wl1271_debug(DEBUG_IRQ, "IRQ work");
430 452
431 if (wl->state == WL1271_STATE_OFF) 453 if (unlikely(wl->state == WL1271_STATE_OFF))
432 goto out; 454 goto out;
433 455
434 ret = wl1271_ps_elp_wakeup(wl, true); 456 ret = wl1271_ps_elp_wakeup(wl, true);
435 if (ret < 0) 457 if (ret < 0)
436 goto out; 458 goto out;
437 459
438 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); 460 spin_lock_irqsave(&wl->wl_lock, flags);
439 461 while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
440 wl1271_fw_status(wl, wl->fw_status); 462 clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
441 intr = le32_to_cpu(wl->fw_status->intr); 463 spin_unlock_irqrestore(&wl->wl_lock, flags);
442 if (!intr) { 464 loopcount--;
443 wl1271_debug(DEBUG_IRQ, "Zero interrupt received."); 465
444 goto out_sleep; 466 wl1271_fw_status(wl, wl->fw_status);
445 } 467 intr = le32_to_cpu(wl->fw_status->intr);
468 if (!intr) {
469 wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
470 spin_lock_irqsave(&wl->wl_lock, flags);
471 continue;
472 }
446 473
447 intr &= WL1271_INTR_MASK; 474 intr &= WL1271_INTR_MASK;
448 475
449 if (intr & WL1271_ACX_INTR_EVENT_A) { 476 if (intr & WL1271_ACX_INTR_DATA) {
450 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); 477 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
451 wl1271_event_handle(wl, 0);
452 }
453 478
454 if (intr & WL1271_ACX_INTR_EVENT_B) { 479 /* check for tx results */
455 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); 480 if (wl->fw_status->tx_results_counter !=
456 wl1271_event_handle(wl, 1); 481 (wl->tx_results_count & 0xff))
457 } 482 wl1271_tx_complete(wl);
458 483
459 if (intr & WL1271_ACX_INTR_INIT_COMPLETE) 484 wl1271_rx(wl, wl->fw_status);
460 wl1271_debug(DEBUG_IRQ, 485 }
461 "WL1271_ACX_INTR_INIT_COMPLETE");
462 486
463 if (intr & WL1271_ACX_INTR_HW_AVAILABLE) 487 if (intr & WL1271_ACX_INTR_EVENT_A) {
464 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); 488 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
489 wl1271_event_handle(wl, 0);
490 }
465 491
466 if (intr & WL1271_ACX_INTR_DATA) { 492 if (intr & WL1271_ACX_INTR_EVENT_B) {
467 u8 tx_res_cnt = wl->fw_status->tx_results_counter - 493 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
468 wl->tx_results_count; 494 wl1271_event_handle(wl, 1);
495 }
469 496
470 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 497 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
498 wl1271_debug(DEBUG_IRQ,
499 "WL1271_ACX_INTR_INIT_COMPLETE");
471 500
472 /* check for tx results */ 501 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
473 if (tx_res_cnt) 502 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
474 wl1271_tx_complete(wl, tx_res_cnt);
475 503
476 wl1271_rx(wl, wl->fw_status); 504 spin_lock_irqsave(&wl->wl_lock, flags);
477 } 505 }
478 506
479out_sleep: 507 if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
480 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, 508 ieee80211_queue_work(wl->hw, &wl->irq_work);
481 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 509 else
510 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
511 spin_unlock_irqrestore(&wl->wl_lock, flags);
512
482 wl1271_ps_elp_sleep(wl); 513 wl1271_ps_elp_sleep(wl);
483 514
484out: 515out:
485 mutex_unlock(&wl->mutex); 516 mutex_unlock(&wl->mutex);
486} 517}
487 518
488static irqreturn_t wl1271_irq(int irq, void *cookie)
489{
490 struct wl1271 *wl;
491 unsigned long flags;
492
493 wl1271_debug(DEBUG_IRQ, "IRQ");
494
495 wl = cookie;
496
497 /* complete the ELP completion */
498 spin_lock_irqsave(&wl->wl_lock, flags);
499 if (wl->elp_compl) {
500 complete(wl->elp_compl);
501 wl->elp_compl = NULL;
502 }
503
504 ieee80211_queue_work(wl->hw, &wl->irq_work);
505 spin_unlock_irqrestore(&wl->wl_lock, flags);
506
507 return IRQ_HANDLED;
508}
509
510static int wl1271_fetch_firmware(struct wl1271 *wl) 519static int wl1271_fetch_firmware(struct wl1271 *wl)
511{ 520{
512 const struct firmware *fw; 521 const struct firmware *fw;
513 int ret; 522 int ret;
514 523
515 ret = request_firmware(&fw, WL1271_FW_NAME, &wl->spi->dev); 524 ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
516 525
517 if (ret < 0) { 526 if (ret < 0) {
518 wl1271_error("could not get firmware: %d", ret); 527 wl1271_error("could not get firmware: %d", ret);
@@ -545,46 +554,12 @@ out:
545 return ret; 554 return ret;
546} 555}
547 556
548static int wl1271_update_mac_addr(struct wl1271 *wl)
549{
550 int ret = 0;
551 u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
552
553 /* get mac address from the NVS */
554 wl->mac_addr[0] = nvs_ptr[11];
555 wl->mac_addr[1] = nvs_ptr[10];
556 wl->mac_addr[2] = nvs_ptr[6];
557 wl->mac_addr[3] = nvs_ptr[5];
558 wl->mac_addr[4] = nvs_ptr[4];
559 wl->mac_addr[5] = nvs_ptr[3];
560
561 /* FIXME: if it is a zero-address, we should bail out. Now, instead,
562 we randomize an address */
563 if (is_zero_ether_addr(wl->mac_addr)) {
564 static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
565 memcpy(wl->mac_addr, nokia_oui, 3);
566 get_random_bytes(wl->mac_addr + 3, 3);
567
568 /* update this address to the NVS */
569 nvs_ptr[11] = wl->mac_addr[0];
570 nvs_ptr[10] = wl->mac_addr[1];
571 nvs_ptr[6] = wl->mac_addr[2];
572 nvs_ptr[5] = wl->mac_addr[3];
573 nvs_ptr[4] = wl->mac_addr[4];
574 nvs_ptr[3] = wl->mac_addr[5];
575 }
576
577 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
578
579 return ret;
580}
581
582static int wl1271_fetch_nvs(struct wl1271 *wl) 557static int wl1271_fetch_nvs(struct wl1271 *wl)
583{ 558{
584 const struct firmware *fw; 559 const struct firmware *fw;
585 int ret; 560 int ret;
586 561
587 ret = request_firmware(&fw, WL1271_NVS_NAME, &wl->spi->dev); 562 ret = request_firmware(&fw, WL1271_NVS_NAME, wl1271_wl_to_dev(wl));
588 563
589 if (ret < 0) { 564 if (ret < 0) {
590 wl1271_error("could not get nvs file: %d", ret); 565 wl1271_error("could not get nvs file: %d", ret);
@@ -608,8 +583,6 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
608 583
609 memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file)); 584 memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file));
610 585
611 ret = wl1271_update_mac_addr(wl);
612
613out: 586out:
614 release_firmware(fw); 587 release_firmware(fw);
615 588
@@ -826,15 +799,13 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
826 * The workqueue is slow to process the tx_queue and we need stop 799 * The workqueue is slow to process the tx_queue and we need stop
827 * the queue here, otherwise the queue will get too long. 800 * the queue here, otherwise the queue will get too long.
828 */ 801 */
829 if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_MAX_LENGTH) { 802 if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
830 ieee80211_stop_queues(wl->hw); 803 wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
831 804
832 /* 805 spin_lock_irqsave(&wl->wl_lock, flags);
833 * FIXME: this is racy, the variable is not properly 806 ieee80211_stop_queues(wl->hw);
834 * protected. Maybe fix this by removing the stupid
835 * variable altogether and checking the real queue state?
836 */
837 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags); 807 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
808 spin_unlock_irqrestore(&wl->wl_lock, flags);
838 } 809 }
839 810
840 return NETDEV_TX_OK; 811 return NETDEV_TX_OK;
@@ -882,7 +853,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
882 if (wl == wl_temp) 853 if (wl == wl_temp)
883 break; 854 break;
884 } 855 }
885 if (wl == NULL) 856 if (wl != wl_temp)
886 return NOTIFY_DONE; 857 return NOTIFY_DONE;
887 858
888 /* Get the interface IP address for the device. "ifa" will become 859 /* Get the interface IP address for the device. "ifa" will become
@@ -929,13 +900,60 @@ static struct notifier_block wl1271_dev_notifier = {
929 900
930static int wl1271_op_start(struct ieee80211_hw *hw) 901static int wl1271_op_start(struct ieee80211_hw *hw)
931{ 902{
903 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
904
905 /*
906 * We have to delay the booting of the hardware because
907 * we need to know the local MAC address before downloading and
908 * initializing the firmware. The MAC address cannot be changed
909 * after boot, and without the proper MAC address, the firmware
910 * will not function properly.
911 *
912 * The MAC address is first known when the corresponding interface
913 * is added. That is where we will initialize the hardware.
914 */
915
916 return 0;
917}
918
919static void wl1271_op_stop(struct ieee80211_hw *hw)
920{
921 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
922}
923
924static int wl1271_op_add_interface(struct ieee80211_hw *hw,
925 struct ieee80211_vif *vif)
926{
932 struct wl1271 *wl = hw->priv; 927 struct wl1271 *wl = hw->priv;
933 int retries = WL1271_BOOT_RETRIES; 928 int retries = WL1271_BOOT_RETRIES;
934 int ret = 0; 929 int ret = 0;
935 930
936 wl1271_debug(DEBUG_MAC80211, "mac80211 start"); 931 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
932 vif->type, vif->addr);
937 933
938 mutex_lock(&wl->mutex); 934 mutex_lock(&wl->mutex);
935 if (wl->vif) {
936 ret = -EBUSY;
937 goto out;
938 }
939
940 wl->vif = vif;
941
942 switch (vif->type) {
943 case NL80211_IFTYPE_STATION:
944 wl->bss_type = BSS_TYPE_STA_BSS;
945 wl->set_bss_type = BSS_TYPE_STA_BSS;
946 break;
947 case NL80211_IFTYPE_ADHOC:
948 wl->bss_type = BSS_TYPE_IBSS;
949 wl->set_bss_type = BSS_TYPE_STA_BSS;
950 break;
951 default:
952 ret = -EOPNOTSUPP;
953 goto out;
954 }
955
956 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
939 957
940 if (wl->state != WL1271_STATE_OFF) { 958 if (wl->state != WL1271_STATE_OFF) {
941 wl1271_error("cannot start because not in off state: %d", 959 wl1271_error("cannot start because not in off state: %d",
@@ -991,19 +1009,20 @@ out:
991 return ret; 1009 return ret;
992} 1010}
993 1011
994static void wl1271_op_stop(struct ieee80211_hw *hw) 1012static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1013 struct ieee80211_vif *vif)
995{ 1014{
996 struct wl1271 *wl = hw->priv; 1015 struct wl1271 *wl = hw->priv;
997 int i; 1016 int i;
998 1017
999 wl1271_info("down");
1000
1001 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1002
1003 unregister_inetaddr_notifier(&wl1271_dev_notifier); 1018 unregister_inetaddr_notifier(&wl1271_dev_notifier);
1004 list_del(&wl->list);
1005 1019
1006 mutex_lock(&wl->mutex); 1020 mutex_lock(&wl->mutex);
1021 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
1022
1023 wl1271_info("down");
1024
1025 list_del(&wl->list);
1007 1026
1008 WARN_ON(wl->state != WL1271_STATE_ON); 1027 WARN_ON(wl->state != WL1271_STATE_ON);
1009 1028
@@ -1032,6 +1051,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1032 memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1); 1051 memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1);
1033 wl->ssid_len = 0; 1052 wl->ssid_len = 0;
1034 wl->bss_type = MAX_BSS_TYPE; 1053 wl->bss_type = MAX_BSS_TYPE;
1054 wl->set_bss_type = MAX_BSS_TYPE;
1035 wl->band = IEEE80211_BAND_2GHZ; 1055 wl->band = IEEE80211_BAND_2GHZ;
1036 1056
1037 wl->rx_counter = 0; 1057 wl->rx_counter = 0;
@@ -1041,163 +1061,142 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1041 wl->tx_results_count = 0; 1061 wl->tx_results_count = 0;
1042 wl->tx_packets_count = 0; 1062 wl->tx_packets_count = 0;
1043 wl->tx_security_last_seq = 0; 1063 wl->tx_security_last_seq = 0;
1044 wl->tx_security_seq_16 = 0; 1064 wl->tx_security_seq = 0;
1045 wl->tx_security_seq_32 = 0;
1046 wl->time_offset = 0; 1065 wl->time_offset = 0;
1047 wl->session_counter = 0; 1066 wl->session_counter = 0;
1048 wl->rate_set = CONF_TX_RATE_MASK_BASIC; 1067 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1049 wl->sta_rate_set = 0; 1068 wl->sta_rate_set = 0;
1050 wl->flags = 0; 1069 wl->flags = 0;
1070 wl->vif = NULL;
1071 wl->filters = 0;
1051 1072
1052 for (i = 0; i < NUM_TX_QUEUES; i++) 1073 for (i = 0; i < NUM_TX_QUEUES; i++)
1053 wl->tx_blocks_freed[i] = 0; 1074 wl->tx_blocks_freed[i] = 0;
1054 1075
1055 wl1271_debugfs_reset(wl); 1076 wl1271_debugfs_reset(wl);
1077
1078 kfree(wl->fw_status);
1079 wl->fw_status = NULL;
1080 kfree(wl->tx_res_if);
1081 wl->tx_res_if = NULL;
1082 kfree(wl->target_mem_map);
1083 wl->target_mem_map = NULL;
1084
1056 mutex_unlock(&wl->mutex); 1085 mutex_unlock(&wl->mutex);
1057} 1086}
1058 1087
1059static int wl1271_op_add_interface(struct ieee80211_hw *hw, 1088static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
1060 struct ieee80211_vif *vif)
1061{ 1089{
1062 struct wl1271 *wl = hw->priv; 1090 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
1063 int ret = 0; 1091 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
1064 1092
1065 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 1093 /* combine requested filters with current filter config */
1066 vif->type, vif->addr); 1094 filters = wl->filters | filters;
1067 1095
1068 mutex_lock(&wl->mutex); 1096 wl1271_debug(DEBUG_FILTERS, "RX filters set: ");
1069 if (wl->vif) { 1097
1070 ret = -EBUSY; 1098 if (filters & FIF_PROMISC_IN_BSS) {
1071 goto out; 1099 wl1271_debug(DEBUG_FILTERS, " - FIF_PROMISC_IN_BSS");
1100 wl->rx_config &= ~CFG_UNI_FILTER_EN;
1101 wl->rx_config |= CFG_BSSID_FILTER_EN;
1102 }
1103 if (filters & FIF_BCN_PRBRESP_PROMISC) {
1104 wl1271_debug(DEBUG_FILTERS, " - FIF_BCN_PRBRESP_PROMISC");
1105 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
1106 wl->rx_config &= ~CFG_SSID_FILTER_EN;
1072 } 1107 }
1108 if (filters & FIF_OTHER_BSS) {
1109 wl1271_debug(DEBUG_FILTERS, " - FIF_OTHER_BSS");
1110 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
1111 }
1112 if (filters & FIF_CONTROL) {
1113 wl1271_debug(DEBUG_FILTERS, " - FIF_CONTROL");
1114 wl->rx_filter |= CFG_RX_CTL_EN;
1115 }
1116 if (filters & FIF_FCSFAIL) {
1117 wl1271_debug(DEBUG_FILTERS, " - FIF_FCSFAIL");
1118 wl->rx_filter |= CFG_RX_FCS_ERROR;
1119 }
1120}
1073 1121
1074 wl->vif = vif; 1122static int wl1271_dummy_join(struct wl1271 *wl)
1123{
1124 int ret = 0;
1125 /* we need to use a dummy BSSID for now */
1126 static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
1127 0xad, 0xbe, 0xef };
1075 1128
1076 switch (vif->type) { 1129 memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
1077 case NL80211_IFTYPE_STATION: 1130
1078 wl->bss_type = BSS_TYPE_STA_BSS; 1131 /* pass through frames from all BSS */
1079 break; 1132 wl1271_configure_filters(wl, FIF_OTHER_BSS);
1080 case NL80211_IFTYPE_ADHOC: 1133
1081 wl->bss_type = BSS_TYPE_IBSS; 1134 ret = wl1271_cmd_join(wl, wl->set_bss_type);
1082 break; 1135 if (ret < 0)
1083 default:
1084 ret = -EOPNOTSUPP;
1085 goto out; 1136 goto out;
1086 }
1087 1137
1088 /* FIXME: what if conf->mac_addr changes? */ 1138 set_bit(WL1271_FLAG_JOINED, &wl->flags);
1089 1139
1090out: 1140out:
1091 mutex_unlock(&wl->mutex);
1092 return ret; 1141 return ret;
1093} 1142}
1094 1143
1095static void wl1271_op_remove_interface(struct ieee80211_hw *hw, 1144static int wl1271_join(struct wl1271 *wl, bool set_assoc)
1096 struct ieee80211_vif *vif)
1097{
1098 struct wl1271 *wl = hw->priv;
1099
1100 mutex_lock(&wl->mutex);
1101 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
1102 wl->vif = NULL;
1103 mutex_unlock(&wl->mutex);
1104}
1105
1106#if 0
1107static int wl1271_op_config_interface(struct ieee80211_hw *hw,
1108 struct ieee80211_vif *vif,
1109 struct ieee80211_if_conf *conf)
1110{ 1145{
1111 struct wl1271 *wl = hw->priv;
1112 struct sk_buff *beacon;
1113 int ret; 1146 int ret;
1114 1147
1115 wl1271_debug(DEBUG_MAC80211, "mac80211 config_interface bssid %pM", 1148 /*
1116 conf->bssid); 1149 * One of the side effects of the JOIN command is that is clears
1117 wl1271_dump_ascii(DEBUG_MAC80211, "ssid: ", conf->ssid, 1150 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
1118 conf->ssid_len); 1151 * to a WPA/WPA2 access point will therefore kill the data-path.
1152 * Currently there is no supported scenario for JOIN during
1153 * association - if it becomes a supported scenario, the WPA/WPA2 keys
1154 * must be handled somehow.
1155 *
1156 */
1157 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
1158 wl1271_info("JOIN while associated.");
1119 1159
1120 mutex_lock(&wl->mutex); 1160 if (set_assoc)
1161 set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1121 1162
1122 ret = wl1271_ps_elp_wakeup(wl, false); 1163 ret = wl1271_cmd_join(wl, wl->set_bss_type);
1123 if (ret < 0) 1164 if (ret < 0)
1124 goto out; 1165 goto out;
1125 1166
1126 if (memcmp(wl->bssid, conf->bssid, ETH_ALEN)) { 1167 set_bit(WL1271_FLAG_JOINED, &wl->flags);
1127 wl1271_debug(DEBUG_MAC80211, "bssid changed");
1128
1129 memcpy(wl->bssid, conf->bssid, ETH_ALEN);
1130
1131 ret = wl1271_cmd_join(wl);
1132 if (ret < 0)
1133 goto out_sleep;
1134
1135 ret = wl1271_cmd_build_null_data(wl);
1136 if (ret < 0)
1137 goto out_sleep;
1138 }
1139
1140 wl->ssid_len = conf->ssid_len;
1141 if (wl->ssid_len)
1142 memcpy(wl->ssid, conf->ssid, wl->ssid_len);
1143
1144 if (conf->changed & IEEE80211_IFCC_BEACON) {
1145 beacon = ieee80211_beacon_get(hw, vif);
1146 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
1147 beacon->data, beacon->len);
1148
1149 if (ret < 0) {
1150 dev_kfree_skb(beacon);
1151 goto out_sleep;
1152 }
1153
1154 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE,
1155 beacon->data, beacon->len);
1156
1157 dev_kfree_skb(beacon);
1158
1159 if (ret < 0)
1160 goto out_sleep;
1161 }
1162
1163out_sleep:
1164 wl1271_ps_elp_sleep(wl);
1165
1166out:
1167 mutex_unlock(&wl->mutex);
1168
1169 return ret;
1170}
1171#endif
1172
1173static int wl1271_join_channel(struct wl1271 *wl, int channel)
1174{
1175 int ret = 0;
1176 /* we need to use a dummy BSSID for now */
1177 static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
1178 0xad, 0xbe, 0xef };
1179 1168
1180 /* the dummy join is not required for ad-hoc */ 1169 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
1181 if (wl->bss_type == BSS_TYPE_IBSS)
1182 goto out; 1170 goto out;
1183 1171
1184 /* disable mac filter, so we hear everything */ 1172 /*
1185 wl->rx_config &= ~CFG_BSSID_FILTER_EN; 1173 * The join command disable the keep-alive mode, shut down its process,
1174 * and also clear the template config, so we need to reset it all after
1175 * the join. The acx_aid starts the keep-alive process, and the order
1176 * of the commands below is relevant.
1177 */
1178 ret = wl1271_acx_keep_alive_mode(wl, true);
1179 if (ret < 0)
1180 goto out;
1186 1181
1187 wl->channel = channel; 1182 ret = wl1271_acx_aid(wl, wl->aid);
1188 memcpy(wl->bssid, dummy_bssid, ETH_ALEN); 1183 if (ret < 0)
1184 goto out;
1189 1185
1190 ret = wl1271_cmd_join(wl); 1186 ret = wl1271_cmd_build_klv_null_data(wl);
1191 if (ret < 0) 1187 if (ret < 0)
1192 goto out; 1188 goto out;
1193 1189
1194 set_bit(WL1271_FLAG_JOINED, &wl->flags); 1190 ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
1191 ACX_KEEP_ALIVE_TPL_VALID);
1192 if (ret < 0)
1193 goto out;
1195 1194
1196out: 1195out:
1197 return ret; 1196 return ret;
1198} 1197}
1199 1198
1200static int wl1271_unjoin_channel(struct wl1271 *wl) 1199static int wl1271_unjoin(struct wl1271 *wl)
1201{ 1200{
1202 int ret; 1201 int ret;
1203 1202
@@ -1207,14 +1206,41 @@ static int wl1271_unjoin_channel(struct wl1271 *wl)
1207 goto out; 1206 goto out;
1208 1207
1209 clear_bit(WL1271_FLAG_JOINED, &wl->flags); 1208 clear_bit(WL1271_FLAG_JOINED, &wl->flags);
1210 wl->channel = 0;
1211 memset(wl->bssid, 0, ETH_ALEN); 1209 memset(wl->bssid, 0, ETH_ALEN);
1212 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 1210
1211 /* stop filterting packets based on bssid */
1212 wl1271_configure_filters(wl, FIF_OTHER_BSS);
1213 1213
1214out: 1214out:
1215 return ret; 1215 return ret;
1216} 1216}
1217 1217
1218static void wl1271_set_band_rate(struct wl1271 *wl)
1219{
1220 if (wl->band == IEEE80211_BAND_2GHZ)
1221 wl->basic_rate_set = wl->conf.tx.basic_rate;
1222 else
1223 wl->basic_rate_set = wl->conf.tx.basic_rate_5;
1224}
1225
1226static u32 wl1271_min_rate_get(struct wl1271 *wl)
1227{
1228 int i;
1229 u32 rate = 0;
1230
1231 if (!wl->basic_rate_set) {
1232 WARN_ON(1);
1233 wl->basic_rate_set = wl->conf.tx.basic_rate;
1234 }
1235
1236 for (i = 0; !rate; i++) {
1237 if ((wl->basic_rate_set >> i) & 0x1)
1238 rate = 1 << i;
1239 }
1240
1241 return rate;
1242}
1243
1218static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) 1244static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1219{ 1245{
1220 struct wl1271 *wl = hw->priv; 1246 struct wl1271 *wl = hw->priv;
@@ -1231,38 +1257,62 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1231 1257
1232 mutex_lock(&wl->mutex); 1258 mutex_lock(&wl->mutex);
1233 1259
1234 wl->band = conf->channel->band; 1260 if (unlikely(wl->state == WL1271_STATE_OFF))
1261 goto out;
1235 1262
1236 ret = wl1271_ps_elp_wakeup(wl, false); 1263 ret = wl1271_ps_elp_wakeup(wl, false);
1237 if (ret < 0) 1264 if (ret < 0)
1238 goto out; 1265 goto out;
1239 1266
1267 /* if the channel changes while joined, join again */
1268 if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
1269 ((wl->band != conf->channel->band) ||
1270 (wl->channel != channel))) {
1271 wl->band = conf->channel->band;
1272 wl->channel = channel;
1273
1274 /*
1275 * FIXME: the mac80211 should really provide a fixed rate
1276 * to use here. for now, just use the smallest possible rate
1277 * for the band as a fixed rate for association frames and
1278 * other control messages.
1279 */
1280 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
1281 wl1271_set_band_rate(wl);
1282
1283 wl->basic_rate = wl1271_min_rate_get(wl);
1284 ret = wl1271_acx_rate_policies(wl);
1285 if (ret < 0)
1286 wl1271_warning("rate policy for update channel "
1287 "failed %d", ret);
1288
1289 if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
1290 ret = wl1271_join(wl, false);
1291 if (ret < 0)
1292 wl1271_warning("cmd join to update channel "
1293 "failed %d", ret);
1294 }
1295 }
1296
1240 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1297 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1241 if (conf->flags & IEEE80211_CONF_IDLE && 1298 if (conf->flags & IEEE80211_CONF_IDLE &&
1242 test_bit(WL1271_FLAG_JOINED, &wl->flags)) 1299 test_bit(WL1271_FLAG_JOINED, &wl->flags))
1243 wl1271_unjoin_channel(wl); 1300 wl1271_unjoin(wl);
1244 else if (!(conf->flags & IEEE80211_CONF_IDLE)) 1301 else if (!(conf->flags & IEEE80211_CONF_IDLE))
1245 wl1271_join_channel(wl, channel); 1302 wl1271_dummy_join(wl);
1246 1303
1247 if (conf->flags & IEEE80211_CONF_IDLE) { 1304 if (conf->flags & IEEE80211_CONF_IDLE) {
1248 wl->rate_set = CONF_TX_RATE_MASK_BASIC; 1305 wl->rate_set = wl1271_min_rate_get(wl);
1249 wl->sta_rate_set = 0; 1306 wl->sta_rate_set = 0;
1250 wl1271_acx_rate_policies(wl); 1307 wl1271_acx_rate_policies(wl);
1251 } 1308 wl1271_acx_keep_alive_config(
1309 wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
1310 ACX_KEEP_ALIVE_TPL_INVALID);
1311 set_bit(WL1271_FLAG_IDLE, &wl->flags);
1312 } else
1313 clear_bit(WL1271_FLAG_IDLE, &wl->flags);
1252 } 1314 }
1253 1315
1254 /* if the channel changes while joined, join again */
1255 if (channel != wl->channel &&
1256 test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
1257 wl->channel = channel;
1258 /* FIXME: maybe use CMD_CHANNEL_SWITCH for this? */
1259 ret = wl1271_cmd_join(wl);
1260 if (ret < 0)
1261 wl1271_warning("cmd join to update channel failed %d",
1262 ret);
1263 } else
1264 wl->channel = channel;
1265
1266 if (conf->flags & IEEE80211_CONF_PS && 1316 if (conf->flags & IEEE80211_CONF_PS &&
1267 !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { 1317 !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
1268 set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); 1318 set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
@@ -1273,13 +1323,13 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1273 * through the bss_info_changed() hook. 1323 * through the bss_info_changed() hook.
1274 */ 1324 */
1275 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { 1325 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
1276 wl1271_info("psm enabled"); 1326 wl1271_debug(DEBUG_PSM, "psm enabled");
1277 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, 1327 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
1278 true); 1328 true);
1279 } 1329 }
1280 } else if (!(conf->flags & IEEE80211_CONF_PS) && 1330 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
1281 test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { 1331 test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
1282 wl1271_info("psm disabled"); 1332 wl1271_debug(DEBUG_PSM, "psm disabled");
1283 1333
1284 clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); 1334 clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
1285 1335
@@ -1311,11 +1361,15 @@ struct wl1271_filter_params {
1311 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN]; 1361 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
1312}; 1362};
1313 1363
1314static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count, 1364static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
1315 struct dev_addr_list *mc_list) 1365 struct netdev_hw_addr_list *mc_list)
1316{ 1366{
1317 struct wl1271_filter_params *fp; 1367 struct wl1271_filter_params *fp;
1318 int i; 1368 struct netdev_hw_addr *ha;
1369 struct wl1271 *wl = hw->priv;
1370
1371 if (unlikely(wl->state == WL1271_STATE_OFF))
1372 return 0;
1319 1373
1320 fp = kzalloc(sizeof(*fp), GFP_ATOMIC); 1374 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
1321 if (!fp) { 1375 if (!fp) {
@@ -1324,21 +1378,16 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
1324 } 1378 }
1325 1379
1326 /* update multicast filtering parameters */ 1380 /* update multicast filtering parameters */
1327 fp->enabled = true;
1328 if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) {
1329 mc_count = 0;
1330 fp->enabled = false;
1331 }
1332
1333 fp->mc_list_length = 0; 1381 fp->mc_list_length = 0;
1334 for (i = 0; i < mc_count; i++) { 1382 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
1335 if (mc_list->da_addrlen == ETH_ALEN) { 1383 fp->enabled = false;
1384 } else {
1385 fp->enabled = true;
1386 netdev_hw_addr_list_for_each(ha, mc_list) {
1336 memcpy(fp->mc_list[fp->mc_list_length], 1387 memcpy(fp->mc_list[fp->mc_list_length],
1337 mc_list->da_addr, ETH_ALEN); 1388 ha->addr, ETH_ALEN);
1338 fp->mc_list_length++; 1389 fp->mc_list_length++;
1339 } else 1390 }
1340 wl1271_warning("Unknown mc address length.");
1341 mc_list = mc_list->next;
1342 } 1391 }
1343 1392
1344 return (u64)(unsigned long)fp; 1393 return (u64)(unsigned long)fp;
@@ -1363,15 +1412,16 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
1363 1412
1364 mutex_lock(&wl->mutex); 1413 mutex_lock(&wl->mutex);
1365 1414
1366 if (wl->state == WL1271_STATE_OFF) 1415 *total &= WL1271_SUPPORTED_FILTERS;
1416 changed &= WL1271_SUPPORTED_FILTERS;
1417
1418 if (unlikely(wl->state == WL1271_STATE_OFF))
1367 goto out; 1419 goto out;
1368 1420
1369 ret = wl1271_ps_elp_wakeup(wl, false); 1421 ret = wl1271_ps_elp_wakeup(wl, false);
1370 if (ret < 0) 1422 if (ret < 0)
1371 goto out; 1423 goto out;
1372 1424
1373 *total &= WL1271_SUPPORTED_FILTERS;
1374 changed &= WL1271_SUPPORTED_FILTERS;
1375 1425
1376 if (*total & FIF_ALLMULTI) 1426 if (*total & FIF_ALLMULTI)
1377 ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0); 1427 ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
@@ -1382,14 +1432,14 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
1382 if (ret < 0) 1432 if (ret < 0)
1383 goto out_sleep; 1433 goto out_sleep;
1384 1434
1385 kfree(fp);
1386
1387 /* FIXME: We still need to set our filters properly */
1388
1389 /* determine, whether supported filter values have changed */ 1435 /* determine, whether supported filter values have changed */
1390 if (changed == 0) 1436 if (changed == 0)
1391 goto out_sleep; 1437 goto out_sleep;
1392 1438
1439 /* configure filters */
1440 wl->filters = *total;
1441 wl1271_configure_filters(wl, 0);
1442
1393 /* apply configured filters */ 1443 /* apply configured filters */
1394 ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter); 1444 ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
1395 if (ret < 0) 1445 if (ret < 0)
@@ -1400,6 +1450,7 @@ out_sleep:
1400 1450
1401out: 1451out:
1402 mutex_unlock(&wl->mutex); 1452 mutex_unlock(&wl->mutex);
1453 kfree(fp);
1403} 1454}
1404 1455
1405static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 1456static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -1450,15 +1501,15 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1450 key_type = KEY_TKIP; 1501 key_type = KEY_TKIP;
1451 1502
1452 key_conf->hw_key_idx = key_conf->keyidx; 1503 key_conf->hw_key_idx = key_conf->keyidx;
1453 tx_seq_32 = wl->tx_security_seq_32; 1504 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
1454 tx_seq_16 = wl->tx_security_seq_16; 1505 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
1455 break; 1506 break;
1456 case ALG_CCMP: 1507 case ALG_CCMP:
1457 key_type = KEY_AES; 1508 key_type = KEY_AES;
1458 1509
1459 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 1510 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1460 tx_seq_32 = wl->tx_security_seq_32; 1511 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
1461 tx_seq_16 = wl->tx_security_seq_16; 1512 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
1462 break; 1513 break;
1463 default: 1514 default:
1464 wl1271_error("Unknown key algo 0x%x", key_conf->alg); 1515 wl1271_error("Unknown key algo 0x%x", key_conf->alg);
@@ -1508,8 +1559,6 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1508 default: 1559 default:
1509 wl1271_error("Unsupported key cmd 0x%x", cmd); 1560 wl1271_error("Unsupported key cmd 0x%x", cmd);
1510 ret = -EOPNOTSUPP; 1561 ret = -EOPNOTSUPP;
1511 goto out_sleep;
1512
1513 break; 1562 break;
1514 } 1563 }
1515 1564
@@ -1524,6 +1573,7 @@ out:
1524} 1573}
1525 1574
1526static int wl1271_op_hw_scan(struct ieee80211_hw *hw, 1575static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
1576 struct ieee80211_vif *vif,
1527 struct cfg80211_scan_request *req) 1577 struct cfg80211_scan_request *req)
1528{ 1578{
1529 struct wl1271 *wl = hw->priv; 1579 struct wl1271 *wl = hw->priv;
@@ -1545,10 +1595,12 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
1545 goto out; 1595 goto out;
1546 1596
1547 if (wl1271_11a_enabled()) 1597 if (wl1271_11a_enabled())
1548 ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0, 1598 ret = wl1271_cmd_scan(hw->priv, ssid, len,
1599 req->ie, req->ie_len, 1, 0,
1549 WL1271_SCAN_BAND_DUAL, 3); 1600 WL1271_SCAN_BAND_DUAL, 3);
1550 else 1601 else
1551 ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0, 1602 ret = wl1271_cmd_scan(hw->priv, ssid, len,
1603 req->ie, req->ie_len, 1, 0,
1552 WL1271_SCAN_BAND_2_4_GHZ, 3); 1604 WL1271_SCAN_BAND_2_4_GHZ, 3);
1553 1605
1554 wl1271_ps_elp_sleep(wl); 1606 wl1271_ps_elp_sleep(wl);
@@ -1562,10 +1614,13 @@ out:
1562static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 1614static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1563{ 1615{
1564 struct wl1271 *wl = hw->priv; 1616 struct wl1271 *wl = hw->priv;
1565 int ret; 1617 int ret = 0;
1566 1618
1567 mutex_lock(&wl->mutex); 1619 mutex_lock(&wl->mutex);
1568 1620
1621 if (unlikely(wl->state == WL1271_STATE_OFF))
1622 goto out;
1623
1569 ret = wl1271_ps_elp_wakeup(wl, false); 1624 ret = wl1271_ps_elp_wakeup(wl, false);
1570 if (ret < 0) 1625 if (ret < 0)
1571 goto out; 1626 goto out;
@@ -1607,6 +1662,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1607 enum wl1271_cmd_ps_mode mode; 1662 enum wl1271_cmd_ps_mode mode;
1608 struct wl1271 *wl = hw->priv; 1663 struct wl1271 *wl = hw->priv;
1609 bool do_join = false; 1664 bool do_join = false;
1665 bool set_assoc = false;
1610 int ret; 1666 int ret;
1611 1667
1612 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed"); 1668 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1617,20 +1673,29 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1617 if (ret < 0) 1673 if (ret < 0)
1618 goto out; 1674 goto out;
1619 1675
1620 if (wl->bss_type == BSS_TYPE_IBSS) { 1676 if ((changed && BSS_CHANGED_BEACON_INT) &&
1621 /* FIXME: This implements rudimentary ad-hoc support - 1677 (wl->bss_type == BSS_TYPE_IBSS)) {
1622 proper templates are on the wish list and notification 1678 wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
1623 on when they change. This patch will update the templates 1679 bss_conf->beacon_int);
1624 on every call to this function. */ 1680
1681 wl->beacon_int = bss_conf->beacon_int;
1682 do_join = true;
1683 }
1684
1685 if ((changed && BSS_CHANGED_BEACON) &&
1686 (wl->bss_type == BSS_TYPE_IBSS)) {
1625 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 1687 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
1626 1688
1689 wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated");
1690
1627 if (beacon) { 1691 if (beacon) {
1628 struct ieee80211_hdr *hdr; 1692 struct ieee80211_hdr *hdr;
1629 1693
1630 wl1271_ssid_set(wl, beacon); 1694 wl1271_ssid_set(wl, beacon);
1631 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, 1695 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
1632 beacon->data, 1696 beacon->data,
1633 beacon->len); 1697 beacon->len, 0,
1698 wl1271_min_rate_get(wl));
1634 1699
1635 if (ret < 0) { 1700 if (ret < 0) {
1636 dev_kfree_skb(beacon); 1701 dev_kfree_skb(beacon);
@@ -1645,7 +1710,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1645 ret = wl1271_cmd_template_set(wl, 1710 ret = wl1271_cmd_template_set(wl,
1646 CMD_TEMPL_PROBE_RESPONSE, 1711 CMD_TEMPL_PROBE_RESPONSE,
1647 beacon->data, 1712 beacon->data,
1648 beacon->len); 1713 beacon->len, 0,
1714 wl1271_min_rate_get(wl));
1649 dev_kfree_skb(beacon); 1715 dev_kfree_skb(beacon);
1650 if (ret < 0) 1716 if (ret < 0)
1651 goto out_sleep; 1717 goto out_sleep;
@@ -1655,20 +1721,48 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1655 } 1721 }
1656 } 1722 }
1657 1723
1724 if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
1725 (wl->bss_type == BSS_TYPE_IBSS)) {
1726 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
1727 bss_conf->enable_beacon ? "enabled" : "disabled");
1728
1729 if (bss_conf->enable_beacon)
1730 wl->set_bss_type = BSS_TYPE_IBSS;
1731 else
1732 wl->set_bss_type = BSS_TYPE_STA_BSS;
1733 do_join = true;
1734 }
1735
1736 if (changed & BSS_CHANGED_CQM) {
1737 bool enable = false;
1738 if (bss_conf->cqm_rssi_thold)
1739 enable = true;
1740 ret = wl1271_acx_rssi_snr_trigger(wl, enable,
1741 bss_conf->cqm_rssi_thold,
1742 bss_conf->cqm_rssi_hyst);
1743 if (ret < 0)
1744 goto out;
1745 wl->rssi_thold = bss_conf->cqm_rssi_thold;
1746 }
1747
1658 if ((changed & BSS_CHANGED_BSSID) && 1748 if ((changed & BSS_CHANGED_BSSID) &&
1659 /* 1749 /*
1660 * Now we know the correct bssid, so we send a new join command 1750 * Now we know the correct bssid, so we send a new join command
1661 * and enable the BSSID filter 1751 * and enable the BSSID filter
1662 */ 1752 */
1663 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) { 1753 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
1664 wl->rx_config |= CFG_BSSID_FILTER_EN;
1665 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); 1754 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
1755
1666 ret = wl1271_cmd_build_null_data(wl); 1756 ret = wl1271_cmd_build_null_data(wl);
1667 if (ret < 0) { 1757 if (ret < 0)
1668 wl1271_warning("cmd buld null data failed %d", 1758 goto out_sleep;
1669 ret); 1759
1760 ret = wl1271_build_qos_null_data(wl);
1761 if (ret < 0)
1670 goto out_sleep; 1762 goto out_sleep;
1671 } 1763
1764 /* filter out all packets not from this BSSID */
1765 wl1271_configure_filters(wl, 0);
1672 1766
1673 /* Need to update the BSSID (for filtering etc) */ 1767 /* Need to update the BSSID (for filtering etc) */
1674 do_join = true; 1768 do_join = true;
@@ -1676,8 +1770,21 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1676 1770
1677 if (changed & BSS_CHANGED_ASSOC) { 1771 if (changed & BSS_CHANGED_ASSOC) {
1678 if (bss_conf->assoc) { 1772 if (bss_conf->assoc) {
1773 u32 rates;
1679 wl->aid = bss_conf->aid; 1774 wl->aid = bss_conf->aid;
1680 set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); 1775 set_assoc = true;
1776
1777 /*
1778 * use basic rates from AP, and determine lowest rate
1779 * to use with control frames.
1780 */
1781 rates = bss_conf->basic_rates;
1782 wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
1783 rates);
1784 wl->basic_rate = wl1271_min_rate_get(wl);
1785 ret = wl1271_acx_rate_policies(wl);
1786 if (ret < 0)
1787 goto out_sleep;
1681 1788
1682 /* 1789 /*
1683 * with wl1271, we don't need to update the 1790 * with wl1271, we don't need to update the
@@ -1689,7 +1796,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1689 if (ret < 0) 1796 if (ret < 0)
1690 goto out_sleep; 1797 goto out_sleep;
1691 1798
1692 ret = wl1271_acx_aid(wl, wl->aid); 1799 /*
1800 * The SSID is intentionally set to NULL here - the
1801 * firmware will set the probe request with a
1802 * broadcast SSID regardless of what we set in the
1803 * template.
1804 */
1805 ret = wl1271_cmd_build_probe_req(wl, NULL, 0,
1806 NULL, 0, wl->band);
1807
1808 /* enable the connection monitoring feature */
1809 ret = wl1271_acx_conn_monit_params(wl, true);
1693 if (ret < 0) 1810 if (ret < 0)
1694 goto out_sleep; 1811 goto out_sleep;
1695 1812
@@ -1705,6 +1822,22 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1705 /* use defaults when not associated */ 1822 /* use defaults when not associated */
1706 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); 1823 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1707 wl->aid = 0; 1824 wl->aid = 0;
1825
1826 /* revert back to minimum rates for the current band */
1827 wl1271_set_band_rate(wl);
1828 wl->basic_rate = wl1271_min_rate_get(wl);
1829 ret = wl1271_acx_rate_policies(wl);
1830 if (ret < 0)
1831 goto out_sleep;
1832
1833 /* disable connection monitor features */
1834 ret = wl1271_acx_conn_monit_params(wl, false);
1835
1836 /* Disable the keep-alive feature */
1837 ret = wl1271_acx_keep_alive_mode(wl, false);
1838
1839 if (ret < 0)
1840 goto out_sleep;
1708 } 1841 }
1709 1842
1710 } 1843 }
@@ -1739,12 +1872,11 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1739 } 1872 }
1740 1873
1741 if (do_join) { 1874 if (do_join) {
1742 ret = wl1271_cmd_join(wl); 1875 ret = wl1271_join(wl, set_assoc);
1743 if (ret < 0) { 1876 if (ret < 0) {
1744 wl1271_warning("cmd join failed %d", ret); 1877 wl1271_warning("cmd join failed %d", ret);
1745 goto out_sleep; 1878 goto out_sleep;
1746 } 1879 }
1747 set_bit(WL1271_FLAG_JOINED, &wl->flags);
1748 } 1880 }
1749 1881
1750out_sleep: 1882out_sleep:
@@ -1758,6 +1890,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1758 const struct ieee80211_tx_queue_params *params) 1890 const struct ieee80211_tx_queue_params *params)
1759{ 1891{
1760 struct wl1271 *wl = hw->priv; 1892 struct wl1271 *wl = hw->priv;
1893 u8 ps_scheme;
1761 int ret; 1894 int ret;
1762 1895
1763 mutex_lock(&wl->mutex); 1896 mutex_lock(&wl->mutex);
@@ -1768,17 +1901,22 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1768 if (ret < 0) 1901 if (ret < 0)
1769 goto out; 1902 goto out;
1770 1903
1904 /* the txop is confed in units of 32us by the mac80211, we need us */
1771 ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue), 1905 ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
1772 params->cw_min, params->cw_max, 1906 params->cw_min, params->cw_max,
1773 params->aifs, params->txop); 1907 params->aifs, params->txop << 5);
1774 if (ret < 0) 1908 if (ret < 0)
1775 goto out_sleep; 1909 goto out_sleep;
1776 1910
1911 if (params->uapsd)
1912 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
1913 else
1914 ps_scheme = CONF_PS_SCHEME_LEGACY;
1915
1777 ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue), 1916 ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
1778 CONF_CHANNEL_TYPE_EDCF, 1917 CONF_CHANNEL_TYPE_EDCF,
1779 wl1271_tx_get_queue(queue), 1918 wl1271_tx_get_queue(queue),
1780 CONF_PS_SCHEME_LEGACY_PSPOLL, 1919 ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0);
1781 CONF_ACK_POLICY_LEGACY, 0, 0);
1782 if (ret < 0) 1920 if (ret < 0)
1783 goto out_sleep; 1921 goto out_sleep;
1784 1922
@@ -1852,6 +1990,36 @@ static struct ieee80211_channel wl1271_channels[] = {
1852 { .hw_value = 13, .center_freq = 2472, .max_power = 25 }, 1990 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
1853}; 1991};
1854 1992
1993/* mapping to indexes for wl1271_rates */
1994const static u8 wl1271_rate_to_idx_2ghz[] = {
1995 /* MCS rates are used only with 11n */
1996 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
1997 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
1998 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
1999 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
2000 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
2001 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
2002 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
2003 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
2004
2005 11, /* CONF_HW_RXTX_RATE_54 */
2006 10, /* CONF_HW_RXTX_RATE_48 */
2007 9, /* CONF_HW_RXTX_RATE_36 */
2008 8, /* CONF_HW_RXTX_RATE_24 */
2009
2010 /* TI-specific rate */
2011 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
2012
2013 7, /* CONF_HW_RXTX_RATE_18 */
2014 6, /* CONF_HW_RXTX_RATE_12 */
2015 3, /* CONF_HW_RXTX_RATE_11 */
2016 5, /* CONF_HW_RXTX_RATE_9 */
2017 4, /* CONF_HW_RXTX_RATE_6 */
2018 2, /* CONF_HW_RXTX_RATE_5_5 */
2019 1, /* CONF_HW_RXTX_RATE_2 */
2020 0 /* CONF_HW_RXTX_RATE_1 */
2021};
2022
1855/* can't be const, mac80211 writes to this */ 2023/* can't be const, mac80211 writes to this */
1856static struct ieee80211_supported_band wl1271_band_2ghz = { 2024static struct ieee80211_supported_band wl1271_band_2ghz = {
1857 .channels = wl1271_channels, 2025 .channels = wl1271_channels,
@@ -1934,6 +2102,35 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = {
1934 { .hw_value = 165, .center_freq = 5825}, 2102 { .hw_value = 165, .center_freq = 5825},
1935}; 2103};
1936 2104
2105/* mapping to indexes for wl1271_rates_5ghz */
2106const static u8 wl1271_rate_to_idx_5ghz[] = {
2107 /* MCS rates are used only with 11n */
2108 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
2109 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
2110 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
2111 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
2112 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
2113 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
2114 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
2115 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
2116
2117 7, /* CONF_HW_RXTX_RATE_54 */
2118 6, /* CONF_HW_RXTX_RATE_48 */
2119 5, /* CONF_HW_RXTX_RATE_36 */
2120 4, /* CONF_HW_RXTX_RATE_24 */
2121
2122 /* TI-specific rate */
2123 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
2124
2125 3, /* CONF_HW_RXTX_RATE_18 */
2126 2, /* CONF_HW_RXTX_RATE_12 */
2127 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_11 */
2128 1, /* CONF_HW_RXTX_RATE_9 */
2129 0, /* CONF_HW_RXTX_RATE_6 */
2130 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_5_5 */
2131 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_2 */
2132 CONF_HW_RXTX_RATE_UNSUPPORTED /* CONF_HW_RXTX_RATE_1 */
2133};
1937 2134
1938static struct ieee80211_supported_band wl1271_band_5ghz = { 2135static struct ieee80211_supported_band wl1271_band_5ghz = {
1939 .channels = wl1271_channels_5ghz, 2136 .channels = wl1271_channels_5ghz,
@@ -1942,13 +2139,17 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
1942 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), 2139 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
1943}; 2140};
1944 2141
2142const static u8 *wl1271_band_rate_to_idx[] = {
2143 [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz,
2144 [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz
2145};
2146
1945static const struct ieee80211_ops wl1271_ops = { 2147static const struct ieee80211_ops wl1271_ops = {
1946 .start = wl1271_op_start, 2148 .start = wl1271_op_start,
1947 .stop = wl1271_op_stop, 2149 .stop = wl1271_op_stop,
1948 .add_interface = wl1271_op_add_interface, 2150 .add_interface = wl1271_op_add_interface,
1949 .remove_interface = wl1271_op_remove_interface, 2151 .remove_interface = wl1271_op_remove_interface,
1950 .config = wl1271_op_config, 2152 .config = wl1271_op_config,
1951/* .config_interface = wl1271_op_config_interface, */
1952 .prepare_multicast = wl1271_op_prepare_multicast, 2153 .prepare_multicast = wl1271_op_prepare_multicast,
1953 .configure_filter = wl1271_op_configure_filter, 2154 .configure_filter = wl1271_op_configure_filter,
1954 .tx = wl1271_op_tx, 2155 .tx = wl1271_op_tx,
@@ -1960,7 +2161,113 @@ static const struct ieee80211_ops wl1271_ops = {
1960 CFG80211_TESTMODE_CMD(wl1271_tm_cmd) 2161 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
1961}; 2162};
1962 2163
1963static int wl1271_register_hw(struct wl1271 *wl) 2164
2165u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate)
2166{
2167 u8 idx;
2168
2169 BUG_ON(wl->band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *));
2170
2171 if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) {
2172 wl1271_error("Illegal RX rate from HW: %d", rate);
2173 return 0;
2174 }
2175
2176 idx = wl1271_band_rate_to_idx[wl->band][rate];
2177 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
2178 wl1271_error("Unsupported RX rate from HW: %d", rate);
2179 return 0;
2180 }
2181
2182 return idx;
2183}
2184
2185static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
2186 struct device_attribute *attr,
2187 char *buf)
2188{
2189 struct wl1271 *wl = dev_get_drvdata(dev);
2190 ssize_t len;
2191
2192 /* FIXME: what's the maximum length of buf? page size?*/
2193 len = 500;
2194
2195 mutex_lock(&wl->mutex);
2196 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
2197 wl->sg_enabled);
2198 mutex_unlock(&wl->mutex);
2199
2200 return len;
2201
2202}
2203
2204static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
2205 struct device_attribute *attr,
2206 const char *buf, size_t count)
2207{
2208 struct wl1271 *wl = dev_get_drvdata(dev);
2209 unsigned long res;
2210 int ret;
2211
2212 ret = strict_strtoul(buf, 10, &res);
2213
2214 if (ret < 0) {
2215 wl1271_warning("incorrect value written to bt_coex_mode");
2216 return count;
2217 }
2218
2219 mutex_lock(&wl->mutex);
2220
2221 res = !!res;
2222
2223 if (res == wl->sg_enabled)
2224 goto out;
2225
2226 wl->sg_enabled = res;
2227
2228 if (wl->state == WL1271_STATE_OFF)
2229 goto out;
2230
2231 ret = wl1271_ps_elp_wakeup(wl, false);
2232 if (ret < 0)
2233 goto out;
2234
2235 wl1271_acx_sg_enable(wl, wl->sg_enabled);
2236 wl1271_ps_elp_sleep(wl);
2237
2238 out:
2239 mutex_unlock(&wl->mutex);
2240 return count;
2241}
2242
2243static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
2244 wl1271_sysfs_show_bt_coex_state,
2245 wl1271_sysfs_store_bt_coex_state);
2246
2247static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
2248 struct device_attribute *attr,
2249 char *buf)
2250{
2251 struct wl1271 *wl = dev_get_drvdata(dev);
2252 ssize_t len;
2253
2254 /* FIXME: what's the maximum length of buf? page size?*/
2255 len = 500;
2256
2257 mutex_lock(&wl->mutex);
2258 if (wl->hw_pg_ver >= 0)
2259 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
2260 else
2261 len = snprintf(buf, len, "n/a\n");
2262 mutex_unlock(&wl->mutex);
2263
2264 return len;
2265}
2266
2267static DEVICE_ATTR(hw_pg_ver, S_IRUGO | S_IWUSR,
2268 wl1271_sysfs_show_hw_pg_ver, NULL);
2269
2270int wl1271_register_hw(struct wl1271 *wl)
1964{ 2271{
1965 int ret; 2272 int ret;
1966 2273
@@ -1981,8 +2288,17 @@ static int wl1271_register_hw(struct wl1271 *wl)
1981 2288
1982 return 0; 2289 return 0;
1983} 2290}
2291EXPORT_SYMBOL_GPL(wl1271_register_hw);
2292
2293void wl1271_unregister_hw(struct wl1271 *wl)
2294{
2295 ieee80211_unregister_hw(wl->hw);
2296 wl->mac80211_registered = false;
2297
2298}
2299EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
1984 2300
1985static int wl1271_init_ieee80211(struct wl1271 *wl) 2301int wl1271_init_ieee80211(struct wl1271 *wl)
1986{ 2302{
1987 /* The tx descriptor buffer and the TKIP space. */ 2303 /* The tx descriptor buffer and the TKIP space. */
1988 wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE + 2304 wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE +
@@ -1991,11 +2307,15 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
1991 /* unit us */ 2307 /* unit us */
1992 /* FIXME: find a proper value */ 2308 /* FIXME: find a proper value */
1993 wl->hw->channel_change_time = 10000; 2309 wl->hw->channel_change_time = 10000;
2310 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
1994 2311
1995 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 2312 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1996 IEEE80211_HW_NOISE_DBM |
1997 IEEE80211_HW_BEACON_FILTER | 2313 IEEE80211_HW_BEACON_FILTER |
1998 IEEE80211_HW_SUPPORTS_PS; 2314 IEEE80211_HW_SUPPORTS_PS |
2315 IEEE80211_HW_SUPPORTS_UAPSD |
2316 IEEE80211_HW_HAS_RATE_CONTROL |
2317 IEEE80211_HW_CONNECTION_MONITOR |
2318 IEEE80211_HW_SUPPORTS_CQM_RSSI;
1999 2319
2000 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 2320 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2001 BIT(NL80211_IFTYPE_ADHOC); 2321 BIT(NL80211_IFTYPE_ADHOC);
@@ -2005,51 +2325,53 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
2005 if (wl1271_11a_enabled()) 2325 if (wl1271_11a_enabled())
2006 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz; 2326 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
2007 2327
2008 SET_IEEE80211_DEV(wl->hw, &wl->spi->dev); 2328 wl->hw->queues = 4;
2329 wl->hw->max_rates = 1;
2009 2330
2010 return 0; 2331 SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
2011}
2012
2013static void wl1271_device_release(struct device *dev)
2014{
2015 2332
2333 return 0;
2016} 2334}
2017 2335EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
2018static struct platform_device wl1271_device = {
2019 .name = "wl1271",
2020 .id = -1,
2021
2022 /* device model insists to have a release function */
2023 .dev = {
2024 .release = wl1271_device_release,
2025 },
2026};
2027 2336
2028#define WL1271_DEFAULT_CHANNEL 0 2337#define WL1271_DEFAULT_CHANNEL 0
2029 2338
2030static struct ieee80211_hw *wl1271_alloc_hw(void) 2339struct ieee80211_hw *wl1271_alloc_hw(void)
2031{ 2340{
2032 struct ieee80211_hw *hw; 2341 struct ieee80211_hw *hw;
2342 struct platform_device *plat_dev = NULL;
2033 struct wl1271 *wl; 2343 struct wl1271 *wl;
2034 int i; 2344 int i, ret;
2035 2345
2036 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); 2346 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
2037 if (!hw) { 2347 if (!hw) {
2038 wl1271_error("could not alloc ieee80211_hw"); 2348 wl1271_error("could not alloc ieee80211_hw");
2039 return ERR_PTR(-ENOMEM); 2349 ret = -ENOMEM;
2350 goto err_hw_alloc;
2351 }
2352
2353 plat_dev = kmalloc(sizeof(wl1271_device), GFP_KERNEL);
2354 if (!plat_dev) {
2355 wl1271_error("could not allocate platform_device");
2356 ret = -ENOMEM;
2357 goto err_plat_alloc;
2040 } 2358 }
2041 2359
2360 memcpy(plat_dev, &wl1271_device, sizeof(wl1271_device));
2361
2042 wl = hw->priv; 2362 wl = hw->priv;
2043 memset(wl, 0, sizeof(*wl)); 2363 memset(wl, 0, sizeof(*wl));
2044 2364
2045 INIT_LIST_HEAD(&wl->list); 2365 INIT_LIST_HEAD(&wl->list);
2046 2366
2047 wl->hw = hw; 2367 wl->hw = hw;
2368 wl->plat_dev = plat_dev;
2048 2369
2049 skb_queue_head_init(&wl->tx_queue); 2370 skb_queue_head_init(&wl->tx_queue);
2050 2371
2051 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 2372 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
2052 wl->channel = WL1271_DEFAULT_CHANNEL; 2373 wl->channel = WL1271_DEFAULT_CHANNEL;
2374 wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
2053 wl->default_key = 0; 2375 wl->default_key = 0;
2054 wl->rx_counter = 0; 2376 wl->rx_counter = 0;
2055 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 2377 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
@@ -2057,11 +2379,14 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
2057 wl->psm_entry_retry = 0; 2379 wl->psm_entry_retry = 0;
2058 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 2380 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2059 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC; 2381 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2382 wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
2060 wl->rate_set = CONF_TX_RATE_MASK_BASIC; 2383 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
2061 wl->sta_rate_set = 0; 2384 wl->sta_rate_set = 0;
2062 wl->band = IEEE80211_BAND_2GHZ; 2385 wl->band = IEEE80211_BAND_2GHZ;
2063 wl->vif = NULL; 2386 wl->vif = NULL;
2064 wl->flags = 0; 2387 wl->flags = 0;
2388 wl->sg_enabled = true;
2389 wl->hw_pg_ver = -1;
2065 2390
2066 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 2391 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
2067 wl->tx_frames[i] = NULL; 2392 wl->tx_frames[i] = NULL;
@@ -2074,167 +2399,72 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
2074 /* Apply default driver configuration. */ 2399 /* Apply default driver configuration. */
2075 wl1271_conf_init(wl); 2400 wl1271_conf_init(wl);
2076 2401
2077 return hw; 2402 wl1271_debugfs_init(wl);
2078}
2079
2080int wl1271_free_hw(struct wl1271 *wl)
2081{
2082 ieee80211_unregister_hw(wl->hw);
2083
2084 wl1271_debugfs_exit(wl);
2085
2086 kfree(wl->target_mem_map);
2087 vfree(wl->fw);
2088 wl->fw = NULL;
2089 kfree(wl->nvs);
2090 wl->nvs = NULL;
2091
2092 kfree(wl->fw_status);
2093 kfree(wl->tx_res_if);
2094
2095 ieee80211_free_hw(wl->hw);
2096
2097 return 0;
2098}
2099
2100static int __devinit wl1271_probe(struct spi_device *spi)
2101{
2102 struct wl12xx_platform_data *pdata;
2103 struct ieee80211_hw *hw;
2104 struct wl1271 *wl;
2105 int ret;
2106 2403
2107 pdata = spi->dev.platform_data; 2404 /* Register platform device */
2108 if (!pdata) { 2405 ret = platform_device_register(wl->plat_dev);
2109 wl1271_error("no platform data"); 2406 if (ret) {
2110 return -ENODEV; 2407 wl1271_error("couldn't register platform device");
2408 goto err_hw;
2111 } 2409 }
2410 dev_set_drvdata(&wl->plat_dev->dev, wl);
2112 2411
2113 hw = wl1271_alloc_hw(); 2412 /* Create sysfs file to control bt coex state */
2114 if (IS_ERR(hw)) 2413 ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
2115 return PTR_ERR(hw);
2116
2117 wl = hw->priv;
2118
2119 dev_set_drvdata(&spi->dev, wl);
2120 wl->spi = spi;
2121
2122 /* This is the only SPI value that we need to set here, the rest
2123 * comes from the board-peripherals file */
2124 spi->bits_per_word = 32;
2125
2126 ret = spi_setup(spi);
2127 if (ret < 0) { 2414 if (ret < 0) {
2128 wl1271_error("spi_setup failed"); 2415 wl1271_error("failed to create sysfs file bt_coex_state");
2129 goto out_free; 2416 goto err_platform;
2130 }
2131
2132 wl->set_power = pdata->set_power;
2133 if (!wl->set_power) {
2134 wl1271_error("set power function missing in platform data");
2135 ret = -ENODEV;
2136 goto out_free;
2137 } 2417 }
2138 2418
2139 wl->irq = spi->irq; 2419 /* Create sysfs file to get HW PG version */
2140 if (wl->irq < 0) { 2420 ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
2141 wl1271_error("irq missing in platform data");
2142 ret = -ENODEV;
2143 goto out_free;
2144 }
2145
2146 ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
2147 if (ret < 0) { 2421 if (ret < 0) {
2148 wl1271_error("request_irq() failed: %d", ret); 2422 wl1271_error("failed to create sysfs file hw_pg_ver");
2149 goto out_free; 2423 goto err_bt_coex_state;
2150 }
2151
2152 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
2153
2154 disable_irq(wl->irq);
2155
2156 ret = platform_device_register(&wl1271_device);
2157 if (ret) {
2158 wl1271_error("couldn't register platform device");
2159 goto out_irq;
2160 } 2424 }
2161 dev_set_drvdata(&wl1271_device.dev, wl);
2162
2163 ret = wl1271_init_ieee80211(wl);
2164 if (ret)
2165 goto out_platform;
2166
2167 ret = wl1271_register_hw(wl);
2168 if (ret)
2169 goto out_platform;
2170
2171 wl1271_debugfs_init(wl);
2172 2425
2173 wl1271_notice("initialized"); 2426 return hw;
2174 2427
2175 return 0; 2428err_bt_coex_state:
2429 device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
2176 2430
2177 out_platform: 2431err_platform:
2178 platform_device_unregister(&wl1271_device); 2432 platform_device_unregister(wl->plat_dev);
2179 2433
2180 out_irq: 2434err_hw:
2181 free_irq(wl->irq, wl); 2435 wl1271_debugfs_exit(wl);
2436 kfree(plat_dev);
2182 2437
2183 out_free: 2438err_plat_alloc:
2184 ieee80211_free_hw(hw); 2439 ieee80211_free_hw(hw);
2185 2440
2186 return ret; 2441err_hw_alloc:
2187}
2188
2189static int __devexit wl1271_remove(struct spi_device *spi)
2190{
2191 struct wl1271 *wl = dev_get_drvdata(&spi->dev);
2192 2442
2193 platform_device_unregister(&wl1271_device); 2443 return ERR_PTR(ret);
2194 free_irq(wl->irq, wl);
2195
2196 wl1271_free_hw(wl);
2197
2198 return 0;
2199} 2444}
2445EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
2200 2446
2201 2447int wl1271_free_hw(struct wl1271 *wl)
2202static struct spi_driver wl1271_spi_driver = {
2203 .driver = {
2204 .name = "wl1271",
2205 .bus = &spi_bus_type,
2206 .owner = THIS_MODULE,
2207 },
2208
2209 .probe = wl1271_probe,
2210 .remove = __devexit_p(wl1271_remove),
2211};
2212
2213static int __init wl1271_init(void)
2214{ 2448{
2215 int ret; 2449 platform_device_unregister(wl->plat_dev);
2450 kfree(wl->plat_dev);
2216 2451
2217 ret = spi_register_driver(&wl1271_spi_driver); 2452 wl1271_debugfs_exit(wl);
2218 if (ret < 0) {
2219 wl1271_error("failed to register spi driver: %d", ret);
2220 goto out;
2221 }
2222 2453
2223out: 2454 vfree(wl->fw);
2224 return ret; 2455 wl->fw = NULL;
2225} 2456 kfree(wl->nvs);
2457 wl->nvs = NULL;
2226 2458
2227static void __exit wl1271_exit(void) 2459 kfree(wl->fw_status);
2228{ 2460 kfree(wl->tx_res_if);
2229 spi_unregister_driver(&wl1271_spi_driver);
2230 2461
2231 wl1271_notice("unloaded"); 2462 ieee80211_free_hw(wl->hw);
2232}
2233 2463
2234module_init(wl1271_init); 2464 return 0;
2235module_exit(wl1271_exit); 2465}
2466EXPORT_SYMBOL_GPL(wl1271_free_hw);
2236 2467
2237MODULE_LICENSE("GPL"); 2468MODULE_LICENSE("GPL");
2238MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 2469MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
2239MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 2470MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
2240MODULE_FIRMWARE(WL1271_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index e2b1ebf096e8..a5e60e0403e5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -23,7 +23,6 @@
23 23
24#include "wl1271_reg.h" 24#include "wl1271_reg.h"
25#include "wl1271_ps.h" 25#include "wl1271_ps.h"
26#include "wl1271_spi.h"
27#include "wl1271_io.h" 26#include "wl1271_io.h"
28 27
29#define WL1271_WAKEUP_TIMEOUT 500 28#define WL1271_WAKEUP_TIMEOUT 500
@@ -41,7 +40,8 @@ void wl1271_elp_work(struct work_struct *work)
41 mutex_lock(&wl->mutex); 40 mutex_lock(&wl->mutex);
42 41
43 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) || 42 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
44 !test_bit(WL1271_FLAG_PSM, &wl->flags)) 43 (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
44 !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
45 goto out; 45 goto out;
46 46
47 wl1271_debug(DEBUG_PSM, "chip to elp"); 47 wl1271_debug(DEBUG_PSM, "chip to elp");
@@ -57,7 +57,8 @@ out:
57/* Routines to toggle sleep mode while in ELP */ 57/* Routines to toggle sleep mode while in ELP */
58void wl1271_ps_elp_sleep(struct wl1271 *wl) 58void wl1271_ps_elp_sleep(struct wl1271 *wl)
59{ 59{
60 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) { 60 if (test_bit(WL1271_FLAG_PSM, &wl->flags) ||
61 test_bit(WL1271_FLAG_IDLE, &wl->flags)) {
61 cancel_delayed_work(&wl->elp_work); 62 cancel_delayed_work(&wl->elp_work);
62 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 63 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
63 msecs_to_jiffies(ELP_ENTRY_DELAY)); 64 msecs_to_jiffies(ELP_ENTRY_DELAY));
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index c723d9c7e131..57f4bfd959c8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -27,7 +27,6 @@
27#include "wl1271_acx.h" 27#include "wl1271_acx.h"
28#include "wl1271_reg.h" 28#include "wl1271_reg.h"
29#include "wl1271_rx.h" 29#include "wl1271_rx.h"
30#include "wl1271_spi.h"
31#include "wl1271_io.h" 30#include "wl1271_io.h"
32 31
33static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, 32static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
@@ -44,66 +43,6 @@ static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
44 RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV; 43 RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
45} 44}
46 45
47/* The values of this table must match the wl1271_rates[] array */
48static u8 wl1271_rx_rate_to_idx[] = {
49 /* MCS rates are used only with 11n */
50 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
51 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
52 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
53 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
54 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
55 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
56 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
57 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
58
59 11, /* WL1271_RATE_54 */
60 10, /* WL1271_RATE_48 */
61 9, /* WL1271_RATE_36 */
62 8, /* WL1271_RATE_24 */
63
64 /* TI-specific rate */
65 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
66
67 7, /* WL1271_RATE_18 */
68 6, /* WL1271_RATE_12 */
69 3, /* WL1271_RATE_11 */
70 5, /* WL1271_RATE_9 */
71 4, /* WL1271_RATE_6 */
72 2, /* WL1271_RATE_5_5 */
73 1, /* WL1271_RATE_2 */
74 0 /* WL1271_RATE_1 */
75};
76
77/* The values of this table must match the wl1271_rates[] array */
78static u8 wl1271_5_ghz_rx_rate_to_idx[] = {
79 /* MCS rates are used only with 11n */
80 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
81 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
82 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
83 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
84 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
85 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
86 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
87 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
88
89 7, /* WL1271_RATE_54 */
90 6, /* WL1271_RATE_48 */
91 5, /* WL1271_RATE_36 */
92 4, /* WL1271_RATE_24 */
93
94 /* TI-specific rate */
95 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
96
97 3, /* WL1271_RATE_18 */
98 2, /* WL1271_RATE_12 */
99 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_11 */
100 1, /* WL1271_RATE_9 */
101 0, /* WL1271_RATE_6 */
102 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_5_5 */
103 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_2 */
104 WL1271_RX_RATE_UNSUPPORTED /* WL1271_RATE_1 */
105};
106
107static void wl1271_rx_status(struct wl1271 *wl, 46static void wl1271_rx_status(struct wl1271 *wl,
108 struct wl1271_rx_descriptor *desc, 47 struct wl1271_rx_descriptor *desc,
109 struct ieee80211_rx_status *status, 48 struct ieee80211_rx_status *status,
@@ -111,20 +50,8 @@ static void wl1271_rx_status(struct wl1271 *wl,
111{ 50{
112 memset(status, 0, sizeof(struct ieee80211_rx_status)); 51 memset(status, 0, sizeof(struct ieee80211_rx_status));
113 52
114 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == 53 status->band = wl->band;
115 WL1271_RX_DESC_BAND_BG) { 54 status->rate_idx = wl1271_rate_to_idx(wl, desc->rate);
116 status->band = IEEE80211_BAND_2GHZ;
117 status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
118 } else if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
119 WL1271_RX_DESC_BAND_A) {
120 status->band = IEEE80211_BAND_5GHZ;
121 status->rate_idx = wl1271_5_ghz_rx_rate_to_idx[desc->rate];
122 } else
123 wl1271_warning("unsupported band 0x%x",
124 desc->flags & WL1271_RX_DESC_BAND_MASK);
125
126 if (unlikely(status->rate_idx == WL1271_RX_RATE_UNSUPPORTED))
127 wl1271_warning("unsupported rate");
128 55
129 /* 56 /*
130 * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the 57 * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the
@@ -134,13 +61,6 @@ static void wl1271_rx_status(struct wl1271 *wl,
134 */ 61 */
135 status->signal = desc->rssi; 62 status->signal = desc->rssi;
136 63
137 /*
138 * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
139 * need to divide by two for now, but TI has been discussing about
140 * changing it. This needs to be rechecked.
141 */
142 status->noise = desc->rssi - (desc->snr >> 1);
143
144 status->freq = ieee80211_channel_to_frequency(desc->channel); 64 status->freq = ieee80211_channel_to_frequency(desc->channel);
145 65
146 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { 66 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
@@ -162,6 +82,13 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
162 u8 *buf; 82 u8 *buf;
163 u8 beacon = 0; 83 u8 beacon = 0;
164 84
85 /*
86 * In PLT mode we seem to get frames and mac80211 warns about them,
87 * workaround this by not retrieving them at all.
88 */
89 if (unlikely(wl->state == WL1271_STATE_PLT))
90 return;
91
165 skb = __dev_alloc_skb(length, GFP_KERNEL); 92 skb = __dev_alloc_skb(length, GFP_KERNEL);
166 if (!skb) { 93 if (!skb) {
167 wl1271_error("Couldn't allocate RX frame"); 94 wl1271_error("Couldn't allocate RX frame");
@@ -220,6 +147,7 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
220 147
221 wl->rx_counter++; 148 wl->rx_counter++;
222 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 149 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
223 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
224 } 150 }
151
152 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
225} 153}
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index 1ae6d1783ed4..b89be4758e78 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -43,7 +43,6 @@
43#define RX_MAX_PACKET_ID 3 43#define RX_MAX_PACKET_ID 3
44 44
45#define NUM_RX_PKT_DESC_MOD_MASK 7 45#define NUM_RX_PKT_DESC_MOD_MASK 7
46#define WL1271_RX_RATE_UNSUPPORTED 0xFF
47 46
48#define RX_DESC_VALID_FCS 0x0001 47#define RX_DESC_VALID_FCS 0x0001
49#define RX_DESC_MATCH_RXADDR1 0x0002 48#define RX_DESC_MATCH_RXADDR1 0x0002
@@ -117,5 +116,6 @@ struct wl1271_rx_descriptor {
117} __attribute__ ((packed)); 116} __attribute__ ((packed));
118 117
119void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); 118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
119u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
120 120
121#endif 121#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/wl1271_sdio.c
new file mode 100644
index 000000000000..d3d6f302f705
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_sdio.c
@@ -0,0 +1,291 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2009-2010 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/irq.h>
25#include <linux/module.h>
26#include <linux/crc7.h>
27#include <linux/vmalloc.h>
28#include <linux/mmc/sdio_func.h>
29#include <linux/mmc/sdio_ids.h>
30#include <linux/mmc/card.h>
31#include <plat/gpio.h>
32
33#include "wl1271.h"
34#include "wl12xx_80211.h"
35#include "wl1271_io.h"
36
37
38#define RX71_WL1271_IRQ_GPIO 42
39
40#ifndef SDIO_VENDOR_ID_TI
41#define SDIO_VENDOR_ID_TI 0x0097
42#endif
43
44#ifndef SDIO_DEVICE_ID_TI_WL1271
45#define SDIO_DEVICE_ID_TI_WL1271 0x4076
46#endif
47
48static const struct sdio_device_id wl1271_devices[] = {
49 { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
50 {}
51};
52MODULE_DEVICE_TABLE(sdio, wl1271_devices);
53
54static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
55{
56 return wl->if_priv;
57}
58
59static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
60{
61 return &(wl_to_func(wl)->dev);
62}
63
64static irqreturn_t wl1271_irq(int irq, void *cookie)
65{
66 struct wl1271 *wl = cookie;
67 unsigned long flags;
68
69 wl1271_debug(DEBUG_IRQ, "IRQ");
70
71 /* complete the ELP completion */
72 spin_lock_irqsave(&wl->wl_lock, flags);
73 if (wl->elp_compl) {
74 complete(wl->elp_compl);
75 wl->elp_compl = NULL;
76 }
77
78 if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
79 ieee80211_queue_work(wl->hw, &wl->irq_work);
80 set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
81 spin_unlock_irqrestore(&wl->wl_lock, flags);
82
83 return IRQ_HANDLED;
84}
85
86static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
87{
88 disable_irq(wl->irq);
89}
90
91static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
92{
93 enable_irq(wl->irq);
94}
95
96static void wl1271_sdio_reset(struct wl1271 *wl)
97{
98}
99
100static void wl1271_sdio_init(struct wl1271 *wl)
101{
102}
103
104static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
105 size_t len, bool fixed)
106{
107 int ret;
108 struct sdio_func *func = wl_to_func(wl);
109
110 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
111 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
112 wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
113 addr, ((u8 *)buf)[0]);
114 } else {
115 if (fixed)
116 ret = sdio_readsb(func, buf, addr, len);
117 else
118 ret = sdio_memcpy_fromio(func, buf, addr, len);
119
120 wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
121 addr, len);
122 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
123 }
124
125 if (ret)
126 wl1271_error("sdio read failed (%d)", ret);
127
128}
129
130static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
131 size_t len, bool fixed)
132{
133 int ret;
134 struct sdio_func *func = wl_to_func(wl);
135
136 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
137 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
138 wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
139 addr, ((u8 *)buf)[0]);
140 } else {
141 wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
142 addr, len);
143 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
144
145 if (fixed)
146 ret = sdio_writesb(func, addr, buf, len);
147 else
148 ret = sdio_memcpy_toio(func, addr, buf, len);
149 }
150 if (ret)
151 wl1271_error("sdio write failed (%d)", ret);
152
153}
154
155static void wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
156{
157 struct sdio_func *func = wl_to_func(wl);
158
159 /* Let the SDIO stack handle wlan_enable control, so we
160 * keep host claimed while wlan is in use to keep wl1271
161 * alive.
162 */
163 if (enable) {
164 sdio_claim_host(func);
165 sdio_enable_func(func);
166 } else {
167 sdio_disable_func(func);
168 sdio_release_host(func);
169 }
170}
171
172static struct wl1271_if_operations sdio_ops = {
173 .read = wl1271_sdio_raw_read,
174 .write = wl1271_sdio_raw_write,
175 .reset = wl1271_sdio_reset,
176 .init = wl1271_sdio_init,
177 .power = wl1271_sdio_set_power,
178 .dev = wl1271_sdio_wl_to_dev,
179 .enable_irq = wl1271_sdio_enable_interrupts,
180 .disable_irq = wl1271_sdio_disable_interrupts
181};
182
183static int __devinit wl1271_probe(struct sdio_func *func,
184 const struct sdio_device_id *id)
185{
186 struct ieee80211_hw *hw;
187 struct wl1271 *wl;
188 int ret;
189
190 /* We are only able to handle the wlan function */
191 if (func->num != 0x02)
192 return -ENODEV;
193
194 hw = wl1271_alloc_hw();
195 if (IS_ERR(hw))
196 return PTR_ERR(hw);
197
198 wl = hw->priv;
199
200 wl->if_priv = func;
201 wl->if_ops = &sdio_ops;
202
203 /* Grab access to FN0 for ELP reg. */
204 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
205
206 wl->irq = gpio_to_irq(RX71_WL1271_IRQ_GPIO);
207 if (wl->irq < 0) {
208 ret = wl->irq;
209 wl1271_error("could not get irq!");
210 goto out_free;
211 }
212
213 ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
214 if (ret < 0) {
215 wl1271_error("request_irq() failed: %d", ret);
216 goto out_free;
217 }
218
219 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
220
221 disable_irq(wl->irq);
222
223 ret = wl1271_init_ieee80211(wl);
224 if (ret)
225 goto out_irq;
226
227 ret = wl1271_register_hw(wl);
228 if (ret)
229 goto out_irq;
230
231 sdio_set_drvdata(func, wl);
232
233 wl1271_notice("initialized");
234
235 return 0;
236
237 out_irq:
238 free_irq(wl->irq, wl);
239
240
241 out_free:
242 wl1271_free_hw(wl);
243
244 return ret;
245}
246
247static void __devexit wl1271_remove(struct sdio_func *func)
248{
249 struct wl1271 *wl = sdio_get_drvdata(func);
250
251 free_irq(wl->irq, wl);
252
253 wl1271_unregister_hw(wl);
254 wl1271_free_hw(wl);
255}
256
257static struct sdio_driver wl1271_sdio_driver = {
258 .name = "wl1271_sdio",
259 .id_table = wl1271_devices,
260 .probe = wl1271_probe,
261 .remove = __devexit_p(wl1271_remove),
262};
263
264static int __init wl1271_init(void)
265{
266 int ret;
267
268 ret = sdio_register_driver(&wl1271_sdio_driver);
269 if (ret < 0) {
270 wl1271_error("failed to register sdio driver: %d", ret);
271 goto out;
272 }
273
274out:
275 return ret;
276}
277
278static void __exit wl1271_exit(void)
279{
280 sdio_unregister_driver(&wl1271_sdio_driver);
281
282 wl1271_notice("unloaded");
283}
284
285module_init(wl1271_init);
286module_exit(wl1271_exit);
287
288MODULE_LICENSE("GPL");
289MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
290MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
291MODULE_FIRMWARE(WL1271_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 053c84aceb49..5189b812f939 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -21,18 +21,69 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/irq.h>
24#include <linux/module.h> 25#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/crc7.h> 26#include <linux/crc7.h>
27#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
28#include <linux/spi/wl12xx.h>
28#include <linux/slab.h> 29#include <linux/slab.h>
29 30
30#include "wl1271.h" 31#include "wl1271.h"
31#include "wl12xx_80211.h" 32#include "wl12xx_80211.h"
32#include "wl1271_spi.h" 33#include "wl1271_io.h"
34
35#include "wl1271_reg.h"
36
37#define WSPI_CMD_READ 0x40000000
38#define WSPI_CMD_WRITE 0x00000000
39#define WSPI_CMD_FIXED 0x20000000
40#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
41#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
42#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
43
44#define WSPI_INIT_CMD_CRC_LEN 5
45
46#define WSPI_INIT_CMD_START 0x00
47#define WSPI_INIT_CMD_TX 0x40
48/* the extra bypass bit is sampled by the TNET as '1' */
49#define WSPI_INIT_CMD_BYPASS_BIT 0x80
50#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
51#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
52#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
53#define WSPI_INIT_CMD_IOD 0x40
54#define WSPI_INIT_CMD_IP 0x20
55#define WSPI_INIT_CMD_CS 0x10
56#define WSPI_INIT_CMD_WS 0x08
57#define WSPI_INIT_CMD_WSPI 0x01
58#define WSPI_INIT_CMD_END 0x01
59
60#define WSPI_INIT_CMD_LEN 8
61
62#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
63 ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
64#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
65
66static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
67{
68 return wl->if_priv;
69}
33 70
71static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
72{
73 return &(wl_to_spi(wl)->dev);
74}
34 75
35void wl1271_spi_reset(struct wl1271 *wl) 76static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
77{
78 disable_irq(wl->irq);
79}
80
81static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
82{
83 enable_irq(wl->irq);
84}
85
86static void wl1271_spi_reset(struct wl1271 *wl)
36{ 87{
37 u8 *cmd; 88 u8 *cmd;
38 struct spi_transfer t; 89 struct spi_transfer t;
@@ -53,12 +104,13 @@ void wl1271_spi_reset(struct wl1271 *wl)
53 t.len = WSPI_INIT_CMD_LEN; 104 t.len = WSPI_INIT_CMD_LEN;
54 spi_message_add_tail(&t, &m); 105 spi_message_add_tail(&t, &m);
55 106
56 spi_sync(wl->spi, &m); 107 spi_sync(wl_to_spi(wl), &m);
108 kfree(cmd);
57 109
58 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); 110 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
59} 111}
60 112
61void wl1271_spi_init(struct wl1271 *wl) 113static void wl1271_spi_init(struct wl1271 *wl)
62{ 114{
63 u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd; 115 u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
64 struct spi_transfer t; 116 struct spi_transfer t;
@@ -107,48 +159,25 @@ void wl1271_spi_init(struct wl1271 *wl)
107 t.len = WSPI_INIT_CMD_LEN; 159 t.len = WSPI_INIT_CMD_LEN;
108 spi_message_add_tail(&t, &m); 160 spi_message_add_tail(&t, &m);
109 161
110 spi_sync(wl->spi, &m); 162 spi_sync(wl_to_spi(wl), &m);
163 kfree(cmd);
111 164
112 wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); 165 wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
113} 166}
114 167
115#define WL1271_BUSY_WORD_TIMEOUT 1000 168#define WL1271_BUSY_WORD_TIMEOUT 1000
116 169
117/* FIXME: Check busy words, removed due to SPI bug */ 170static int wl1271_spi_read_busy(struct wl1271 *wl)
118#if 0
119static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
120{ 171{
121 struct spi_transfer t[1]; 172 struct spi_transfer t[1];
122 struct spi_message m; 173 struct spi_message m;
123 u32 *busy_buf; 174 u32 *busy_buf;
124 int num_busy_bytes = 0; 175 int num_busy_bytes = 0;
125 176
126 wl1271_info("spi read BUSY!");
127
128 /*
129 * Look for the non-busy word in the read buffer, and if found,
130 * read in the remaining data into the buffer.
131 */
132 busy_buf = (u32 *)buf;
133 for (; (u32)busy_buf < (u32)buf + len; busy_buf++) {
134 num_busy_bytes += sizeof(u32);
135 if (*busy_buf & 0x1) {
136 spi_message_init(&m);
137 memset(t, 0, sizeof(t));
138 memmove(buf, busy_buf, len - num_busy_bytes);
139 t[0].rx_buf = buf + (len - num_busy_bytes);
140 t[0].len = num_busy_bytes;
141 spi_message_add_tail(&t[0], &m);
142 spi_sync(wl->spi, &m);
143 return;
144 }
145 }
146
147 /* 177 /*
148 * Read further busy words from SPI until a non-busy word is 178 * Read further busy words from SPI until a non-busy word is
149 * encountered, then read the data itself into the buffer. 179 * encountered, then read the data itself into the buffer.
150 */ 180 */
151 wl1271_info("spi read BUSY-polling needed!");
152 181
153 num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT; 182 num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT;
154 busy_buf = wl->buffer_busyword; 183 busy_buf = wl->buffer_busyword;
@@ -158,28 +187,21 @@ static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
158 memset(t, 0, sizeof(t)); 187 memset(t, 0, sizeof(t));
159 t[0].rx_buf = busy_buf; 188 t[0].rx_buf = busy_buf;
160 t[0].len = sizeof(u32); 189 t[0].len = sizeof(u32);
190 t[0].cs_change = true;
161 spi_message_add_tail(&t[0], &m); 191 spi_message_add_tail(&t[0], &m);
162 spi_sync(wl->spi, &m); 192 spi_sync(wl_to_spi(wl), &m);
163 193
164 if (*busy_buf & 0x1) { 194 if (*busy_buf & 0x1)
165 spi_message_init(&m); 195 return 0;
166 memset(t, 0, sizeof(t));
167 t[0].rx_buf = buf;
168 t[0].len = len;
169 spi_message_add_tail(&t[0], &m);
170 spi_sync(wl->spi, &m);
171 return;
172 }
173 } 196 }
174 197
175 /* The SPI bus is unresponsive, the read failed. */ 198 /* The SPI bus is unresponsive, the read failed. */
176 memset(buf, 0, len);
177 wl1271_error("SPI read busy-word timeout!\n"); 199 wl1271_error("SPI read busy-word timeout!\n");
200 return -ETIMEDOUT;
178} 201}
179#endif
180 202
181void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf, 203static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
182 size_t len, bool fixed) 204 size_t len, bool fixed)
183{ 205{
184 struct spi_transfer t[3]; 206 struct spi_transfer t[3];
185 struct spi_message m; 207 struct spi_message m;
@@ -202,28 +224,38 @@ void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
202 224
203 t[0].tx_buf = cmd; 225 t[0].tx_buf = cmd;
204 t[0].len = 4; 226 t[0].len = 4;
227 t[0].cs_change = true;
205 spi_message_add_tail(&t[0], &m); 228 spi_message_add_tail(&t[0], &m);
206 229
207 /* Busy and non busy words read */ 230 /* Busy and non busy words read */
208 t[1].rx_buf = busy_buf; 231 t[1].rx_buf = busy_buf;
209 t[1].len = WL1271_BUSY_WORD_LEN; 232 t[1].len = WL1271_BUSY_WORD_LEN;
233 t[1].cs_change = true;
210 spi_message_add_tail(&t[1], &m); 234 spi_message_add_tail(&t[1], &m);
211 235
212 t[2].rx_buf = buf; 236 spi_sync(wl_to_spi(wl), &m);
213 t[2].len = len;
214 spi_message_add_tail(&t[2], &m);
215 237
216 spi_sync(wl->spi, &m); 238 if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
239 wl1271_spi_read_busy(wl)) {
240 memset(buf, 0, len);
241 return;
242 }
217 243
218 /* FIXME: Check busy words, removed due to SPI bug */ 244 spi_message_init(&m);
219 /* if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1)) 245 memset(t, 0, sizeof(t));
220 wl1271_spi_read_busy(wl, buf, len); */ 246
247 t[0].rx_buf = buf;
248 t[0].len = len;
249 t[0].cs_change = true;
250 spi_message_add_tail(&t[0], &m);
251
252 spi_sync(wl_to_spi(wl), &m);
221 253
222 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); 254 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
223 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len); 255 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
224} 256}
225 257
226void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf, 258static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
227 size_t len, bool fixed) 259 size_t len, bool fixed)
228{ 260{
229 struct spi_transfer t[2]; 261 struct spi_transfer t[2];
@@ -251,8 +283,181 @@ void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
251 t[1].len = len; 283 t[1].len = len;
252 spi_message_add_tail(&t[1], &m); 284 spi_message_add_tail(&t[1], &m);
253 285
254 spi_sync(wl->spi, &m); 286 spi_sync(wl_to_spi(wl), &m);
255 287
256 wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd)); 288 wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
257 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len); 289 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
258} 290}
291
292static irqreturn_t wl1271_irq(int irq, void *cookie)
293{
294 struct wl1271 *wl;
295 unsigned long flags;
296
297 wl1271_debug(DEBUG_IRQ, "IRQ");
298
299 wl = cookie;
300
301 /* complete the ELP completion */
302 spin_lock_irqsave(&wl->wl_lock, flags);
303 if (wl->elp_compl) {
304 complete(wl->elp_compl);
305 wl->elp_compl = NULL;
306 }
307
308 if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
309 ieee80211_queue_work(wl->hw, &wl->irq_work);
310 set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
311 spin_unlock_irqrestore(&wl->wl_lock, flags);
312
313 return IRQ_HANDLED;
314}
315
316static void wl1271_spi_set_power(struct wl1271 *wl, bool enable)
317{
318 if (wl->set_power)
319 wl->set_power(enable);
320}
321
322static struct wl1271_if_operations spi_ops = {
323 .read = wl1271_spi_raw_read,
324 .write = wl1271_spi_raw_write,
325 .reset = wl1271_spi_reset,
326 .init = wl1271_spi_init,
327 .power = wl1271_spi_set_power,
328 .dev = wl1271_spi_wl_to_dev,
329 .enable_irq = wl1271_spi_enable_interrupts,
330 .disable_irq = wl1271_spi_disable_interrupts
331};
332
333static int __devinit wl1271_probe(struct spi_device *spi)
334{
335 struct wl12xx_platform_data *pdata;
336 struct ieee80211_hw *hw;
337 struct wl1271 *wl;
338 int ret;
339
340 pdata = spi->dev.platform_data;
341 if (!pdata) {
342 wl1271_error("no platform data");
343 return -ENODEV;
344 }
345
346 hw = wl1271_alloc_hw();
347 if (IS_ERR(hw))
348 return PTR_ERR(hw);
349
350 wl = hw->priv;
351
352 dev_set_drvdata(&spi->dev, wl);
353 wl->if_priv = spi;
354
355 wl->if_ops = &spi_ops;
356
357 /* This is the only SPI value that we need to set here, the rest
358 * comes from the board-peripherals file */
359 spi->bits_per_word = 32;
360
361 ret = spi_setup(spi);
362 if (ret < 0) {
363 wl1271_error("spi_setup failed");
364 goto out_free;
365 }
366
367 wl->set_power = pdata->set_power;
368 if (!wl->set_power) {
369 wl1271_error("set power function missing in platform data");
370 ret = -ENODEV;
371 goto out_free;
372 }
373
374 wl->irq = spi->irq;
375 if (wl->irq < 0) {
376 wl1271_error("irq missing in platform data");
377 ret = -ENODEV;
378 goto out_free;
379 }
380
381 ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
382 if (ret < 0) {
383 wl1271_error("request_irq() failed: %d", ret);
384 goto out_free;
385 }
386
387 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
388
389 disable_irq(wl->irq);
390
391 ret = wl1271_init_ieee80211(wl);
392 if (ret)
393 goto out_irq;
394
395 ret = wl1271_register_hw(wl);
396 if (ret)
397 goto out_irq;
398
399 wl1271_notice("initialized");
400
401 return 0;
402
403 out_irq:
404 free_irq(wl->irq, wl);
405
406 out_free:
407 wl1271_free_hw(wl);
408
409 return ret;
410}
411
412static int __devexit wl1271_remove(struct spi_device *spi)
413{
414 struct wl1271 *wl = dev_get_drvdata(&spi->dev);
415
416 free_irq(wl->irq, wl);
417
418 wl1271_unregister_hw(wl);
419 wl1271_free_hw(wl);
420
421 return 0;
422}
423
424
425static struct spi_driver wl1271_spi_driver = {
426 .driver = {
427 .name = "wl1271_spi",
428 .bus = &spi_bus_type,
429 .owner = THIS_MODULE,
430 },
431
432 .probe = wl1271_probe,
433 .remove = __devexit_p(wl1271_remove),
434};
435
436static int __init wl1271_init(void)
437{
438 int ret;
439
440 ret = spi_register_driver(&wl1271_spi_driver);
441 if (ret < 0) {
442 wl1271_error("failed to register spi driver: %d", ret);
443 goto out;
444 }
445
446out:
447 return ret;
448}
449
450static void __exit wl1271_exit(void)
451{
452 spi_unregister_driver(&wl1271_spi_driver);
453
454 wl1271_notice("unloaded");
455}
456
457module_init(wl1271_init);
458module_exit(wl1271_exit);
459
460MODULE_LICENSE("GPL");
461MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
462MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
463MODULE_FIRMWARE(WL1271_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
deleted file mode 100644
index a803596dad4a..000000000000
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ /dev/null
@@ -1,96 +0,0 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
5 * Copyright (C) 2008-2009 Nokia Corporation
6 *
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25#ifndef __WL1271_SPI_H__
26#define __WL1271_SPI_H__
27
28#include "wl1271_reg.h"
29
30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
31
32#define HW_PARTITION_REGISTERS_ADDR 0x1ffc0
33#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
34#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
35#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
39#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
40
41#define HW_ACCESS_REGISTER_SIZE 4
42
43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
44
45#define WSPI_CMD_READ 0x40000000
46#define WSPI_CMD_WRITE 0x00000000
47#define WSPI_CMD_FIXED 0x20000000
48#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
49#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
50#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
51
52#define WSPI_INIT_CMD_CRC_LEN 5
53
54#define WSPI_INIT_CMD_START 0x00
55#define WSPI_INIT_CMD_TX 0x40
56/* the extra bypass bit is sampled by the TNET as '1' */
57#define WSPI_INIT_CMD_BYPASS_BIT 0x80
58#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
59#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
60#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
61#define WSPI_INIT_CMD_IOD 0x40
62#define WSPI_INIT_CMD_IP 0x20
63#define WSPI_INIT_CMD_CS 0x10
64#define WSPI_INIT_CMD_WS 0x08
65#define WSPI_INIT_CMD_WSPI 0x01
66#define WSPI_INIT_CMD_END 0x01
67
68#define WSPI_INIT_CMD_LEN 8
69
70#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
71 ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
72#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
73
74#define OCP_CMD_LOOP 32
75
76#define OCP_CMD_WRITE 0x1
77#define OCP_CMD_READ 0x2
78
79#define OCP_READY_MASK BIT(18)
80#define OCP_STATUS_MASK (BIT(16) | BIT(17))
81
82#define OCP_STATUS_NO_RESP 0x00000
83#define OCP_STATUS_OK 0x10000
84#define OCP_STATUS_REQ_FAILED 0x20000
85#define OCP_STATUS_RESP_ERROR 0x30000
86
87/* Raw target IO, address is not translated */
88void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
89 size_t len, bool fixed);
90void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
91 size_t len, bool fixed);
92
93/* INIT and RESET words */
94void wl1271_spi_reset(struct wl1271 *wl);
95void wl1271_spi_init(struct wl1271 *wl);
96#endif /* __WL1271_SPI_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c
index 5c1c4f565fd8..554deb4d024e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.c
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c
@@ -26,7 +26,6 @@
26#include <net/genetlink.h> 26#include <net/genetlink.h>
27 27
28#include "wl1271.h" 28#include "wl1271.h"
29#include "wl1271_spi.h"
30#include "wl1271_acx.h" 29#include "wl1271_acx.h"
31 30
32#define WL1271_TM_MAX_DATA_LENGTH 1024 31#define WL1271_TM_MAX_DATA_LENGTH 1024
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 811e739d05bf..62db79508ddf 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -25,7 +25,6 @@
25#include <linux/module.h> 25#include <linux/module.h>
26 26
27#include "wl1271.h" 27#include "wl1271.h"
28#include "wl1271_spi.h"
29#include "wl1271_io.h" 28#include "wl1271_io.h"
30#include "wl1271_reg.h" 29#include "wl1271_reg.h"
31#include "wl1271_ps.h" 30#include "wl1271_ps.h"
@@ -47,7 +46,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
47{ 46{
48 struct wl1271_tx_hw_descr *desc; 47 struct wl1271_tx_hw_descr *desc;
49 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 48 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
50 u32 total_blocks, excluded; 49 u32 total_blocks;
51 int id, ret = -EBUSY; 50 int id, ret = -EBUSY;
52 51
53 /* allocate free identifier for the packet */ 52 /* allocate free identifier for the packet */
@@ -57,12 +56,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
57 56
58 /* approximate the number of blocks required for this packet 57 /* approximate the number of blocks required for this packet
59 in the firmware */ 58 in the firmware */
60 /* FIXME: try to figure out what is done here and make it cleaner */ 59 total_blocks = total_len + TX_HW_BLOCK_SIZE - 1;
61 total_blocks = (total_len + 20) >> TX_HW_BLOCK_SHIFT_DIV; 60 total_blocks = total_blocks / TX_HW_BLOCK_SIZE + TX_HW_BLOCK_SPARE;
62 excluded = (total_blocks << 2) + ((total_len + 20) & 0xff) + 34;
63 total_blocks += (excluded > 252) ? 2 : 1;
64 total_blocks += TX_HW_BLOCK_SPARE;
65
66 if (total_blocks <= wl->tx_blocks_available) { 61 if (total_blocks <= wl->tx_blocks_available) {
67 desc = (struct wl1271_tx_hw_descr *)skb_push( 62 desc = (struct wl1271_tx_hw_descr *)skb_push(
68 skb, total_len - skb->len); 63 skb, total_len - skb->len);
@@ -87,8 +82,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
87static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, 82static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
88 u32 extra, struct ieee80211_tx_info *control) 83 u32 extra, struct ieee80211_tx_info *control)
89{ 84{
85 struct timespec ts;
90 struct wl1271_tx_hw_descr *desc; 86 struct wl1271_tx_hw_descr *desc;
91 int pad, ac; 87 int pad, ac;
88 s64 hosttime;
92 u16 tx_attr; 89 u16 tx_attr;
93 90
94 desc = (struct wl1271_tx_hw_descr *) skb->data; 91 desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -102,8 +99,9 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
102 } 99 }
103 100
104 /* configure packet life time */ 101 /* configure packet life time */
105 desc->start_time = cpu_to_le32(jiffies_to_usecs(jiffies) - 102 getnstimeofday(&ts);
106 wl->time_offset); 103 hosttime = (timespec_to_ns(&ts) >> 10);
104 desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
107 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); 105 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
108 106
109 /* configure the tx attributes */ 107 /* configure the tx attributes */
@@ -170,7 +168,6 @@ static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
170 168
171 /* write packet new counter into the write access register */ 169 /* write packet new counter into the write access register */
172 wl->tx_packets_count++; 170 wl->tx_packets_count++;
173 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
174 171
175 desc = (struct wl1271_tx_hw_descr *) skb->data; 172 desc = (struct wl1271_tx_hw_descr *) skb->data;
176 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)", 173 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -223,7 +220,7 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
223 return ret; 220 return ret;
224} 221}
225 222
226static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set) 223u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
227{ 224{
228 struct ieee80211_supported_band *band; 225 struct ieee80211_supported_band *band;
229 u32 enabled_rates = 0; 226 u32 enabled_rates = 0;
@@ -245,6 +242,7 @@ void wl1271_tx_work(struct work_struct *work)
245 struct sk_buff *skb; 242 struct sk_buff *skb;
246 bool woken_up = false; 243 bool woken_up = false;
247 u32 sta_rates = 0; 244 u32 sta_rates = 0;
245 u32 prev_tx_packets_count;
248 int ret; 246 int ret;
249 247
250 /* check if the rates supported by the AP have changed */ 248 /* check if the rates supported by the AP have changed */
@@ -261,6 +259,8 @@ void wl1271_tx_work(struct work_struct *work)
261 if (unlikely(wl->state == WL1271_STATE_OFF)) 259 if (unlikely(wl->state == WL1271_STATE_OFF))
262 goto out; 260 goto out;
263 261
262 prev_tx_packets_count = wl->tx_packets_count;
263
264 /* if rates have changed, re-configure the rate policy */ 264 /* if rates have changed, re-configure the rate policy */
265 if (unlikely(sta_rates)) { 265 if (unlikely(sta_rates)) {
266 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates); 266 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
@@ -271,31 +271,26 @@ void wl1271_tx_work(struct work_struct *work)
271 if (!woken_up) { 271 if (!woken_up) {
272 ret = wl1271_ps_elp_wakeup(wl, false); 272 ret = wl1271_ps_elp_wakeup(wl, false);
273 if (ret < 0) 273 if (ret < 0)
274 goto out; 274 goto out_ack;
275 woken_up = true; 275 woken_up = true;
276 } 276 }
277 277
278 ret = wl1271_tx_frame(wl, skb); 278 ret = wl1271_tx_frame(wl, skb);
279 if (ret == -EBUSY) { 279 if (ret == -EBUSY) {
280 /* firmware buffer is full, stop queues */ 280 /* firmware buffer is full, lets stop transmitting. */
281 wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
282 "stop queues");
283 ieee80211_stop_queues(wl->hw);
284 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
285 skb_queue_head(&wl->tx_queue, skb); 281 skb_queue_head(&wl->tx_queue, skb);
286 goto out; 282 goto out_ack;
287 } else if (ret < 0) { 283 } else if (ret < 0) {
288 dev_kfree_skb(skb); 284 dev_kfree_skb(skb);
289 goto out; 285 goto out_ack;
290 } else if (test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED,
291 &wl->flags)) {
292 /* firmware buffer has space, restart queues */
293 wl1271_debug(DEBUG_TX,
294 "complete_packet: waking queues");
295 ieee80211_wake_queues(wl->hw);
296 } 286 }
297 } 287 }
298 288
289out_ack:
290 /* interrupt the firmware with the new packets */
291 if (prev_tx_packets_count != wl->tx_packets_count)
292 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
293
299out: 294out:
300 if (woken_up) 295 if (woken_up)
301 wl1271_ps_elp_sleep(wl); 296 wl1271_ps_elp_sleep(wl);
@@ -308,11 +303,12 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
308{ 303{
309 struct ieee80211_tx_info *info; 304 struct ieee80211_tx_info *info;
310 struct sk_buff *skb; 305 struct sk_buff *skb;
311 u16 seq;
312 int id = result->id; 306 int id = result->id;
307 int rate = -1;
308 u8 retries = 0;
313 309
314 /* check for id legality */ 310 /* check for id legality */
315 if (id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL) { 311 if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
316 wl1271_warning("TX result illegal id: %d", id); 312 wl1271_warning("TX result illegal id: %d", id);
317 return; 313 return;
318 } 314 }
@@ -320,31 +316,29 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
320 skb = wl->tx_frames[id]; 316 skb = wl->tx_frames[id];
321 info = IEEE80211_SKB_CB(skb); 317 info = IEEE80211_SKB_CB(skb);
322 318
323 /* update packet status */ 319 /* update the TX status info */
324 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 320 if (result->status == TX_SUCCESS) {
325 if (result->status == TX_SUCCESS) 321 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
326 info->flags |= IEEE80211_TX_STAT_ACK; 322 info->flags |= IEEE80211_TX_STAT_ACK;
327 if (result->status & TX_RETRY_EXCEEDED) { 323 rate = wl1271_rate_to_idx(wl, result->rate_class_index);
328 /* FIXME */ 324 retries = result->ack_failures;
329 /* info->status.excessive_retries = 1; */ 325 } else if (result->status == TX_RETRY_EXCEEDED) {
330 wl->stats.excessive_retries++; 326 wl->stats.excessive_retries++;
331 } 327 retries = result->ack_failures;
332 } 328 }
333 329
334 /* FIXME */ 330 info->status.rates[0].idx = rate;
335 /* info->status.retry_count = result->ack_failures; */ 331 info->status.rates[0].count = retries;
332 info->status.rates[0].flags = 0;
333 info->status.ack_signal = -1;
334
336 wl->stats.retry_count += result->ack_failures; 335 wl->stats.retry_count += result->ack_failures;
337 336
338 /* update security sequence number */ 337 /* update security sequence number */
339 seq = wl->tx_security_seq_16 + 338 wl->tx_security_seq += (result->lsb_security_sequence_number -
340 (result->lsb_security_sequence_number - 339 wl->tx_security_last_seq);
341 wl->tx_security_last_seq);
342 wl->tx_security_last_seq = result->lsb_security_sequence_number; 340 wl->tx_security_last_seq = result->lsb_security_sequence_number;
343 341
344 if (seq < wl->tx_security_seq_16)
345 wl->tx_security_seq_32++;
346 wl->tx_security_seq_16 = seq;
347
348 /* remove private header from packet */ 342 /* remove private header from packet */
349 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 343 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
350 344
@@ -367,23 +361,29 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
367} 361}
368 362
369/* Called upon reception of a TX complete interrupt */ 363/* Called upon reception of a TX complete interrupt */
370void wl1271_tx_complete(struct wl1271 *wl, u32 count) 364void wl1271_tx_complete(struct wl1271 *wl)
371{ 365{
372 struct wl1271_acx_mem_map *memmap = 366 struct wl1271_acx_mem_map *memmap =
373 (struct wl1271_acx_mem_map *)wl->target_mem_map; 367 (struct wl1271_acx_mem_map *)wl->target_mem_map;
368 u32 count, fw_counter;
374 u32 i; 369 u32 i;
375 370
376 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
377
378 /* read the tx results from the chipset */ 371 /* read the tx results from the chipset */
379 wl1271_read(wl, le32_to_cpu(memmap->tx_result), 372 wl1271_read(wl, le32_to_cpu(memmap->tx_result),
380 wl->tx_res_if, sizeof(*wl->tx_res_if), false); 373 wl->tx_res_if, sizeof(*wl->tx_res_if), false);
374 fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
375
376 /* write host counter to chipset (to ack) */
377 wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
378 offsetof(struct wl1271_tx_hw_res_if,
379 tx_result_host_counter), fw_counter);
380
381 count = fw_counter - wl->tx_results_count;
382 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
381 383
382 /* verify that the result buffer is not getting overrun */ 384 /* verify that the result buffer is not getting overrun */
383 if (count > TX_HW_RESULT_QUEUE_LEN) { 385 if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
384 wl1271_warning("TX result overflow from chipset: %d", count); 386 wl1271_warning("TX result overflow from chipset: %d", count);
385 count = TX_HW_RESULT_QUEUE_LEN;
386 }
387 387
388 /* process the results */ 388 /* process the results */
389 for (i = 0; i < count; i++) { 389 for (i = 0; i < count; i++) {
@@ -397,11 +397,18 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
397 wl->tx_results_count++; 397 wl->tx_results_count++;
398 } 398 }
399 399
400 /* write host counter to chipset (to ack) */ 400 if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
401 wl1271_write32(wl, le32_to_cpu(memmap->tx_result) + 401 skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
402 offsetof(struct wl1271_tx_hw_res_if, 402 unsigned long flags;
403 tx_result_host_counter), 403
404 le32_to_cpu(wl->tx_res_if->tx_result_fw_counter)); 404 /* firmware buffer has space, restart queues */
405 wl1271_debug(DEBUG_TX, "tx_complete: waking queues");
406 spin_lock_irqsave(&wl->wl_lock, flags);
407 ieee80211_wake_queues(wl->hw);
408 clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
409 spin_unlock_irqrestore(&wl->wl_lock, flags);
410 ieee80211_queue_work(wl->hw, &wl->tx_work);
411 }
405} 412}
406 413
407/* caller must hold wl->mutex */ 414/* caller must hold wl->mutex */
@@ -409,31 +416,19 @@ void wl1271_tx_flush(struct wl1271 *wl)
409{ 416{
410 int i; 417 int i;
411 struct sk_buff *skb; 418 struct sk_buff *skb;
412 struct ieee80211_tx_info *info;
413 419
414 /* TX failure */ 420 /* TX failure */
415/* control->flags = 0; FIXME */ 421/* control->flags = 0; FIXME */
416 422
417 while ((skb = skb_dequeue(&wl->tx_queue))) { 423 while ((skb = skb_dequeue(&wl->tx_queue))) {
418 info = IEEE80211_SKB_CB(skb);
419
420 wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb); 424 wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb);
421
422 if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
423 continue;
424
425 ieee80211_tx_status(wl->hw, skb); 425 ieee80211_tx_status(wl->hw, skb);
426 } 426 }
427 427
428 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 428 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
429 if (wl->tx_frames[i] != NULL) { 429 if (wl->tx_frames[i] != NULL) {
430 skb = wl->tx_frames[i]; 430 skb = wl->tx_frames[i];
431 info = IEEE80211_SKB_CB(skb);
432
433 if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
434 continue;
435
436 ieee80211_tx_status(wl->hw, skb);
437 wl->tx_frames[i] = NULL; 431 wl->tx_frames[i] = NULL;
432 ieee80211_tx_status(wl->hw, skb);
438 } 433 }
439} 434}
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 17e405a09caa..3b8b7ac253fd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -26,7 +26,7 @@
26#define __WL1271_TX_H__ 26#define __WL1271_TX_H__
27 27
28#define TX_HW_BLOCK_SPARE 2 28#define TX_HW_BLOCK_SPARE 2
29#define TX_HW_BLOCK_SHIFT_DIV 8 29#define TX_HW_BLOCK_SIZE 252
30 30
31#define TX_HW_MGMT_PKT_LIFETIME_TU 2000 31#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
32/* The chipset reference driver states, that the "aid" value 1 32/* The chipset reference driver states, that the "aid" value 1
@@ -125,9 +125,6 @@ struct wl1271_tx_hw_res_if {
125 125
126static inline int wl1271_tx_get_queue(int queue) 126static inline int wl1271_tx_get_queue(int queue)
127{ 127{
128 /* FIXME: use best effort until WMM is enabled */
129 return CONF_TX_AC_BE;
130
131 switch (queue) { 128 switch (queue) {
132 case 0: 129 case 0:
133 return CONF_TX_AC_VO; 130 return CONF_TX_AC_VO;
@@ -160,7 +157,9 @@ static inline int wl1271_tx_ac_to_tid(int ac)
160} 157}
161 158
162void wl1271_tx_work(struct work_struct *work); 159void wl1271_tx_work(struct work_struct *work);
163void wl1271_tx_complete(struct wl1271 *wl, u32 count); 160void wl1271_tx_complete(struct wl1271 *wl);
164void wl1271_tx_flush(struct wl1271 *wl); 161void wl1271_tx_flush(struct wl1271 *wl);
162u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
163u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
165 164
166#endif 165#endif
diff --git a/drivers/net/wireless/wl3501.h b/drivers/net/wireless/wl3501.h
index 8bce1a550a22..8816e371fd0e 100644
--- a/drivers/net/wireless/wl3501.h
+++ b/drivers/net/wireless/wl3501.h
@@ -610,7 +610,6 @@ struct wl3501_card {
610 struct iw_statistics wstats; 610 struct iw_statistics wstats;
611 struct iw_spy_data spy_data; 611 struct iw_spy_data spy_data;
612 struct iw_public_data wireless_data; 612 struct iw_public_data wireless_data;
613 struct dev_node_t node;
614 struct pcmcia_device *p_dev; 613 struct pcmcia_device *p_dev;
615}; 614};
616#endif 615#endif
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 7b9621de239f..376c6b964a9c 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1307,7 +1307,7 @@ static void wl3501_tx_timeout(struct net_device *dev)
1307 printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n", 1307 printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
1308 dev->name, rc); 1308 dev->name, rc);
1309 else { 1309 else {
1310 dev->trans_start = jiffies; 1310 dev->trans_start = jiffies; /* prevent tx timeout */
1311 netif_wake_queue(dev); 1311 netif_wake_queue(dev);
1312 } 1312 }
1313} 1313}
@@ -1326,7 +1326,6 @@ static netdev_tx_t wl3501_hard_start_xmit(struct sk_buff *skb,
1326 1326
1327 spin_lock_irqsave(&this->lock, flags); 1327 spin_lock_irqsave(&this->lock, flags);
1328 enabled = wl3501_block_interrupt(this); 1328 enabled = wl3501_block_interrupt(this);
1329 dev->trans_start = jiffies;
1330 rc = wl3501_send_pkt(this, skb->data, skb->len); 1329 rc = wl3501_send_pkt(this, skb->data, skb->len);
1331 if (enabled) 1330 if (enabled)
1332 wl3501_unblock_interrupt(this); 1331 wl3501_unblock_interrupt(this);
@@ -1451,10 +1450,10 @@ static void wl3501_detach(struct pcmcia_device *link)
1451 netif_device_detach(dev); 1450 netif_device_detach(dev);
1452 wl3501_release(link); 1451 wl3501_release(link);
1453 1452
1453 unregister_netdev(dev);
1454
1454 if (link->priv) 1455 if (link->priv)
1455 free_netdev(link->priv); 1456 free_netdev(link->priv);
1456
1457 return;
1458} 1457}
1459 1458
1460static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info, 1459static int wl3501_get_name(struct net_device *dev, struct iw_request_info *info,
@@ -1834,32 +1833,32 @@ out:
1834} 1833}
1835 1834
1836static const iw_handler wl3501_handler[] = { 1835static const iw_handler wl3501_handler[] = {
1837 [SIOCGIWNAME - SIOCIWFIRST] = wl3501_get_name, 1836 IW_HANDLER(SIOCGIWNAME, wl3501_get_name),
1838 [SIOCSIWFREQ - SIOCIWFIRST] = wl3501_set_freq, 1837 IW_HANDLER(SIOCSIWFREQ, wl3501_set_freq),
1839 [SIOCGIWFREQ - SIOCIWFIRST] = wl3501_get_freq, 1838 IW_HANDLER(SIOCGIWFREQ, wl3501_get_freq),
1840 [SIOCSIWMODE - SIOCIWFIRST] = wl3501_set_mode, 1839 IW_HANDLER(SIOCSIWMODE, wl3501_set_mode),
1841 [SIOCGIWMODE - SIOCIWFIRST] = wl3501_get_mode, 1840 IW_HANDLER(SIOCGIWMODE, wl3501_get_mode),
1842 [SIOCGIWSENS - SIOCIWFIRST] = wl3501_get_sens, 1841 IW_HANDLER(SIOCGIWSENS, wl3501_get_sens),
1843 [SIOCGIWRANGE - SIOCIWFIRST] = wl3501_get_range, 1842 IW_HANDLER(SIOCGIWRANGE, wl3501_get_range),
1844 [SIOCSIWSPY - SIOCIWFIRST] = iw_handler_set_spy, 1843 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
1845 [SIOCGIWSPY - SIOCIWFIRST] = iw_handler_get_spy, 1844 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
1846 [SIOCSIWTHRSPY - SIOCIWFIRST] = iw_handler_set_thrspy, 1845 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
1847 [SIOCGIWTHRSPY - SIOCIWFIRST] = iw_handler_get_thrspy, 1846 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
1848 [SIOCSIWAP - SIOCIWFIRST] = wl3501_set_wap, 1847 IW_HANDLER(SIOCSIWAP, wl3501_set_wap),
1849 [SIOCGIWAP - SIOCIWFIRST] = wl3501_get_wap, 1848 IW_HANDLER(SIOCGIWAP, wl3501_get_wap),
1850 [SIOCSIWSCAN - SIOCIWFIRST] = wl3501_set_scan, 1849 IW_HANDLER(SIOCSIWSCAN, wl3501_set_scan),
1851 [SIOCGIWSCAN - SIOCIWFIRST] = wl3501_get_scan, 1850 IW_HANDLER(SIOCGIWSCAN, wl3501_get_scan),
1852 [SIOCSIWESSID - SIOCIWFIRST] = wl3501_set_essid, 1851 IW_HANDLER(SIOCSIWESSID, wl3501_set_essid),
1853 [SIOCGIWESSID - SIOCIWFIRST] = wl3501_get_essid, 1852 IW_HANDLER(SIOCGIWESSID, wl3501_get_essid),
1854 [SIOCSIWNICKN - SIOCIWFIRST] = wl3501_set_nick, 1853 IW_HANDLER(SIOCSIWNICKN, wl3501_set_nick),
1855 [SIOCGIWNICKN - SIOCIWFIRST] = wl3501_get_nick, 1854 IW_HANDLER(SIOCGIWNICKN, wl3501_get_nick),
1856 [SIOCGIWRATE - SIOCIWFIRST] = wl3501_get_rate, 1855 IW_HANDLER(SIOCGIWRATE, wl3501_get_rate),
1857 [SIOCGIWRTS - SIOCIWFIRST] = wl3501_get_rts_threshold, 1856 IW_HANDLER(SIOCGIWRTS, wl3501_get_rts_threshold),
1858 [SIOCGIWFRAG - SIOCIWFIRST] = wl3501_get_frag_threshold, 1857 IW_HANDLER(SIOCGIWFRAG, wl3501_get_frag_threshold),
1859 [SIOCGIWTXPOW - SIOCIWFIRST] = wl3501_get_txpow, 1858 IW_HANDLER(SIOCGIWTXPOW, wl3501_get_txpow),
1860 [SIOCGIWRETRY - SIOCIWFIRST] = wl3501_get_retry, 1859 IW_HANDLER(SIOCGIWRETRY, wl3501_get_retry),
1861 [SIOCGIWENCODE - SIOCIWFIRST] = wl3501_get_encode, 1860 IW_HANDLER(SIOCGIWENCODE, wl3501_get_encode),
1862 [SIOCGIWPOWER - SIOCIWFIRST] = wl3501_get_power, 1861 IW_HANDLER(SIOCGIWPOWER, wl3501_get_power),
1863}; 1862};
1864 1863
1865static const struct iw_handler_def wl3501_handler_def = { 1864static const struct iw_handler_def wl3501_handler_def = {
@@ -1897,10 +1896,6 @@ static int wl3501_probe(struct pcmcia_device *p_dev)
1897 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8; 1896 p_dev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
1898 p_dev->io.IOAddrLines = 5; 1897 p_dev->io.IOAddrLines = 5;
1899 1898
1900 /* Interrupt setup */
1901 p_dev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
1902 p_dev->irq.Handler = wl3501_interrupt;
1903
1904 /* General socket configuration */ 1899 /* General socket configuration */
1905 p_dev->conf.Attributes = CONF_ENABLE_IRQ; 1900 p_dev->conf.Attributes = CONF_ENABLE_IRQ;
1906 p_dev->conf.IntType = INT_MEMORY_AND_IO; 1901 p_dev->conf.IntType = INT_MEMORY_AND_IO;
@@ -1961,7 +1956,7 @@ static int wl3501_config(struct pcmcia_device *link)
1961 /* Now allocate an interrupt line. Note that this does not actually 1956 /* Now allocate an interrupt line. Note that this does not actually
1962 * assign a handler to the interrupt. */ 1957 * assign a handler to the interrupt. */
1963 1958
1964 ret = pcmcia_request_irq(link, &link->irq); 1959 ret = pcmcia_request_irq(link, wl3501_interrupt);
1965 if (ret) 1960 if (ret)
1966 goto failed; 1961 goto failed;
1967 1962
@@ -1972,7 +1967,7 @@ static int wl3501_config(struct pcmcia_device *link)
1972 if (ret) 1967 if (ret)
1973 goto failed; 1968 goto failed;
1974 1969
1975 dev->irq = link->irq.AssignedIRQ; 1970 dev->irq = link->irq;
1976 dev->base_addr = link->io.BasePort1; 1971 dev->base_addr = link->io.BasePort1;
1977 SET_NETDEV_DEV(dev, &link->dev); 1972 SET_NETDEV_DEV(dev, &link->dev);
1978 if (register_netdev(dev)) { 1973 if (register_netdev(dev)) {
@@ -1981,20 +1976,15 @@ static int wl3501_config(struct pcmcia_device *link)
1981 } 1976 }
1982 1977
1983 this = netdev_priv(dev); 1978 this = netdev_priv(dev);
1984 /*
1985 * At this point, the dev_node_t structure(s) should be initialized and
1986 * arranged in a linked list at link->dev_node.
1987 */
1988 link->dev_node = &this->node;
1989 1979
1990 this->base_addr = dev->base_addr; 1980 this->base_addr = dev->base_addr;
1991 1981
1992 if (!wl3501_get_flash_mac_addr(this)) { 1982 if (!wl3501_get_flash_mac_addr(this)) {
1993 printk(KERN_WARNING "%s: Cant read MAC addr in flash ROM?\n", 1983 printk(KERN_WARNING "%s: Cant read MAC addr in flash ROM?\n",
1994 dev->name); 1984 dev->name);
1985 unregister_netdev(dev);
1995 goto failed; 1986 goto failed;
1996 } 1987 }
1997 strcpy(this->node.dev_name, dev->name);
1998 1988
1999 for (i = 0; i < 6; i++) 1989 for (i = 0; i < 6; i++)
2000 dev->dev_addr[i] = ((char *)&this->mac_addr)[i]; 1990 dev->dev_addr[i] = ((char *)&this->mac_addr)[i];
@@ -2038,12 +2028,6 @@ failed:
2038 */ 2028 */
2039static void wl3501_release(struct pcmcia_device *link) 2029static void wl3501_release(struct pcmcia_device *link)
2040{ 2030{
2041 struct net_device *dev = link->priv;
2042
2043 /* Unlink the device chain */
2044 if (link->dev_node)
2045 unregister_netdev(dev);
2046
2047 pcmcia_disable_device(link); 2031 pcmcia_disable_device(link);
2048} 2032}
2049 2033
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 9d1277874645..390d77f762c4 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -134,7 +134,6 @@ static void zd1201_usbfree(struct urb *urb)
134 134
135 kfree(urb->transfer_buffer); 135 kfree(urb->transfer_buffer);
136 usb_free_urb(urb); 136 usb_free_urb(urb);
137 return;
138} 137}
139 138
140/* cmdreq message: 139/* cmdreq message:
@@ -185,7 +184,6 @@ static void zd1201_usbtx(struct urb *urb)
185{ 184{
186 struct zd1201 *zd = urb->context; 185 struct zd1201 *zd = urb->context;
187 netif_wake_queue(zd->dev); 186 netif_wake_queue(zd->dev);
188 return;
189} 187}
190 188
191/* Incoming data */ 189/* Incoming data */
@@ -407,7 +405,6 @@ exit:
407 wake_up(&zd->rxdataq); 405 wake_up(&zd->rxdataq);
408 kfree(urb->transfer_buffer); 406 kfree(urb->transfer_buffer);
409 } 407 }
410 return;
411} 408}
412 409
413static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata, 410static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata,
@@ -827,7 +824,6 @@ static netdev_tx_t zd1201_hard_start_xmit(struct sk_buff *skb,
827 } else { 824 } else {
828 dev->stats.tx_packets++; 825 dev->stats.tx_packets++;
829 dev->stats.tx_bytes += skb->len; 826 dev->stats.tx_bytes += skb->len;
830 dev->trans_start = jiffies;
831 } 827 }
832 kfree_skb(skb); 828 kfree_skb(skb);
833 829
@@ -845,7 +841,7 @@ static void zd1201_tx_timeout(struct net_device *dev)
845 usb_unlink_urb(zd->tx_urb); 841 usb_unlink_urb(zd->tx_urb);
846 dev->stats.tx_errors++; 842 dev->stats.tx_errors++;
847 /* Restart the timeout to quiet the watchdog: */ 843 /* Restart the timeout to quiet the watchdog: */
848 dev->trans_start = jiffies; 844 dev->trans_start = jiffies; /* prevent tx timeout */
849} 845}
850 846
851static int zd1201_set_mac_address(struct net_device *dev, void *p) 847static int zd1201_set_mac_address(struct net_device *dev, void *p)
@@ -876,7 +872,7 @@ static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev)
876static void zd1201_set_multicast(struct net_device *dev) 872static void zd1201_set_multicast(struct net_device *dev)
877{ 873{
878 struct zd1201 *zd = netdev_priv(dev); 874 struct zd1201 *zd = netdev_priv(dev);
879 struct dev_mc_list *mc; 875 struct netdev_hw_addr *ha;
880 unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI]; 876 unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
881 int i; 877 int i;
882 878
@@ -884,8 +880,8 @@ static void zd1201_set_multicast(struct net_device *dev)
884 return; 880 return;
885 881
886 i = 0; 882 i = 0;
887 netdev_for_each_mc_addr(mc, dev) 883 netdev_for_each_mc_addr(ha, dev)
888 memcpy(reqbuf + i++ * ETH_ALEN, mc->dmi_addr, ETH_ALEN); 884 memcpy(reqbuf + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
889 zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf, 885 zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
890 netdev_mc_count(dev) * ETH_ALEN, 0); 886 netdev_mc_count(dev) * ETH_ALEN, 0);
891} 887}
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 16fa289ad77b..b0b666019a93 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -948,20 +948,17 @@ static void set_rx_filter_handler(struct work_struct *work)
948} 948}
949 949
950static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw, 950static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
951 int mc_count, struct dev_addr_list *mclist) 951 struct netdev_hw_addr_list *mc_list)
952{ 952{
953 struct zd_mac *mac = zd_hw_mac(hw); 953 struct zd_mac *mac = zd_hw_mac(hw);
954 struct zd_mc_hash hash; 954 struct zd_mc_hash hash;
955 int i; 955 struct netdev_hw_addr *ha;
956 956
957 zd_mc_clear(&hash); 957 zd_mc_clear(&hash);
958 958
959 for (i = 0; i < mc_count; i++) { 959 netdev_hw_addr_list_for_each(ha, mc_list) {
960 if (!mclist) 960 dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", ha->addr);
961 break; 961 zd_mc_add_addr(&hash, ha->addr);
962 dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", mclist->dmi_addr);
963 zd_mc_add_addr(&hash, mclist->dmi_addr);
964 mclist = mclist->next;
965 } 962 }
966 963
967 return hash.low | ((u64)hash.high << 32); 964 return hash.low | ((u64)hash.high << 32);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index d91ad1a612af..c257940b71b6 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -664,15 +664,15 @@ static struct urb *alloc_rx_urb(struct zd_usb *usb)
664 urb = usb_alloc_urb(0, GFP_KERNEL); 664 urb = usb_alloc_urb(0, GFP_KERNEL);
665 if (!urb) 665 if (!urb)
666 return NULL; 666 return NULL;
667 buffer = usb_buffer_alloc(udev, USB_MAX_RX_SIZE, GFP_KERNEL, 667 buffer = usb_alloc_coherent(udev, USB_MAX_RX_SIZE, GFP_KERNEL,
668 &urb->transfer_dma); 668 &urb->transfer_dma);
669 if (!buffer) { 669 if (!buffer) {
670 usb_free_urb(urb); 670 usb_free_urb(urb);
671 return NULL; 671 return NULL;
672 } 672 }
673 673
674 usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN), 674 usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN),
675 buffer, USB_MAX_RX_SIZE, 675 buffer, USB_MAX_RX_SIZE,
676 rx_urb_complete, usb); 676 rx_urb_complete, usb);
677 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 677 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
678 678
@@ -683,8 +683,8 @@ static void free_rx_urb(struct urb *urb)
683{ 683{
684 if (!urb) 684 if (!urb)
685 return; 685 return;
686 usb_buffer_free(urb->dev, urb->transfer_buffer_length, 686 usb_free_coherent(urb->dev, urb->transfer_buffer_length,
687 urb->transfer_buffer, urb->transfer_dma); 687 urb->transfer_buffer, urb->transfer_dma);
688 usb_free_urb(urb); 688 usb_free_urb(urb);
689} 689}
690 690
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 67f9237237dd..d04c5b262050 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -558,7 +558,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
558 } 558 }
559 559
560 /* To exclude tx timeout */ 560 /* To exclude tx timeout */
561 dev->trans_start = 0xffffffff - TX_TIMEOUT - TX_TIMEOUT; 561 dev->trans_start = jiffies; /* prevent tx timeout */
562 562
563 /* We're all ready to go. Start the queue */ 563 /* We're all ready to go. Start the queue */
564 netif_wake_queue(dev); 564 netif_wake_queue(dev);
@@ -590,7 +590,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
590 dev->stats.tx_bytes += lp->deferred_skb->len; 590 dev->stats.tx_bytes += lp->deferred_skb->len;
591 dev_kfree_skb_irq(lp->deferred_skb); 591 dev_kfree_skb_irq(lp->deferred_skb);
592 lp->deferred_skb = NULL; 592 lp->deferred_skb = NULL;
593 dev->trans_start = jiffies; 593 dev->trans_start = jiffies; /* prevent tx timeout */
594 netif_wake_queue(dev); 594 netif_wake_queue(dev);
595 } 595 }
596 } 596 }
@@ -639,7 +639,6 @@ static void xemaclite_rx_handler(struct net_device *dev)
639 } 639 }
640 640
641 skb_put(skb, len); /* Tell the skb how much data we got */ 641 skb_put(skb, len); /* Tell the skb how much data we got */
642 skb->dev = dev; /* Fill out required meta-data */
643 642
644 skb->protocol = eth_type_trans(skb, dev); 643 skb->protocol = eth_type_trans(skb, dev);
645 skb->ip_summed = CHECKSUM_NONE; 644 skb->ip_summed = CHECKSUM_NONE;
@@ -1055,7 +1054,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
1055 1054
1056 dev->stats.tx_bytes += len; 1055 dev->stats.tx_bytes += len;
1057 dev_kfree_skb(new_skb); 1056 dev_kfree_skb(new_skb);
1058 dev->trans_start = jiffies;
1059 1057
1060 return 0; 1058 return 0;
1061} 1059}
@@ -1172,7 +1170,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
1172 } 1170 }
1173 1171
1174 /* Get the virtual base address for the device */ 1172 /* Get the virtual base address for the device */
1175 lp->base_addr = ioremap(r_mem.start, r_mem.end - r_mem.start + 1); 1173 lp->base_addr = ioremap(r_mem.start, resource_size(&r_mem));
1176 if (NULL == lp->base_addr) { 1174 if (NULL == lp->base_addr) {
1177 dev_err(dev, "EmacLite: Could not allocate iomem\n"); 1175 dev_err(dev, "EmacLite: Could not allocate iomem\n");
1178 rc = -EIO; 1176 rc = -EIO;
@@ -1225,7 +1223,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
1225 return 0; 1223 return 0;
1226 1224
1227error1: 1225error1:
1228 release_mem_region(ndev->mem_start, r_mem.end - r_mem.start + 1); 1226 release_mem_region(ndev->mem_start, resource_size(&r_mem));
1229 1227
1230error2: 1228error2:
1231 xemaclite_remove_ndev(ndev); 1229 xemaclite_remove_ndev(ndev);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index ede5b2436f22..4eb67aed68dd 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -564,7 +564,6 @@ static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value
564 for (i = 10000; i >= 0; i--) 564 for (i = 10000; i >= 0; i--)
565 if ((ioread16(ioaddr + MII_Status) & 1) == 0) 565 if ((ioread16(ioaddr + MII_Status) & 1) == 0)
566 break; 566 break;
567 return;
568} 567}
569 568
570 569
@@ -1299,25 +1298,25 @@ static void set_rx_mode(struct net_device *dev)
1299 /* Too many to filter well, or accept all multicasts. */ 1298 /* Too many to filter well, or accept all multicasts. */
1300 iowrite16(0x000B, ioaddr + AddrMode); 1299 iowrite16(0x000B, ioaddr + AddrMode);
1301 } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */ 1300 } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1302 struct dev_mc_list *mclist; 1301 struct netdev_hw_addr *ha;
1303 u16 hash_table[4]; 1302 u16 hash_table[4];
1304 int i; 1303 int i;
1305 1304
1306 memset(hash_table, 0, sizeof(hash_table)); 1305 memset(hash_table, 0, sizeof(hash_table));
1307 netdev_for_each_mc_addr(mclist, dev) { 1306 netdev_for_each_mc_addr(ha, dev) {
1308 unsigned int bit; 1307 unsigned int bit;
1309 1308
1310 /* Due to a bug in the early chip versions, multiple filter 1309 /* Due to a bug in the early chip versions, multiple filter
1311 slots must be set for each address. */ 1310 slots must be set for each address. */
1312 if (yp->drv_flags & HasMulticastBug) { 1311 if (yp->drv_flags & HasMulticastBug) {
1313 bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f; 1312 bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
1314 hash_table[bit >> 4] |= (1 << bit); 1313 hash_table[bit >> 4] |= (1 << bit);
1315 bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f; 1314 bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
1316 hash_table[bit >> 4] |= (1 << bit); 1315 hash_table[bit >> 4] |= (1 << bit);
1317 bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f; 1316 bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
1318 hash_table[bit >> 4] |= (1 << bit); 1317 hash_table[bit >> 4] |= (1 << bit);
1319 } 1318 }
1320 bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f; 1319 bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
1321 hash_table[bit >> 4] |= (1 << bit); 1320 hash_table[bit >> 4] |= (1 << bit);
1322 } 1321 }
1323 /* Copy the hash table to the chip. */ 1322 /* Copy the hash table to the chip. */
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index dbfef8d70f2d..c3a329204511 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -587,7 +587,6 @@ static netdev_tx_t znet_send_packet(struct sk_buff *skb, struct net_device *dev)
587 } 587 }
588 spin_unlock_irqrestore (&znet->lock, flags); 588 spin_unlock_irqrestore (&znet->lock, flags);
589 589
590 dev->trans_start = jiffies;
591 netif_start_queue (dev); 590 netif_start_queue (dev);
592 591
593 if (znet_debug > 4) 592 if (znet_debug > 4)
@@ -802,7 +801,6 @@ static void znet_rx(struct net_device *dev)
802 /* If any worth-while packets have been received, dev_rint() 801 /* If any worth-while packets have been received, dev_rint()
803 has done a mark_bh(INET_BH) for us and will work on them 802 has done a mark_bh(INET_BH) for us and will work on them
804 when we get to the bottom-half routine. */ 803 when we get to the bottom-half routine. */
805 return;
806} 804}
807 805
808/* The inverse routine to znet_open(). */ 806/* The inverse routine to znet_open(). */
diff --git a/drivers/net/zorro8390.c b/drivers/net/zorro8390.c
index 81c753a617ab..b78a38d9172a 100644
--- a/drivers/net/zorro8390.c
+++ b/drivers/net/zorro8390.c
@@ -102,6 +102,7 @@ static struct zorro_device_id zorro8390_zorro_tbl[] __devinitdata = {
102 { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, }, 102 { ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF, },
103 { 0 } 103 { 0 }
104}; 104};
105MODULE_DEVICE_TABLE(zorro, zorro8390_zorro_tbl);
105 106
106static struct zorro_driver zorro8390_driver = { 107static struct zorro_driver zorro8390_driver = {
107 .name = "zorro8390", 108 .name = "zorro8390",
@@ -430,7 +431,6 @@ static void zorro8390_block_output(struct net_device *dev, int count,
430 431
431 z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */ 432 z_writeb(ENISR_RDC, nic_base + NE_EN0_ISR); /* Ack intr. */
432 ei_status.dmaing &= ~0x01; 433 ei_status.dmaing &= ~0x01;
433 return;
434} 434}
435 435
436static void __devexit zorro8390_remove_one(struct zorro_dev *z) 436static void __devexit zorro8390_remove_one(struct zorro_dev *z)